]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.2.2-3.1.1-201111170037.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-3.1.1-201111170037.patch
CommitLineData
317c81b4
PK
1diff -urNp linux-3.1.1/arch/alpha/include/asm/elf.h linux-3.1.1/arch/alpha/include/asm/elf.h
2--- linux-3.1.1/arch/alpha/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
3+++ linux-3.1.1/arch/alpha/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
4@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8+#ifdef CONFIG_PAX_ASLR
9+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10+
11+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13+#endif
14+
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18diff -urNp linux-3.1.1/arch/alpha/include/asm/pgtable.h linux-3.1.1/arch/alpha/include/asm/pgtable.h
19--- linux-3.1.1/arch/alpha/include/asm/pgtable.h 2011-11-11 15:19:27.000000000 -0500
20+++ linux-3.1.1/arch/alpha/include/asm/pgtable.h 2011-11-16 18:39:07.000000000 -0500
21@@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25+
26+#ifdef CONFIG_PAX_PAGEEXEC
27+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30+#else
31+# define PAGE_SHARED_NOEXEC PAGE_SHARED
32+# define PAGE_COPY_NOEXEC PAGE_COPY
33+# define PAGE_READONLY_NOEXEC PAGE_READONLY
34+#endif
35+
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39diff -urNp linux-3.1.1/arch/alpha/kernel/module.c linux-3.1.1/arch/alpha/kernel/module.c
40--- linux-3.1.1/arch/alpha/kernel/module.c 2011-11-11 15:19:27.000000000 -0500
41+++ linux-3.1.1/arch/alpha/kernel/module.c 2011-11-16 18:39:07.000000000 -0500
42@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46- gp = (u64)me->module_core + me->core_size - 0x8000;
47+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51diff -urNp linux-3.1.1/arch/alpha/kernel/osf_sys.c linux-3.1.1/arch/alpha/kernel/osf_sys.c
52--- linux-3.1.1/arch/alpha/kernel/osf_sys.c 2011-11-11 15:19:27.000000000 -0500
53+++ linux-3.1.1/arch/alpha/kernel/osf_sys.c 2011-11-16 18:39:07.000000000 -0500
54@@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58- if (!vma || addr + len <= vma->vm_start)
59+ if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63@@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67+#ifdef CONFIG_PAX_RANDMMAP
68+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69+#endif
70+
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74@@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79- len, limit);
80+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81+
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85diff -urNp linux-3.1.1/arch/alpha/mm/fault.c linux-3.1.1/arch/alpha/mm/fault.c
86--- linux-3.1.1/arch/alpha/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
87+++ linux-3.1.1/arch/alpha/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
88@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92+#ifdef CONFIG_PAX_PAGEEXEC
93+/*
94+ * PaX: decide what to do with offenders (regs->pc = fault address)
95+ *
96+ * returns 1 when task should be killed
97+ * 2 when patched PLT trampoline was detected
98+ * 3 when unpatched PLT trampoline was detected
99+ */
100+static int pax_handle_fetch_fault(struct pt_regs *regs)
101+{
102+
103+#ifdef CONFIG_PAX_EMUPLT
104+ int err;
105+
106+ do { /* PaX: patched PLT emulation #1 */
107+ unsigned int ldah, ldq, jmp;
108+
109+ err = get_user(ldah, (unsigned int *)regs->pc);
110+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112+
113+ if (err)
114+ break;
115+
116+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118+ jmp == 0x6BFB0000U)
119+ {
120+ unsigned long r27, addr;
121+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123+
124+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125+ err = get_user(r27, (unsigned long *)addr);
126+ if (err)
127+ break;
128+
129+ regs->r27 = r27;
130+ regs->pc = r27;
131+ return 2;
132+ }
133+ } while (0);
134+
135+ do { /* PaX: patched PLT emulation #2 */
136+ unsigned int ldah, lda, br;
137+
138+ err = get_user(ldah, (unsigned int *)regs->pc);
139+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
140+ err |= get_user(br, (unsigned int *)(regs->pc+8));
141+
142+ if (err)
143+ break;
144+
145+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
147+ (br & 0xFFE00000U) == 0xC3E00000U)
148+ {
149+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152+
153+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155+ return 2;
156+ }
157+ } while (0);
158+
159+ do { /* PaX: unpatched PLT emulation */
160+ unsigned int br;
161+
162+ err = get_user(br, (unsigned int *)regs->pc);
163+
164+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165+ unsigned int br2, ldq, nop, jmp;
166+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167+
168+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169+ err = get_user(br2, (unsigned int *)addr);
170+ err |= get_user(ldq, (unsigned int *)(addr+4));
171+ err |= get_user(nop, (unsigned int *)(addr+8));
172+ err |= get_user(jmp, (unsigned int *)(addr+12));
173+ err |= get_user(resolver, (unsigned long *)(addr+16));
174+
175+ if (err)
176+ break;
177+
178+ if (br2 == 0xC3600000U &&
179+ ldq == 0xA77B000CU &&
180+ nop == 0x47FF041FU &&
181+ jmp == 0x6B7B0000U)
182+ {
183+ regs->r28 = regs->pc+4;
184+ regs->r27 = addr+16;
185+ regs->pc = resolver;
186+ return 3;
187+ }
188+ }
189+ } while (0);
190+#endif
191+
192+ return 1;
193+}
194+
195+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
196+{
197+ unsigned long i;
198+
199+ printk(KERN_ERR "PAX: bytes at PC: ");
200+ for (i = 0; i < 5; i++) {
201+ unsigned int c;
202+ if (get_user(c, (unsigned int *)pc+i))
203+ printk(KERN_CONT "???????? ");
204+ else
205+ printk(KERN_CONT "%08x ", c);
206+ }
207+ printk("\n");
208+}
209+#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217- if (!(vma->vm_flags & VM_EXEC))
218+ if (!(vma->vm_flags & VM_EXEC)) {
219+
220+#ifdef CONFIG_PAX_PAGEEXEC
221+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222+ goto bad_area;
223+
224+ up_read(&mm->mmap_sem);
225+ switch (pax_handle_fetch_fault(regs)) {
226+
227+#ifdef CONFIG_PAX_EMUPLT
228+ case 2:
229+ case 3:
230+ return;
231+#endif
232+
233+ }
234+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235+ do_group_exit(SIGKILL);
236+#else
237 goto bad_area;
238+#endif
239+
240+ }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244diff -urNp linux-3.1.1/arch/arm/include/asm/elf.h linux-3.1.1/arch/arm/include/asm/elf.h
245--- linux-3.1.1/arch/arm/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
246+++ linux-3.1.1/arch/arm/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
247@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253+
254+#ifdef CONFIG_PAX_ASLR
255+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256+
257+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259+#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t
264 extern void elf_set_personality(const struct elf32_hdr *);
265 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
266
267-struct mm_struct;
268-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
269-#define arch_randomize_brk arch_randomize_brk
270-
271 extern int vectors_user_mapping(void);
272 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
273 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
274diff -urNp linux-3.1.1/arch/arm/include/asm/kmap_types.h linux-3.1.1/arch/arm/include/asm/kmap_types.h
275--- linux-3.1.1/arch/arm/include/asm/kmap_types.h 2011-11-11 15:19:27.000000000 -0500
276+++ linux-3.1.1/arch/arm/include/asm/kmap_types.h 2011-11-16 18:39:07.000000000 -0500
277@@ -21,6 +21,7 @@ enum km_type {
278 KM_L1_CACHE,
279 KM_L2_CACHE,
280 KM_KDB,
281+ KM_CLEARPAGE,
282 KM_TYPE_NR
283 };
284
285diff -urNp linux-3.1.1/arch/arm/include/asm/uaccess.h linux-3.1.1/arch/arm/include/asm/uaccess.h
286--- linux-3.1.1/arch/arm/include/asm/uaccess.h 2011-11-11 15:19:27.000000000 -0500
287+++ linux-3.1.1/arch/arm/include/asm/uaccess.h 2011-11-16 18:39:07.000000000 -0500
288@@ -22,6 +22,8 @@
289 #define VERIFY_READ 0
290 #define VERIFY_WRITE 1
291
292+extern void check_object_size(const void *ptr, unsigned long n, bool to);
293+
294 /*
295 * The exception table consists of pairs of addresses: the first is the
296 * address of an instruction that is allowed to fault, and the second is
297@@ -387,8 +389,23 @@ do { \
298
299
300 #ifdef CONFIG_MMU
301-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
302-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
303+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
304+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
305+
306+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
307+{
308+ if (!__builtin_constant_p(n))
309+ check_object_size(to, n, false);
310+ return ___copy_from_user(to, from, n);
311+}
312+
313+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
314+{
315+ if (!__builtin_constant_p(n))
316+ check_object_size(from, n, true);
317+ return ___copy_to_user(to, from, n);
318+}
319+
320 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
321 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
322 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
323@@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
324
325 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
326 {
327+ if ((long)n < 0)
328+ return n;
329+
330 if (access_ok(VERIFY_READ, from, n))
331 n = __copy_from_user(to, from, n);
332 else /* security hole - plug it */
333@@ -412,6 +432,9 @@ static inline unsigned long __must_check
334
335 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
336 {
337+ if ((long)n < 0)
338+ return n;
339+
340 if (access_ok(VERIFY_WRITE, to, n))
341 n = __copy_to_user(to, from, n);
342 return n;
343diff -urNp linux-3.1.1/arch/arm/kernel/armksyms.c linux-3.1.1/arch/arm/kernel/armksyms.c
344--- linux-3.1.1/arch/arm/kernel/armksyms.c 2011-11-11 15:19:27.000000000 -0500
345+++ linux-3.1.1/arch/arm/kernel/armksyms.c 2011-11-16 18:39:07.000000000 -0500
346@@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
347 #ifdef CONFIG_MMU
348 EXPORT_SYMBOL(copy_page);
349
350-EXPORT_SYMBOL(__copy_from_user);
351-EXPORT_SYMBOL(__copy_to_user);
352+EXPORT_SYMBOL(___copy_from_user);
353+EXPORT_SYMBOL(___copy_to_user);
354 EXPORT_SYMBOL(__clear_user);
355
356 EXPORT_SYMBOL(__get_user_1);
357diff -urNp linux-3.1.1/arch/arm/kernel/process.c linux-3.1.1/arch/arm/kernel/process.c
358--- linux-3.1.1/arch/arm/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
359+++ linux-3.1.1/arch/arm/kernel/process.c 2011-11-16 18:39:07.000000000 -0500
360@@ -28,7 +28,6 @@
361 #include <linux/tick.h>
362 #include <linux/utsname.h>
363 #include <linux/uaccess.h>
364-#include <linux/random.h>
365 #include <linux/hw_breakpoint.h>
366 #include <linux/cpuidle.h>
367
368@@ -481,12 +480,6 @@ unsigned long get_wchan(struct task_stru
369 return 0;
370 }
371
372-unsigned long arch_randomize_brk(struct mm_struct *mm)
373-{
374- unsigned long range_end = mm->brk + 0x02000000;
375- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
376-}
377-
378 #ifdef CONFIG_MMU
379 /*
380 * The vectors page is always readable from user space for the
381diff -urNp linux-3.1.1/arch/arm/kernel/traps.c linux-3.1.1/arch/arm/kernel/traps.c
382--- linux-3.1.1/arch/arm/kernel/traps.c 2011-11-11 15:19:27.000000000 -0500
383+++ linux-3.1.1/arch/arm/kernel/traps.c 2011-11-16 18:40:08.000000000 -0500
384@@ -257,6 +257,8 @@ static int __die(const char *str, int er
385
386 static DEFINE_SPINLOCK(die_lock);
387
388+extern void gr_handle_kernel_exploit(void);
389+
390 /*
391 * This function is protected against re-entrancy.
392 */
393@@ -284,6 +286,9 @@ void die(const char *str, struct pt_regs
394 panic("Fatal exception in interrupt");
395 if (panic_on_oops)
396 panic("Fatal exception");
397+
398+ gr_handle_kernel_exploit();
399+
400 if (ret != NOTIFY_STOP)
401 do_exit(SIGSEGV);
402 }
403diff -urNp linux-3.1.1/arch/arm/lib/copy_from_user.S linux-3.1.1/arch/arm/lib/copy_from_user.S
404--- linux-3.1.1/arch/arm/lib/copy_from_user.S 2011-11-11 15:19:27.000000000 -0500
405+++ linux-3.1.1/arch/arm/lib/copy_from_user.S 2011-11-16 18:39:07.000000000 -0500
406@@ -16,7 +16,7 @@
407 /*
408 * Prototype:
409 *
410- * size_t __copy_from_user(void *to, const void *from, size_t n)
411+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
412 *
413 * Purpose:
414 *
415@@ -84,11 +84,11 @@
416
417 .text
418
419-ENTRY(__copy_from_user)
420+ENTRY(___copy_from_user)
421
422 #include "copy_template.S"
423
424-ENDPROC(__copy_from_user)
425+ENDPROC(___copy_from_user)
426
427 .pushsection .fixup,"ax"
428 .align 0
429diff -urNp linux-3.1.1/arch/arm/lib/copy_to_user.S linux-3.1.1/arch/arm/lib/copy_to_user.S
430--- linux-3.1.1/arch/arm/lib/copy_to_user.S 2011-11-11 15:19:27.000000000 -0500
431+++ linux-3.1.1/arch/arm/lib/copy_to_user.S 2011-11-16 18:39:07.000000000 -0500
432@@ -16,7 +16,7 @@
433 /*
434 * Prototype:
435 *
436- * size_t __copy_to_user(void *to, const void *from, size_t n)
437+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
438 *
439 * Purpose:
440 *
441@@ -88,11 +88,11 @@
442 .text
443
444 ENTRY(__copy_to_user_std)
445-WEAK(__copy_to_user)
446+WEAK(___copy_to_user)
447
448 #include "copy_template.S"
449
450-ENDPROC(__copy_to_user)
451+ENDPROC(___copy_to_user)
452 ENDPROC(__copy_to_user_std)
453
454 .pushsection .fixup,"ax"
455diff -urNp linux-3.1.1/arch/arm/lib/uaccess.S linux-3.1.1/arch/arm/lib/uaccess.S
456--- linux-3.1.1/arch/arm/lib/uaccess.S 2011-11-11 15:19:27.000000000 -0500
457+++ linux-3.1.1/arch/arm/lib/uaccess.S 2011-11-16 18:39:07.000000000 -0500
458@@ -20,7 +20,7 @@
459
460 #define PAGE_SHIFT 12
461
462-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
463+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
464 * Purpose : copy a block to user memory from kernel memory
465 * Params : to - user memory
466 * : from - kernel memory
467@@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May f
468 sub r2, r2, ip
469 b .Lc2u_dest_aligned
470
471-ENTRY(__copy_to_user)
472+ENTRY(___copy_to_user)
473 stmfd sp!, {r2, r4 - r7, lr}
474 cmp r2, #4
475 blt .Lc2u_not_enough
476@@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May f
477 ldrgtb r3, [r1], #0
478 USER( T(strgtb) r3, [r0], #1) @ May fault
479 b .Lc2u_finished
480-ENDPROC(__copy_to_user)
481+ENDPROC(___copy_to_user)
482
483 .pushsection .fixup,"ax"
484 .align 0
485 9001: ldmfd sp!, {r0, r4 - r7, pc}
486 .popsection
487
488-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
489+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
490 * Purpose : copy a block from user memory to kernel memory
491 * Params : to - kernel memory
492 * : from - user memory
493@@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May f
494 sub r2, r2, ip
495 b .Lcfu_dest_aligned
496
497-ENTRY(__copy_from_user)
498+ENTRY(___copy_from_user)
499 stmfd sp!, {r0, r2, r4 - r7, lr}
500 cmp r2, #4
501 blt .Lcfu_not_enough
502@@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May f
503 USER( T(ldrgtb) r3, [r1], #1) @ May fault
504 strgtb r3, [r0], #1
505 b .Lcfu_finished
506-ENDPROC(__copy_from_user)
507+ENDPROC(___copy_from_user)
508
509 .pushsection .fixup,"ax"
510 .align 0
511diff -urNp linux-3.1.1/arch/arm/lib/uaccess_with_memcpy.c linux-3.1.1/arch/arm/lib/uaccess_with_memcpy.c
512--- linux-3.1.1/arch/arm/lib/uaccess_with_memcpy.c 2011-11-11 15:19:27.000000000 -0500
513+++ linux-3.1.1/arch/arm/lib/uaccess_with_memcpy.c 2011-11-16 18:39:07.000000000 -0500
514@@ -103,7 +103,7 @@ out:
515 }
516
517 unsigned long
518-__copy_to_user(void __user *to, const void *from, unsigned long n)
519+___copy_to_user(void __user *to, const void *from, unsigned long n)
520 {
521 /*
522 * This test is stubbed out of the main function above to keep
523diff -urNp linux-3.1.1/arch/arm/mach-ux500/mbox-db5500.c linux-3.1.1/arch/arm/mach-ux500/mbox-db5500.c
524--- linux-3.1.1/arch/arm/mach-ux500/mbox-db5500.c 2011-11-11 15:19:27.000000000 -0500
525+++ linux-3.1.1/arch/arm/mach-ux500/mbox-db5500.c 2011-11-16 18:40:08.000000000 -0500
526@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
527 return sprintf(buf, "0x%X\n", mbox_value);
528 }
529
530-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
531+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
532
533 static int mbox_show(struct seq_file *s, void *data)
534 {
535diff -urNp linux-3.1.1/arch/arm/mm/fault.c linux-3.1.1/arch/arm/mm/fault.c
536--- linux-3.1.1/arch/arm/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
537+++ linux-3.1.1/arch/arm/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
538@@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
539 }
540 #endif
541
542+#ifdef CONFIG_PAX_PAGEEXEC
543+ if (fsr & FSR_LNX_PF) {
544+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
545+ do_group_exit(SIGKILL);
546+ }
547+#endif
548+
549 tsk->thread.address = addr;
550 tsk->thread.error_code = fsr;
551 tsk->thread.trap_no = 14;
552@@ -383,6 +390,33 @@ do_page_fault(unsigned long addr, unsign
553 }
554 #endif /* CONFIG_MMU */
555
556+#ifdef CONFIG_PAX_PAGEEXEC
557+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
558+{
559+ long i;
560+
561+ printk(KERN_ERR "PAX: bytes at PC: ");
562+ for (i = 0; i < 20; i++) {
563+ unsigned char c;
564+ if (get_user(c, (__force unsigned char __user *)pc+i))
565+ printk(KERN_CONT "?? ");
566+ else
567+ printk(KERN_CONT "%02x ", c);
568+ }
569+ printk("\n");
570+
571+ printk(KERN_ERR "PAX: bytes at SP-4: ");
572+ for (i = -1; i < 20; i++) {
573+ unsigned long c;
574+ if (get_user(c, (__force unsigned long __user *)sp+i))
575+ printk(KERN_CONT "???????? ");
576+ else
577+ printk(KERN_CONT "%08lx ", c);
578+ }
579+ printk("\n");
580+}
581+#endif
582+
583 /*
584 * First Level Translation Fault Handler
585 *
586diff -urNp linux-3.1.1/arch/arm/mm/mmap.c linux-3.1.1/arch/arm/mm/mmap.c
587--- linux-3.1.1/arch/arm/mm/mmap.c 2011-11-11 15:19:27.000000000 -0500
588+++ linux-3.1.1/arch/arm/mm/mmap.c 2011-11-16 18:39:07.000000000 -0500
589@@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
590 if (len > TASK_SIZE)
591 return -ENOMEM;
592
593+#ifdef CONFIG_PAX_RANDMMAP
594+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
595+#endif
596+
597 if (addr) {
598 if (do_align)
599 addr = COLOUR_ALIGN(addr, pgoff);
600@@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
601 addr = PAGE_ALIGN(addr);
602
603 vma = find_vma(mm, addr);
604- if (TASK_SIZE - len >= addr &&
605- (!vma || addr + len <= vma->vm_start))
606+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
607 return addr;
608 }
609 if (len > mm->cached_hole_size) {
610- start_addr = addr = mm->free_area_cache;
611+ start_addr = addr = mm->free_area_cache;
612 } else {
613- start_addr = addr = TASK_UNMAPPED_BASE;
614- mm->cached_hole_size = 0;
615+ start_addr = addr = mm->mmap_base;
616+ mm->cached_hole_size = 0;
617 }
618 /* 8 bits of randomness in 20 address space bits */
619 if ((current->flags & PF_RANDOMIZE) &&
620@@ -100,14 +103,14 @@ full_search:
621 * Start a new search - just in case we missed
622 * some holes.
623 */
624- if (start_addr != TASK_UNMAPPED_BASE) {
625- start_addr = addr = TASK_UNMAPPED_BASE;
626+ if (start_addr != mm->mmap_base) {
627+ start_addr = addr = mm->mmap_base;
628 mm->cached_hole_size = 0;
629 goto full_search;
630 }
631 return -ENOMEM;
632 }
633- if (!vma || addr + len <= vma->vm_start) {
634+ if (check_heap_stack_gap(vma, addr, len)) {
635 /*
636 * Remember the place where we stopped the search:
637 */
638diff -urNp linux-3.1.1/arch/avr32/include/asm/elf.h linux-3.1.1/arch/avr32/include/asm/elf.h
639--- linux-3.1.1/arch/avr32/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
640+++ linux-3.1.1/arch/avr32/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
641@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
642 the loader. We need to make sure that it is out of the way of the program
643 that it will "exec", and that there is sufficient room for the brk. */
644
645-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
646+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
647
648+#ifdef CONFIG_PAX_ASLR
649+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
650+
651+#define PAX_DELTA_MMAP_LEN 15
652+#define PAX_DELTA_STACK_LEN 15
653+#endif
654
655 /* This yields a mask that user programs can use to figure out what
656 instruction set this CPU supports. This could be done in user space,
657diff -urNp linux-3.1.1/arch/avr32/include/asm/kmap_types.h linux-3.1.1/arch/avr32/include/asm/kmap_types.h
658--- linux-3.1.1/arch/avr32/include/asm/kmap_types.h 2011-11-11 15:19:27.000000000 -0500
659+++ linux-3.1.1/arch/avr32/include/asm/kmap_types.h 2011-11-16 18:39:07.000000000 -0500
660@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
661 D(11) KM_IRQ1,
662 D(12) KM_SOFTIRQ0,
663 D(13) KM_SOFTIRQ1,
664-D(14) KM_TYPE_NR
665+D(14) KM_CLEARPAGE,
666+D(15) KM_TYPE_NR
667 };
668
669 #undef D
670diff -urNp linux-3.1.1/arch/avr32/mm/fault.c linux-3.1.1/arch/avr32/mm/fault.c
671--- linux-3.1.1/arch/avr32/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
672+++ linux-3.1.1/arch/avr32/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
673@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
674
675 int exception_trace = 1;
676
677+#ifdef CONFIG_PAX_PAGEEXEC
678+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
679+{
680+ unsigned long i;
681+
682+ printk(KERN_ERR "PAX: bytes at PC: ");
683+ for (i = 0; i < 20; i++) {
684+ unsigned char c;
685+ if (get_user(c, (unsigned char *)pc+i))
686+ printk(KERN_CONT "???????? ");
687+ else
688+ printk(KERN_CONT "%02x ", c);
689+ }
690+ printk("\n");
691+}
692+#endif
693+
694 /*
695 * This routine handles page faults. It determines the address and the
696 * problem, and then passes it off to one of the appropriate routines.
697@@ -156,6 +173,16 @@ bad_area:
698 up_read(&mm->mmap_sem);
699
700 if (user_mode(regs)) {
701+
702+#ifdef CONFIG_PAX_PAGEEXEC
703+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
704+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
705+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
706+ do_group_exit(SIGKILL);
707+ }
708+ }
709+#endif
710+
711 if (exception_trace && printk_ratelimit())
712 printk("%s%s[%d]: segfault at %08lx pc %08lx "
713 "sp %08lx ecr %lu\n",
714diff -urNp linux-3.1.1/arch/frv/include/asm/kmap_types.h linux-3.1.1/arch/frv/include/asm/kmap_types.h
715--- linux-3.1.1/arch/frv/include/asm/kmap_types.h 2011-11-11 15:19:27.000000000 -0500
716+++ linux-3.1.1/arch/frv/include/asm/kmap_types.h 2011-11-16 18:39:07.000000000 -0500
717@@ -23,6 +23,7 @@ enum km_type {
718 KM_IRQ1,
719 KM_SOFTIRQ0,
720 KM_SOFTIRQ1,
721+ KM_CLEARPAGE,
722 KM_TYPE_NR
723 };
724
725diff -urNp linux-3.1.1/arch/frv/mm/elf-fdpic.c linux-3.1.1/arch/frv/mm/elf-fdpic.c
726--- linux-3.1.1/arch/frv/mm/elf-fdpic.c 2011-11-11 15:19:27.000000000 -0500
727+++ linux-3.1.1/arch/frv/mm/elf-fdpic.c 2011-11-16 18:39:07.000000000 -0500
728@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
729 if (addr) {
730 addr = PAGE_ALIGN(addr);
731 vma = find_vma(current->mm, addr);
732- if (TASK_SIZE - len >= addr &&
733- (!vma || addr + len <= vma->vm_start))
734+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
735 goto success;
736 }
737
738@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
739 for (; vma; vma = vma->vm_next) {
740 if (addr > limit)
741 break;
742- if (addr + len <= vma->vm_start)
743+ if (check_heap_stack_gap(vma, addr, len))
744 goto success;
745 addr = vma->vm_end;
746 }
747@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
748 for (; vma; vma = vma->vm_next) {
749 if (addr > limit)
750 break;
751- if (addr + len <= vma->vm_start)
752+ if (check_heap_stack_gap(vma, addr, len))
753 goto success;
754 addr = vma->vm_end;
755 }
756diff -urNp linux-3.1.1/arch/ia64/include/asm/elf.h linux-3.1.1/arch/ia64/include/asm/elf.h
757--- linux-3.1.1/arch/ia64/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
758+++ linux-3.1.1/arch/ia64/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
759@@ -42,6 +42,13 @@
760 */
761 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
762
763+#ifdef CONFIG_PAX_ASLR
764+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
765+
766+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
767+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
768+#endif
769+
770 #define PT_IA_64_UNWIND 0x70000001
771
772 /* IA-64 relocations: */
773diff -urNp linux-3.1.1/arch/ia64/include/asm/pgtable.h linux-3.1.1/arch/ia64/include/asm/pgtable.h
774--- linux-3.1.1/arch/ia64/include/asm/pgtable.h 2011-11-11 15:19:27.000000000 -0500
775+++ linux-3.1.1/arch/ia64/include/asm/pgtable.h 2011-11-16 18:39:07.000000000 -0500
776@@ -12,7 +12,7 @@
777 * David Mosberger-Tang <davidm@hpl.hp.com>
778 */
779
780-
781+#include <linux/const.h>
782 #include <asm/mman.h>
783 #include <asm/page.h>
784 #include <asm/processor.h>
785@@ -143,6 +143,17 @@
786 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
787 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
788 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
789+
790+#ifdef CONFIG_PAX_PAGEEXEC
791+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
792+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
793+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
794+#else
795+# define PAGE_SHARED_NOEXEC PAGE_SHARED
796+# define PAGE_READONLY_NOEXEC PAGE_READONLY
797+# define PAGE_COPY_NOEXEC PAGE_COPY
798+#endif
799+
800 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
801 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
802 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
803diff -urNp linux-3.1.1/arch/ia64/include/asm/spinlock.h linux-3.1.1/arch/ia64/include/asm/spinlock.h
804--- linux-3.1.1/arch/ia64/include/asm/spinlock.h 2011-11-11 15:19:27.000000000 -0500
805+++ linux-3.1.1/arch/ia64/include/asm/spinlock.h 2011-11-16 18:39:07.000000000 -0500
806@@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
807 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
808
809 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
810- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
811+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
812 }
813
814 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
815diff -urNp linux-3.1.1/arch/ia64/include/asm/uaccess.h linux-3.1.1/arch/ia64/include/asm/uaccess.h
816--- linux-3.1.1/arch/ia64/include/asm/uaccess.h 2011-11-11 15:19:27.000000000 -0500
817+++ linux-3.1.1/arch/ia64/include/asm/uaccess.h 2011-11-16 18:39:07.000000000 -0500
818@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
819 const void *__cu_from = (from); \
820 long __cu_len = (n); \
821 \
822- if (__access_ok(__cu_to, __cu_len, get_fs())) \
823+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
824 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
825 __cu_len; \
826 })
827@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
828 long __cu_len = (n); \
829 \
830 __chk_user_ptr(__cu_from); \
831- if (__access_ok(__cu_from, __cu_len, get_fs())) \
832+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
833 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
834 __cu_len; \
835 })
836diff -urNp linux-3.1.1/arch/ia64/kernel/module.c linux-3.1.1/arch/ia64/kernel/module.c
837--- linux-3.1.1/arch/ia64/kernel/module.c 2011-11-11 15:19:27.000000000 -0500
838+++ linux-3.1.1/arch/ia64/kernel/module.c 2011-11-16 18:39:07.000000000 -0500
839@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
840 void
841 module_free (struct module *mod, void *module_region)
842 {
843- if (mod && mod->arch.init_unw_table &&
844- module_region == mod->module_init) {
845+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
846 unw_remove_unwind_table(mod->arch.init_unw_table);
847 mod->arch.init_unw_table = NULL;
848 }
849@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
850 }
851
852 static inline int
853+in_init_rx (const struct module *mod, uint64_t addr)
854+{
855+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
856+}
857+
858+static inline int
859+in_init_rw (const struct module *mod, uint64_t addr)
860+{
861+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
862+}
863+
864+static inline int
865 in_init (const struct module *mod, uint64_t addr)
866 {
867- return addr - (uint64_t) mod->module_init < mod->init_size;
868+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
869+}
870+
871+static inline int
872+in_core_rx (const struct module *mod, uint64_t addr)
873+{
874+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
875+}
876+
877+static inline int
878+in_core_rw (const struct module *mod, uint64_t addr)
879+{
880+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
881 }
882
883 static inline int
884 in_core (const struct module *mod, uint64_t addr)
885 {
886- return addr - (uint64_t) mod->module_core < mod->core_size;
887+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
888 }
889
890 static inline int
891@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_
892 break;
893
894 case RV_BDREL:
895- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
896+ if (in_init_rx(mod, val))
897+ val -= (uint64_t) mod->module_init_rx;
898+ else if (in_init_rw(mod, val))
899+ val -= (uint64_t) mod->module_init_rw;
900+ else if (in_core_rx(mod, val))
901+ val -= (uint64_t) mod->module_core_rx;
902+ else if (in_core_rw(mod, val))
903+ val -= (uint64_t) mod->module_core_rw;
904 break;
905
906 case RV_LTV:
907@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
908 * addresses have been selected...
909 */
910 uint64_t gp;
911- if (mod->core_size > MAX_LTOFF)
912+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
913 /*
914 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
915 * at the end of the module.
916 */
917- gp = mod->core_size - MAX_LTOFF / 2;
918+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
919 else
920- gp = mod->core_size / 2;
921- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
922+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
923+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
924 mod->arch.gp = gp;
925 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
926 }
927diff -urNp linux-3.1.1/arch/ia64/kernel/sys_ia64.c linux-3.1.1/arch/ia64/kernel/sys_ia64.c
928--- linux-3.1.1/arch/ia64/kernel/sys_ia64.c 2011-11-11 15:19:27.000000000 -0500
929+++ linux-3.1.1/arch/ia64/kernel/sys_ia64.c 2011-11-16 18:39:07.000000000 -0500
930@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
931 if (REGION_NUMBER(addr) == RGN_HPAGE)
932 addr = 0;
933 #endif
934+
935+#ifdef CONFIG_PAX_RANDMMAP
936+ if (mm->pax_flags & MF_PAX_RANDMMAP)
937+ addr = mm->free_area_cache;
938+ else
939+#endif
940+
941 if (!addr)
942 addr = mm->free_area_cache;
943
944@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
945 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
946 /* At this point: (!vma || addr < vma->vm_end). */
947 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
948- if (start_addr != TASK_UNMAPPED_BASE) {
949+ if (start_addr != mm->mmap_base) {
950 /* Start a new search --- just in case we missed some holes. */
951- addr = TASK_UNMAPPED_BASE;
952+ addr = mm->mmap_base;
953 goto full_search;
954 }
955 return -ENOMEM;
956 }
957- if (!vma || addr + len <= vma->vm_start) {
958+ if (check_heap_stack_gap(vma, addr, len)) {
959 /* Remember the address where we stopped this search: */
960 mm->free_area_cache = addr + len;
961 return addr;
962diff -urNp linux-3.1.1/arch/ia64/kernel/vmlinux.lds.S linux-3.1.1/arch/ia64/kernel/vmlinux.lds.S
963--- linux-3.1.1/arch/ia64/kernel/vmlinux.lds.S 2011-11-11 15:19:27.000000000 -0500
964+++ linux-3.1.1/arch/ia64/kernel/vmlinux.lds.S 2011-11-16 18:39:07.000000000 -0500
965@@ -199,7 +199,7 @@ SECTIONS {
966 /* Per-cpu data: */
967 . = ALIGN(PERCPU_PAGE_SIZE);
968 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
969- __phys_per_cpu_start = __per_cpu_load;
970+ __phys_per_cpu_start = per_cpu_load;
971 /*
972 * ensure percpu data fits
973 * into percpu page size
974diff -urNp linux-3.1.1/arch/ia64/mm/fault.c linux-3.1.1/arch/ia64/mm/fault.c
975--- linux-3.1.1/arch/ia64/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
976+++ linux-3.1.1/arch/ia64/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
977@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned
978 return pte_present(pte);
979 }
980
981+#ifdef CONFIG_PAX_PAGEEXEC
982+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
983+{
984+ unsigned long i;
985+
986+ printk(KERN_ERR "PAX: bytes at PC: ");
987+ for (i = 0; i < 8; i++) {
988+ unsigned int c;
989+ if (get_user(c, (unsigned int *)pc+i))
990+ printk(KERN_CONT "???????? ");
991+ else
992+ printk(KERN_CONT "%08x ", c);
993+ }
994+ printk("\n");
995+}
996+#endif
997+
998 void __kprobes
999 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1000 {
1001@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long addres
1002 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1003 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1004
1005- if ((vma->vm_flags & mask) != mask)
1006+ if ((vma->vm_flags & mask) != mask) {
1007+
1008+#ifdef CONFIG_PAX_PAGEEXEC
1009+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1010+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1011+ goto bad_area;
1012+
1013+ up_read(&mm->mmap_sem);
1014+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1015+ do_group_exit(SIGKILL);
1016+ }
1017+#endif
1018+
1019 goto bad_area;
1020
1021+ }
1022+
1023 /*
1024 * If for any reason at all we couldn't handle the fault, make
1025 * sure we exit gracefully rather than endlessly redo the
1026diff -urNp linux-3.1.1/arch/ia64/mm/hugetlbpage.c linux-3.1.1/arch/ia64/mm/hugetlbpage.c
1027--- linux-3.1.1/arch/ia64/mm/hugetlbpage.c 2011-11-11 15:19:27.000000000 -0500
1028+++ linux-3.1.1/arch/ia64/mm/hugetlbpage.c 2011-11-16 18:39:07.000000000 -0500
1029@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1030 /* At this point: (!vmm || addr < vmm->vm_end). */
1031 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1032 return -ENOMEM;
1033- if (!vmm || (addr + len) <= vmm->vm_start)
1034+ if (check_heap_stack_gap(vmm, addr, len))
1035 return addr;
1036 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1037 }
1038diff -urNp linux-3.1.1/arch/ia64/mm/init.c linux-3.1.1/arch/ia64/mm/init.c
1039--- linux-3.1.1/arch/ia64/mm/init.c 2011-11-11 15:19:27.000000000 -0500
1040+++ linux-3.1.1/arch/ia64/mm/init.c 2011-11-16 18:39:07.000000000 -0500
1041@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1042 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1043 vma->vm_end = vma->vm_start + PAGE_SIZE;
1044 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1045+
1046+#ifdef CONFIG_PAX_PAGEEXEC
1047+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1048+ vma->vm_flags &= ~VM_EXEC;
1049+
1050+#ifdef CONFIG_PAX_MPROTECT
1051+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1052+ vma->vm_flags &= ~VM_MAYEXEC;
1053+#endif
1054+
1055+ }
1056+#endif
1057+
1058 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1059 down_write(&current->mm->mmap_sem);
1060 if (insert_vm_struct(current->mm, vma)) {
1061diff -urNp linux-3.1.1/arch/m32r/lib/usercopy.c linux-3.1.1/arch/m32r/lib/usercopy.c
1062--- linux-3.1.1/arch/m32r/lib/usercopy.c 2011-11-11 15:19:27.000000000 -0500
1063+++ linux-3.1.1/arch/m32r/lib/usercopy.c 2011-11-16 18:39:07.000000000 -0500
1064@@ -14,6 +14,9 @@
1065 unsigned long
1066 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1067 {
1068+ if ((long)n < 0)
1069+ return n;
1070+
1071 prefetch(from);
1072 if (access_ok(VERIFY_WRITE, to, n))
1073 __copy_user(to,from,n);
1074@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1075 unsigned long
1076 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1077 {
1078+ if ((long)n < 0)
1079+ return n;
1080+
1081 prefetchw(to);
1082 if (access_ok(VERIFY_READ, from, n))
1083 __copy_user_zeroing(to,from,n);
1084diff -urNp linux-3.1.1/arch/mips/include/asm/elf.h linux-3.1.1/arch/mips/include/asm/elf.h
1085--- linux-3.1.1/arch/mips/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
1086+++ linux-3.1.1/arch/mips/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
1087@@ -372,13 +372,16 @@ extern const char *__elf_platform;
1088 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1089 #endif
1090
1091+#ifdef CONFIG_PAX_ASLR
1092+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1093+
1094+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1095+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1096+#endif
1097+
1098 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1099 struct linux_binprm;
1100 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1101 int uses_interp);
1102
1103-struct mm_struct;
1104-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1105-#define arch_randomize_brk arch_randomize_brk
1106-
1107 #endif /* _ASM_ELF_H */
1108diff -urNp linux-3.1.1/arch/mips/include/asm/page.h linux-3.1.1/arch/mips/include/asm/page.h
1109--- linux-3.1.1/arch/mips/include/asm/page.h 2011-11-11 15:19:27.000000000 -0500
1110+++ linux-3.1.1/arch/mips/include/asm/page.h 2011-11-16 18:39:07.000000000 -0500
1111@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1112 #ifdef CONFIG_CPU_MIPS32
1113 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1114 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1115- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1116+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1117 #else
1118 typedef struct { unsigned long long pte; } pte_t;
1119 #define pte_val(x) ((x).pte)
1120diff -urNp linux-3.1.1/arch/mips/include/asm/system.h linux-3.1.1/arch/mips/include/asm/system.h
1121--- linux-3.1.1/arch/mips/include/asm/system.h 2011-11-11 15:19:27.000000000 -0500
1122+++ linux-3.1.1/arch/mips/include/asm/system.h 2011-11-16 18:39:07.000000000 -0500
1123@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1124 */
1125 #define __ARCH_WANT_UNLOCKED_CTXSW
1126
1127-extern unsigned long arch_align_stack(unsigned long sp);
1128+#define arch_align_stack(x) ((x) & ~0xfUL)
1129
1130 #endif /* _ASM_SYSTEM_H */
1131diff -urNp linux-3.1.1/arch/mips/kernel/binfmt_elfn32.c linux-3.1.1/arch/mips/kernel/binfmt_elfn32.c
1132--- linux-3.1.1/arch/mips/kernel/binfmt_elfn32.c 2011-11-11 15:19:27.000000000 -0500
1133+++ linux-3.1.1/arch/mips/kernel/binfmt_elfn32.c 2011-11-16 18:39:07.000000000 -0500
1134@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1135 #undef ELF_ET_DYN_BASE
1136 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1137
1138+#ifdef CONFIG_PAX_ASLR
1139+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1140+
1141+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1142+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1143+#endif
1144+
1145 #include <asm/processor.h>
1146 #include <linux/module.h>
1147 #include <linux/elfcore.h>
1148diff -urNp linux-3.1.1/arch/mips/kernel/binfmt_elfo32.c linux-3.1.1/arch/mips/kernel/binfmt_elfo32.c
1149--- linux-3.1.1/arch/mips/kernel/binfmt_elfo32.c 2011-11-11 15:19:27.000000000 -0500
1150+++ linux-3.1.1/arch/mips/kernel/binfmt_elfo32.c 2011-11-16 18:39:07.000000000 -0500
1151@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1152 #undef ELF_ET_DYN_BASE
1153 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1154
1155+#ifdef CONFIG_PAX_ASLR
1156+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1157+
1158+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1159+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1160+#endif
1161+
1162 #include <asm/processor.h>
1163
1164 /*
1165diff -urNp linux-3.1.1/arch/mips/kernel/process.c linux-3.1.1/arch/mips/kernel/process.c
1166--- linux-3.1.1/arch/mips/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
1167+++ linux-3.1.1/arch/mips/kernel/process.c 2011-11-16 18:39:07.000000000 -0500
1168@@ -481,15 +481,3 @@ unsigned long get_wchan(struct task_stru
1169 out:
1170 return pc;
1171 }
1172-
1173-/*
1174- * Don't forget that the stack pointer must be aligned on a 8 bytes
1175- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1176- */
1177-unsigned long arch_align_stack(unsigned long sp)
1178-{
1179- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1180- sp -= get_random_int() & ~PAGE_MASK;
1181-
1182- return sp & ALMASK;
1183-}
1184diff -urNp linux-3.1.1/arch/mips/mm/fault.c linux-3.1.1/arch/mips/mm/fault.c
1185--- linux-3.1.1/arch/mips/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
1186+++ linux-3.1.1/arch/mips/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
1187@@ -28,6 +28,23 @@
1188 #include <asm/highmem.h> /* For VMALLOC_END */
1189 #include <linux/kdebug.h>
1190
1191+#ifdef CONFIG_PAX_PAGEEXEC
1192+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1193+{
1194+ unsigned long i;
1195+
1196+ printk(KERN_ERR "PAX: bytes at PC: ");
1197+ for (i = 0; i < 5; i++) {
1198+ unsigned int c;
1199+ if (get_user(c, (unsigned int *)pc+i))
1200+ printk(KERN_CONT "???????? ");
1201+ else
1202+ printk(KERN_CONT "%08x ", c);
1203+ }
1204+ printk("\n");
1205+}
1206+#endif
1207+
1208 /*
1209 * This routine handles page faults. It determines the address,
1210 * and the problem, and then passes it off to one of the appropriate
1211diff -urNp linux-3.1.1/arch/mips/mm/mmap.c linux-3.1.1/arch/mips/mm/mmap.c
1212--- linux-3.1.1/arch/mips/mm/mmap.c 2011-11-11 15:19:27.000000000 -0500
1213+++ linux-3.1.1/arch/mips/mm/mmap.c 2011-11-16 18:39:07.000000000 -0500
1214@@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_a
1215 do_color_align = 1;
1216
1217 /* requesting a specific address */
1218+
1219+#ifdef CONFIG_PAX_RANDMMAP
1220+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1221+#endif
1222+
1223 if (addr) {
1224 if (do_color_align)
1225 addr = COLOUR_ALIGN(addr, pgoff);
1226@@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_a
1227 addr = PAGE_ALIGN(addr);
1228
1229 vma = find_vma(mm, addr);
1230- if (TASK_SIZE - len >= addr &&
1231- (!vma || addr + len <= vma->vm_start))
1232+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1233 return addr;
1234 }
1235
1236@@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_a
1237 /* At this point: (!vma || addr < vma->vm_end). */
1238 if (TASK_SIZE - len < addr)
1239 return -ENOMEM;
1240- if (!vma || addr + len <= vma->vm_start)
1241+ if (check_heap_stack_gap(vmm, addr, len))
1242 return addr;
1243 addr = vma->vm_end;
1244 if (do_color_align)
1245@@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_a
1246 /* make sure it can fit in the remaining address space */
1247 if (likely(addr > len)) {
1248 vma = find_vma(mm, addr - len);
1249- if (!vma || addr <= vma->vm_start) {
1250+ if (check_heap_stack_gap(vmm, addr - len, len))
1251 /* cache the address as a hint for next time */
1252 return mm->free_area_cache = addr - len;
1253 }
1254@@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_a
1255 * return with success:
1256 */
1257 vma = find_vma(mm, addr);
1258- if (likely(!vma || addr + len <= vma->vm_start)) {
1259+ if (check_heap_stack_gap(vmm, addr, len)) {
1260 /* cache the address as a hint for next time */
1261 return mm->free_area_cache = addr;
1262 }
1263@@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_str
1264 mm->unmap_area = arch_unmap_area_topdown;
1265 }
1266 }
1267-
1268-static inline unsigned long brk_rnd(void)
1269-{
1270- unsigned long rnd = get_random_int();
1271-
1272- rnd = rnd << PAGE_SHIFT;
1273- /* 8MB for 32bit, 256MB for 64bit */
1274- if (TASK_IS_32BIT_ADDR)
1275- rnd = rnd & 0x7ffffful;
1276- else
1277- rnd = rnd & 0xffffffful;
1278-
1279- return rnd;
1280-}
1281-
1282-unsigned long arch_randomize_brk(struct mm_struct *mm)
1283-{
1284- unsigned long base = mm->brk;
1285- unsigned long ret;
1286-
1287- ret = PAGE_ALIGN(base + brk_rnd());
1288-
1289- if (ret < mm->brk)
1290- return mm->brk;
1291-
1292- return ret;
1293-}
1294diff -urNp linux-3.1.1/arch/parisc/include/asm/elf.h linux-3.1.1/arch/parisc/include/asm/elf.h
1295--- linux-3.1.1/arch/parisc/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
1296+++ linux-3.1.1/arch/parisc/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
1297@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1298
1299 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1300
1301+#ifdef CONFIG_PAX_ASLR
1302+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1303+
1304+#define PAX_DELTA_MMAP_LEN 16
1305+#define PAX_DELTA_STACK_LEN 16
1306+#endif
1307+
1308 /* This yields a mask that user programs can use to figure out what
1309 instruction set this CPU supports. This could be done in user space,
1310 but it's not easy, and we've already done it here. */
1311diff -urNp linux-3.1.1/arch/parisc/include/asm/pgtable.h linux-3.1.1/arch/parisc/include/asm/pgtable.h
1312--- linux-3.1.1/arch/parisc/include/asm/pgtable.h 2011-11-11 15:19:27.000000000 -0500
1313+++ linux-3.1.1/arch/parisc/include/asm/pgtable.h 2011-11-16 18:39:07.000000000 -0500
1314@@ -210,6 +210,17 @@ struct vm_area_struct;
1315 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1316 #define PAGE_COPY PAGE_EXECREAD
1317 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1318+
1319+#ifdef CONFIG_PAX_PAGEEXEC
1320+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1321+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1322+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1323+#else
1324+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1325+# define PAGE_COPY_NOEXEC PAGE_COPY
1326+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1327+#endif
1328+
1329 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1330 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1331 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1332diff -urNp linux-3.1.1/arch/parisc/kernel/module.c linux-3.1.1/arch/parisc/kernel/module.c
1333--- linux-3.1.1/arch/parisc/kernel/module.c 2011-11-11 15:19:27.000000000 -0500
1334+++ linux-3.1.1/arch/parisc/kernel/module.c 2011-11-16 18:39:07.000000000 -0500
1335@@ -98,16 +98,38 @@
1336
1337 /* three functions to determine where in the module core
1338 * or init pieces the location is */
1339+static inline int in_init_rx(struct module *me, void *loc)
1340+{
1341+ return (loc >= me->module_init_rx &&
1342+ loc < (me->module_init_rx + me->init_size_rx));
1343+}
1344+
1345+static inline int in_init_rw(struct module *me, void *loc)
1346+{
1347+ return (loc >= me->module_init_rw &&
1348+ loc < (me->module_init_rw + me->init_size_rw));
1349+}
1350+
1351 static inline int in_init(struct module *me, void *loc)
1352 {
1353- return (loc >= me->module_init &&
1354- loc <= (me->module_init + me->init_size));
1355+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1356+}
1357+
1358+static inline int in_core_rx(struct module *me, void *loc)
1359+{
1360+ return (loc >= me->module_core_rx &&
1361+ loc < (me->module_core_rx + me->core_size_rx));
1362+}
1363+
1364+static inline int in_core_rw(struct module *me, void *loc)
1365+{
1366+ return (loc >= me->module_core_rw &&
1367+ loc < (me->module_core_rw + me->core_size_rw));
1368 }
1369
1370 static inline int in_core(struct module *me, void *loc)
1371 {
1372- return (loc >= me->module_core &&
1373- loc <= (me->module_core + me->core_size));
1374+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1375 }
1376
1377 static inline int in_local(struct module *me, void *loc)
1378@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_
1379 }
1380
1381 /* align things a bit */
1382- me->core_size = ALIGN(me->core_size, 16);
1383- me->arch.got_offset = me->core_size;
1384- me->core_size += gots * sizeof(struct got_entry);
1385-
1386- me->core_size = ALIGN(me->core_size, 16);
1387- me->arch.fdesc_offset = me->core_size;
1388- me->core_size += fdescs * sizeof(Elf_Fdesc);
1389+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1390+ me->arch.got_offset = me->core_size_rw;
1391+ me->core_size_rw += gots * sizeof(struct got_entry);
1392+
1393+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1394+ me->arch.fdesc_offset = me->core_size_rw;
1395+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1396
1397 me->arch.got_max = gots;
1398 me->arch.fdesc_max = fdescs;
1399@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module
1400
1401 BUG_ON(value == 0);
1402
1403- got = me->module_core + me->arch.got_offset;
1404+ got = me->module_core_rw + me->arch.got_offset;
1405 for (i = 0; got[i].addr; i++)
1406 if (got[i].addr == value)
1407 goto out;
1408@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module
1409 #ifdef CONFIG_64BIT
1410 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1411 {
1412- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1413+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1414
1415 if (!value) {
1416 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1417@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module
1418
1419 /* Create new one */
1420 fdesc->addr = value;
1421- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1422+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1423 return (Elf_Addr)fdesc;
1424 }
1425 #endif /* CONFIG_64BIT */
1426@@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
1427
1428 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1429 end = table + sechdrs[me->arch.unwind_section].sh_size;
1430- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1431+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1432
1433 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1434 me->arch.unwind_section, table, end, gp);
1435diff -urNp linux-3.1.1/arch/parisc/kernel/sys_parisc.c linux-3.1.1/arch/parisc/kernel/sys_parisc.c
1436--- linux-3.1.1/arch/parisc/kernel/sys_parisc.c 2011-11-11 15:19:27.000000000 -0500
1437+++ linux-3.1.1/arch/parisc/kernel/sys_parisc.c 2011-11-16 18:39:07.000000000 -0500
1438@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1439 /* At this point: (!vma || addr < vma->vm_end). */
1440 if (TASK_SIZE - len < addr)
1441 return -ENOMEM;
1442- if (!vma || addr + len <= vma->vm_start)
1443+ if (check_heap_stack_gap(vma, addr, len))
1444 return addr;
1445 addr = vma->vm_end;
1446 }
1447@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1448 /* At this point: (!vma || addr < vma->vm_end). */
1449 if (TASK_SIZE - len < addr)
1450 return -ENOMEM;
1451- if (!vma || addr + len <= vma->vm_start)
1452+ if (check_heap_stack_gap(vma, addr, len))
1453 return addr;
1454 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1455 if (addr < vma->vm_end) /* handle wraparound */
1456@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1457 if (flags & MAP_FIXED)
1458 return addr;
1459 if (!addr)
1460- addr = TASK_UNMAPPED_BASE;
1461+ addr = current->mm->mmap_base;
1462
1463 if (filp) {
1464 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1465diff -urNp linux-3.1.1/arch/parisc/kernel/traps.c linux-3.1.1/arch/parisc/kernel/traps.c
1466--- linux-3.1.1/arch/parisc/kernel/traps.c 2011-11-11 15:19:27.000000000 -0500
1467+++ linux-3.1.1/arch/parisc/kernel/traps.c 2011-11-16 18:39:07.000000000 -0500
1468@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1469
1470 down_read(&current->mm->mmap_sem);
1471 vma = find_vma(current->mm,regs->iaoq[0]);
1472- if (vma && (regs->iaoq[0] >= vma->vm_start)
1473- && (vma->vm_flags & VM_EXEC)) {
1474-
1475+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1476 fault_address = regs->iaoq[0];
1477 fault_space = regs->iasq[0];
1478
1479diff -urNp linux-3.1.1/arch/parisc/mm/fault.c linux-3.1.1/arch/parisc/mm/fault.c
1480--- linux-3.1.1/arch/parisc/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
1481+++ linux-3.1.1/arch/parisc/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
1482@@ -15,6 +15,7 @@
1483 #include <linux/sched.h>
1484 #include <linux/interrupt.h>
1485 #include <linux/module.h>
1486+#include <linux/unistd.h>
1487
1488 #include <asm/uaccess.h>
1489 #include <asm/traps.h>
1490@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1491 static unsigned long
1492 parisc_acctyp(unsigned long code, unsigned int inst)
1493 {
1494- if (code == 6 || code == 16)
1495+ if (code == 6 || code == 7 || code == 16)
1496 return VM_EXEC;
1497
1498 switch (inst & 0xf0000000) {
1499@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1500 }
1501 #endif
1502
1503+#ifdef CONFIG_PAX_PAGEEXEC
1504+/*
1505+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1506+ *
1507+ * returns 1 when task should be killed
1508+ * 2 when rt_sigreturn trampoline was detected
1509+ * 3 when unpatched PLT trampoline was detected
1510+ */
1511+static int pax_handle_fetch_fault(struct pt_regs *regs)
1512+{
1513+
1514+#ifdef CONFIG_PAX_EMUPLT
1515+ int err;
1516+
1517+ do { /* PaX: unpatched PLT emulation */
1518+ unsigned int bl, depwi;
1519+
1520+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1521+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1522+
1523+ if (err)
1524+ break;
1525+
1526+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1527+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1528+
1529+ err = get_user(ldw, (unsigned int *)addr);
1530+ err |= get_user(bv, (unsigned int *)(addr+4));
1531+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1532+
1533+ if (err)
1534+ break;
1535+
1536+ if (ldw == 0x0E801096U &&
1537+ bv == 0xEAC0C000U &&
1538+ ldw2 == 0x0E881095U)
1539+ {
1540+ unsigned int resolver, map;
1541+
1542+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1543+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1544+ if (err)
1545+ break;
1546+
1547+ regs->gr[20] = instruction_pointer(regs)+8;
1548+ regs->gr[21] = map;
1549+ regs->gr[22] = resolver;
1550+ regs->iaoq[0] = resolver | 3UL;
1551+ regs->iaoq[1] = regs->iaoq[0] + 4;
1552+ return 3;
1553+ }
1554+ }
1555+ } while (0);
1556+#endif
1557+
1558+#ifdef CONFIG_PAX_EMUTRAMP
1559+
1560+#ifndef CONFIG_PAX_EMUSIGRT
1561+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1562+ return 1;
1563+#endif
1564+
1565+ do { /* PaX: rt_sigreturn emulation */
1566+ unsigned int ldi1, ldi2, bel, nop;
1567+
1568+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1569+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1570+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1571+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1572+
1573+ if (err)
1574+ break;
1575+
1576+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1577+ ldi2 == 0x3414015AU &&
1578+ bel == 0xE4008200U &&
1579+ nop == 0x08000240U)
1580+ {
1581+ regs->gr[25] = (ldi1 & 2) >> 1;
1582+ regs->gr[20] = __NR_rt_sigreturn;
1583+ regs->gr[31] = regs->iaoq[1] + 16;
1584+ regs->sr[0] = regs->iasq[1];
1585+ regs->iaoq[0] = 0x100UL;
1586+ regs->iaoq[1] = regs->iaoq[0] + 4;
1587+ regs->iasq[0] = regs->sr[2];
1588+ regs->iasq[1] = regs->sr[2];
1589+ return 2;
1590+ }
1591+ } while (0);
1592+#endif
1593+
1594+ return 1;
1595+}
1596+
1597+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1598+{
1599+ unsigned long i;
1600+
1601+ printk(KERN_ERR "PAX: bytes at PC: ");
1602+ for (i = 0; i < 5; i++) {
1603+ unsigned int c;
1604+ if (get_user(c, (unsigned int *)pc+i))
1605+ printk(KERN_CONT "???????? ");
1606+ else
1607+ printk(KERN_CONT "%08x ", c);
1608+ }
1609+ printk("\n");
1610+}
1611+#endif
1612+
1613 int fixup_exception(struct pt_regs *regs)
1614 {
1615 const struct exception_table_entry *fix;
1616@@ -192,8 +303,33 @@ good_area:
1617
1618 acc_type = parisc_acctyp(code,regs->iir);
1619
1620- if ((vma->vm_flags & acc_type) != acc_type)
1621+ if ((vma->vm_flags & acc_type) != acc_type) {
1622+
1623+#ifdef CONFIG_PAX_PAGEEXEC
1624+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1625+ (address & ~3UL) == instruction_pointer(regs))
1626+ {
1627+ up_read(&mm->mmap_sem);
1628+ switch (pax_handle_fetch_fault(regs)) {
1629+
1630+#ifdef CONFIG_PAX_EMUPLT
1631+ case 3:
1632+ return;
1633+#endif
1634+
1635+#ifdef CONFIG_PAX_EMUTRAMP
1636+ case 2:
1637+ return;
1638+#endif
1639+
1640+ }
1641+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1642+ do_group_exit(SIGKILL);
1643+ }
1644+#endif
1645+
1646 goto bad_area;
1647+ }
1648
1649 /*
1650 * If for any reason at all we couldn't handle the fault, make
1651diff -urNp linux-3.1.1/arch/powerpc/include/asm/elf.h linux-3.1.1/arch/powerpc/include/asm/elf.h
1652--- linux-3.1.1/arch/powerpc/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
1653+++ linux-3.1.1/arch/powerpc/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
1654@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1655 the loader. We need to make sure that it is out of the way of the program
1656 that it will "exec", and that there is sufficient room for the brk. */
1657
1658-extern unsigned long randomize_et_dyn(unsigned long base);
1659-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1660+#define ELF_ET_DYN_BASE (0x20000000)
1661+
1662+#ifdef CONFIG_PAX_ASLR
1663+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1664+
1665+#ifdef __powerpc64__
1666+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
1667+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
1668+#else
1669+#define PAX_DELTA_MMAP_LEN 15
1670+#define PAX_DELTA_STACK_LEN 15
1671+#endif
1672+#endif
1673
1674 /*
1675 * Our registers are always unsigned longs, whether we're a 32 bit
1676@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
1677 (0x7ff >> (PAGE_SHIFT - 12)) : \
1678 (0x3ffff >> (PAGE_SHIFT - 12)))
1679
1680-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1681-#define arch_randomize_brk arch_randomize_brk
1682-
1683 #endif /* __KERNEL__ */
1684
1685 /*
1686diff -urNp linux-3.1.1/arch/powerpc/include/asm/kmap_types.h linux-3.1.1/arch/powerpc/include/asm/kmap_types.h
1687--- linux-3.1.1/arch/powerpc/include/asm/kmap_types.h 2011-11-11 15:19:27.000000000 -0500
1688+++ linux-3.1.1/arch/powerpc/include/asm/kmap_types.h 2011-11-16 18:39:07.000000000 -0500
1689@@ -27,6 +27,7 @@ enum km_type {
1690 KM_PPC_SYNC_PAGE,
1691 KM_PPC_SYNC_ICACHE,
1692 KM_KDB,
1693+ KM_CLEARPAGE,
1694 KM_TYPE_NR
1695 };
1696
1697diff -urNp linux-3.1.1/arch/powerpc/include/asm/mman.h linux-3.1.1/arch/powerpc/include/asm/mman.h
1698--- linux-3.1.1/arch/powerpc/include/asm/mman.h 2011-11-11 15:19:27.000000000 -0500
1699+++ linux-3.1.1/arch/powerpc/include/asm/mman.h 2011-11-16 18:39:07.000000000 -0500
1700@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm
1701 }
1702 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
1703
1704-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
1705+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
1706 {
1707 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
1708 }
1709diff -urNp linux-3.1.1/arch/powerpc/include/asm/page_64.h linux-3.1.1/arch/powerpc/include/asm/page_64.h
1710--- linux-3.1.1/arch/powerpc/include/asm/page_64.h 2011-11-11 15:19:27.000000000 -0500
1711+++ linux-3.1.1/arch/powerpc/include/asm/page_64.h 2011-11-16 18:39:07.000000000 -0500
1712@@ -155,15 +155,18 @@ do { \
1713 * stack by default, so in the absence of a PT_GNU_STACK program header
1714 * we turn execute permission off.
1715 */
1716-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1717- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1718+#define VM_STACK_DEFAULT_FLAGS32 \
1719+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1720+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1721
1722 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1723 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1724
1725+#ifndef CONFIG_PAX_PAGEEXEC
1726 #define VM_STACK_DEFAULT_FLAGS \
1727 (is_32bit_task() ? \
1728 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1729+#endif
1730
1731 #include <asm-generic/getorder.h>
1732
1733diff -urNp linux-3.1.1/arch/powerpc/include/asm/page.h linux-3.1.1/arch/powerpc/include/asm/page.h
1734--- linux-3.1.1/arch/powerpc/include/asm/page.h 2011-11-11 15:19:27.000000000 -0500
1735+++ linux-3.1.1/arch/powerpc/include/asm/page.h 2011-11-16 18:39:07.000000000 -0500
1736@@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
1737 * and needs to be executable. This means the whole heap ends
1738 * up being executable.
1739 */
1740-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1741- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1742+#define VM_DATA_DEFAULT_FLAGS32 \
1743+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1744+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1745
1746 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1747 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1748@@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
1749 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1750 #endif
1751
1752+#define ktla_ktva(addr) (addr)
1753+#define ktva_ktla(addr) (addr)
1754+
1755 #ifndef __ASSEMBLY__
1756
1757 #undef STRICT_MM_TYPECHECKS
1758diff -urNp linux-3.1.1/arch/powerpc/include/asm/pgtable.h linux-3.1.1/arch/powerpc/include/asm/pgtable.h
1759--- linux-3.1.1/arch/powerpc/include/asm/pgtable.h 2011-11-11 15:19:27.000000000 -0500
1760+++ linux-3.1.1/arch/powerpc/include/asm/pgtable.h 2011-11-16 18:39:07.000000000 -0500
1761@@ -2,6 +2,7 @@
1762 #define _ASM_POWERPC_PGTABLE_H
1763 #ifdef __KERNEL__
1764
1765+#include <linux/const.h>
1766 #ifndef __ASSEMBLY__
1767 #include <asm/processor.h> /* For TASK_SIZE */
1768 #include <asm/mmu.h>
1769diff -urNp linux-3.1.1/arch/powerpc/include/asm/pte-hash32.h linux-3.1.1/arch/powerpc/include/asm/pte-hash32.h
1770--- linux-3.1.1/arch/powerpc/include/asm/pte-hash32.h 2011-11-11 15:19:27.000000000 -0500
1771+++ linux-3.1.1/arch/powerpc/include/asm/pte-hash32.h 2011-11-16 18:39:07.000000000 -0500
1772@@ -21,6 +21,7 @@
1773 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
1774 #define _PAGE_USER 0x004 /* usermode access allowed */
1775 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
1776+#define _PAGE_EXEC _PAGE_GUARDED
1777 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
1778 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
1779 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
1780diff -urNp linux-3.1.1/arch/powerpc/include/asm/reg.h linux-3.1.1/arch/powerpc/include/asm/reg.h
1781--- linux-3.1.1/arch/powerpc/include/asm/reg.h 2011-11-11 15:19:27.000000000 -0500
1782+++ linux-3.1.1/arch/powerpc/include/asm/reg.h 2011-11-16 18:39:07.000000000 -0500
1783@@ -212,6 +212,7 @@
1784 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
1785 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
1786 #define DSISR_NOHPTE 0x40000000 /* no translation found */
1787+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
1788 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
1789 #define DSISR_ISSTORE 0x02000000 /* access was a store */
1790 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
1791diff -urNp linux-3.1.1/arch/powerpc/include/asm/system.h linux-3.1.1/arch/powerpc/include/asm/system.h
1792--- linux-3.1.1/arch/powerpc/include/asm/system.h 2011-11-11 15:19:27.000000000 -0500
1793+++ linux-3.1.1/arch/powerpc/include/asm/system.h 2011-11-16 18:39:07.000000000 -0500
1794@@ -530,7 +530,7 @@ __cmpxchg_local(volatile void *ptr, unsi
1795 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
1796 #endif
1797
1798-extern unsigned long arch_align_stack(unsigned long sp);
1799+#define arch_align_stack(x) ((x) & ~0xfUL)
1800
1801 /* Used in very early kernel initialization. */
1802 extern unsigned long reloc_offset(void);
1803diff -urNp linux-3.1.1/arch/powerpc/include/asm/uaccess.h linux-3.1.1/arch/powerpc/include/asm/uaccess.h
1804--- linux-3.1.1/arch/powerpc/include/asm/uaccess.h 2011-11-11 15:19:27.000000000 -0500
1805+++ linux-3.1.1/arch/powerpc/include/asm/uaccess.h 2011-11-16 18:39:07.000000000 -0500
1806@@ -13,6 +13,8 @@
1807 #define VERIFY_READ 0
1808 #define VERIFY_WRITE 1
1809
1810+extern void check_object_size(const void *ptr, unsigned long n, bool to);
1811+
1812 /*
1813 * The fs value determines whether argument validity checking should be
1814 * performed or not. If get_fs() == USER_DS, checking is performed, with
1815@@ -327,52 +329,6 @@ do { \
1816 extern unsigned long __copy_tofrom_user(void __user *to,
1817 const void __user *from, unsigned long size);
1818
1819-#ifndef __powerpc64__
1820-
1821-static inline unsigned long copy_from_user(void *to,
1822- const void __user *from, unsigned long n)
1823-{
1824- unsigned long over;
1825-
1826- if (access_ok(VERIFY_READ, from, n))
1827- return __copy_tofrom_user((__force void __user *)to, from, n);
1828- if ((unsigned long)from < TASK_SIZE) {
1829- over = (unsigned long)from + n - TASK_SIZE;
1830- return __copy_tofrom_user((__force void __user *)to, from,
1831- n - over) + over;
1832- }
1833- return n;
1834-}
1835-
1836-static inline unsigned long copy_to_user(void __user *to,
1837- const void *from, unsigned long n)
1838-{
1839- unsigned long over;
1840-
1841- if (access_ok(VERIFY_WRITE, to, n))
1842- return __copy_tofrom_user(to, (__force void __user *)from, n);
1843- if ((unsigned long)to < TASK_SIZE) {
1844- over = (unsigned long)to + n - TASK_SIZE;
1845- return __copy_tofrom_user(to, (__force void __user *)from,
1846- n - over) + over;
1847- }
1848- return n;
1849-}
1850-
1851-#else /* __powerpc64__ */
1852-
1853-#define __copy_in_user(to, from, size) \
1854- __copy_tofrom_user((to), (from), (size))
1855-
1856-extern unsigned long copy_from_user(void *to, const void __user *from,
1857- unsigned long n);
1858-extern unsigned long copy_to_user(void __user *to, const void *from,
1859- unsigned long n);
1860-extern unsigned long copy_in_user(void __user *to, const void __user *from,
1861- unsigned long n);
1862-
1863-#endif /* __powerpc64__ */
1864-
1865 static inline unsigned long __copy_from_user_inatomic(void *to,
1866 const void __user *from, unsigned long n)
1867 {
1868@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
1869 if (ret == 0)
1870 return 0;
1871 }
1872+
1873+ if (!__builtin_constant_p(n))
1874+ check_object_size(to, n, false);
1875+
1876 return __copy_tofrom_user((__force void __user *)to, from, n);
1877 }
1878
1879@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
1880 if (ret == 0)
1881 return 0;
1882 }
1883+
1884+ if (!__builtin_constant_p(n))
1885+ check_object_size(from, n, true);
1886+
1887 return __copy_tofrom_user(to, (__force const void __user *)from, n);
1888 }
1889
1890@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
1891 return __copy_to_user_inatomic(to, from, size);
1892 }
1893
1894+#ifndef __powerpc64__
1895+
1896+static inline unsigned long __must_check copy_from_user(void *to,
1897+ const void __user *from, unsigned long n)
1898+{
1899+ unsigned long over;
1900+
1901+ if ((long)n < 0)
1902+ return n;
1903+
1904+ if (access_ok(VERIFY_READ, from, n)) {
1905+ if (!__builtin_constant_p(n))
1906+ check_object_size(to, n, false);
1907+ return __copy_tofrom_user((__force void __user *)to, from, n);
1908+ }
1909+ if ((unsigned long)from < TASK_SIZE) {
1910+ over = (unsigned long)from + n - TASK_SIZE;
1911+ if (!__builtin_constant_p(n - over))
1912+ check_object_size(to, n - over, false);
1913+ return __copy_tofrom_user((__force void __user *)to, from,
1914+ n - over) + over;
1915+ }
1916+ return n;
1917+}
1918+
1919+static inline unsigned long __must_check copy_to_user(void __user *to,
1920+ const void *from, unsigned long n)
1921+{
1922+ unsigned long over;
1923+
1924+ if ((long)n < 0)
1925+ return n;
1926+
1927+ if (access_ok(VERIFY_WRITE, to, n)) {
1928+ if (!__builtin_constant_p(n))
1929+ check_object_size(from, n, true);
1930+ return __copy_tofrom_user(to, (__force void __user *)from, n);
1931+ }
1932+ if ((unsigned long)to < TASK_SIZE) {
1933+ over = (unsigned long)to + n - TASK_SIZE;
1934+ if (!__builtin_constant_p(n))
1935+ check_object_size(from, n - over, true);
1936+ return __copy_tofrom_user(to, (__force void __user *)from,
1937+ n - over) + over;
1938+ }
1939+ return n;
1940+}
1941+
1942+#else /* __powerpc64__ */
1943+
1944+#define __copy_in_user(to, from, size) \
1945+ __copy_tofrom_user((to), (from), (size))
1946+
1947+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1948+{
1949+ if ((long)n < 0 || n > INT_MAX)
1950+ return n;
1951+
1952+ if (!__builtin_constant_p(n))
1953+ check_object_size(to, n, false);
1954+
1955+ if (likely(access_ok(VERIFY_READ, from, n)))
1956+ n = __copy_from_user(to, from, n);
1957+ else
1958+ memset(to, 0, n);
1959+ return n;
1960+}
1961+
1962+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1963+{
1964+ if ((long)n < 0 || n > INT_MAX)
1965+ return n;
1966+
1967+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
1968+ if (!__builtin_constant_p(n))
1969+ check_object_size(from, n, true);
1970+ n = __copy_to_user(to, from, n);
1971+ }
1972+ return n;
1973+}
1974+
1975+extern unsigned long copy_in_user(void __user *to, const void __user *from,
1976+ unsigned long n);
1977+
1978+#endif /* __powerpc64__ */
1979+
1980 extern unsigned long __clear_user(void __user *addr, unsigned long size);
1981
1982 static inline unsigned long clear_user(void __user *addr, unsigned long size)
1983diff -urNp linux-3.1.1/arch/powerpc/kernel/exceptions-64e.S linux-3.1.1/arch/powerpc/kernel/exceptions-64e.S
1984--- linux-3.1.1/arch/powerpc/kernel/exceptions-64e.S 2011-11-11 15:19:27.000000000 -0500
1985+++ linux-3.1.1/arch/powerpc/kernel/exceptions-64e.S 2011-11-16 18:39:07.000000000 -0500
1986@@ -587,6 +587,7 @@ storage_fault_common:
1987 std r14,_DAR(r1)
1988 std r15,_DSISR(r1)
1989 addi r3,r1,STACK_FRAME_OVERHEAD
1990+ bl .save_nvgprs
1991 mr r4,r14
1992 mr r5,r15
1993 ld r14,PACA_EXGEN+EX_R14(r13)
1994@@ -596,8 +597,7 @@ storage_fault_common:
1995 cmpdi r3,0
1996 bne- 1f
1997 b .ret_from_except_lite
1998-1: bl .save_nvgprs
1999- mr r5,r3
2000+1: mr r5,r3
2001 addi r3,r1,STACK_FRAME_OVERHEAD
2002 ld r4,_DAR(r1)
2003 bl .bad_page_fault
2004diff -urNp linux-3.1.1/arch/powerpc/kernel/exceptions-64s.S linux-3.1.1/arch/powerpc/kernel/exceptions-64s.S
2005--- linux-3.1.1/arch/powerpc/kernel/exceptions-64s.S 2011-11-11 15:19:27.000000000 -0500
2006+++ linux-3.1.1/arch/powerpc/kernel/exceptions-64s.S 2011-11-16 18:39:07.000000000 -0500
2007@@ -1014,10 +1014,10 @@ handle_page_fault:
2008 11: ld r4,_DAR(r1)
2009 ld r5,_DSISR(r1)
2010 addi r3,r1,STACK_FRAME_OVERHEAD
2011+ bl .save_nvgprs
2012 bl .do_page_fault
2013 cmpdi r3,0
2014 beq+ 13f
2015- bl .save_nvgprs
2016 mr r5,r3
2017 addi r3,r1,STACK_FRAME_OVERHEAD
2018 lwz r4,_DAR(r1)
2019diff -urNp linux-3.1.1/arch/powerpc/kernel/module_32.c linux-3.1.1/arch/powerpc/kernel/module_32.c
2020--- linux-3.1.1/arch/powerpc/kernel/module_32.c 2011-11-11 15:19:27.000000000 -0500
2021+++ linux-3.1.1/arch/powerpc/kernel/module_32.c 2011-11-16 18:39:07.000000000 -0500
2022@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2023 me->arch.core_plt_section = i;
2024 }
2025 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2026- printk("Module doesn't contain .plt or .init.plt sections.\n");
2027+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2028 return -ENOEXEC;
2029 }
2030
2031@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *locati
2032
2033 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2034 /* Init, or core PLT? */
2035- if (location >= mod->module_core
2036- && location < mod->module_core + mod->core_size)
2037+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2038+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2039 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2040- else
2041+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2042+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2043 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2044+ else {
2045+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2046+ return ~0UL;
2047+ }
2048
2049 /* Find this entry, or if that fails, the next avail. entry */
2050 while (entry->jump[0]) {
2051diff -urNp linux-3.1.1/arch/powerpc/kernel/process.c linux-3.1.1/arch/powerpc/kernel/process.c
2052--- linux-3.1.1/arch/powerpc/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
2053+++ linux-3.1.1/arch/powerpc/kernel/process.c 2011-11-16 18:40:08.000000000 -0500
2054@@ -682,8 +682,8 @@ void show_regs(struct pt_regs * regs)
2055 * Lookup NIP late so we have the best change of getting the
2056 * above info out without failing
2057 */
2058- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2059- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2060+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2061+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2062 #endif
2063 show_stack(current, (unsigned long *) regs->gpr[1]);
2064 if (!user_mode(regs))
2065@@ -1187,10 +1187,10 @@ void show_stack(struct task_struct *tsk,
2066 newsp = stack[0];
2067 ip = stack[STACK_FRAME_LR_SAVE];
2068 if (!firstframe || ip != lr) {
2069- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2070+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2071 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2072 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2073- printk(" (%pS)",
2074+ printk(" (%pA)",
2075 (void *)current->ret_stack[curr_frame].ret);
2076 curr_frame--;
2077 }
2078@@ -1210,7 +1210,7 @@ void show_stack(struct task_struct *tsk,
2079 struct pt_regs *regs = (struct pt_regs *)
2080 (sp + STACK_FRAME_OVERHEAD);
2081 lr = regs->link;
2082- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2083+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2084 regs->trap, (void *)regs->nip, (void *)lr);
2085 firstframe = 1;
2086 }
2087@@ -1285,58 +1285,3 @@ void thread_info_cache_init(void)
2088 }
2089
2090 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2091-
2092-unsigned long arch_align_stack(unsigned long sp)
2093-{
2094- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2095- sp -= get_random_int() & ~PAGE_MASK;
2096- return sp & ~0xf;
2097-}
2098-
2099-static inline unsigned long brk_rnd(void)
2100-{
2101- unsigned long rnd = 0;
2102-
2103- /* 8MB for 32bit, 1GB for 64bit */
2104- if (is_32bit_task())
2105- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2106- else
2107- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2108-
2109- return rnd << PAGE_SHIFT;
2110-}
2111-
2112-unsigned long arch_randomize_brk(struct mm_struct *mm)
2113-{
2114- unsigned long base = mm->brk;
2115- unsigned long ret;
2116-
2117-#ifdef CONFIG_PPC_STD_MMU_64
2118- /*
2119- * If we are using 1TB segments and we are allowed to randomise
2120- * the heap, we can put it above 1TB so it is backed by a 1TB
2121- * segment. Otherwise the heap will be in the bottom 1TB
2122- * which always uses 256MB segments and this may result in a
2123- * performance penalty.
2124- */
2125- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2126- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2127-#endif
2128-
2129- ret = PAGE_ALIGN(base + brk_rnd());
2130-
2131- if (ret < mm->brk)
2132- return mm->brk;
2133-
2134- return ret;
2135-}
2136-
2137-unsigned long randomize_et_dyn(unsigned long base)
2138-{
2139- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2140-
2141- if (ret < base)
2142- return base;
2143-
2144- return ret;
2145-}
2146diff -urNp linux-3.1.1/arch/powerpc/kernel/signal_32.c linux-3.1.1/arch/powerpc/kernel/signal_32.c
2147--- linux-3.1.1/arch/powerpc/kernel/signal_32.c 2011-11-11 15:19:27.000000000 -0500
2148+++ linux-3.1.1/arch/powerpc/kernel/signal_32.c 2011-11-16 18:39:07.000000000 -0500
2149@@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig
2150 /* Save user registers on the stack */
2151 frame = &rt_sf->uc.uc_mcontext;
2152 addr = frame;
2153- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2154+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2155 if (save_user_regs(regs, frame, 0, 1))
2156 goto badframe;
2157 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2158diff -urNp linux-3.1.1/arch/powerpc/kernel/signal_64.c linux-3.1.1/arch/powerpc/kernel/signal_64.c
2159--- linux-3.1.1/arch/powerpc/kernel/signal_64.c 2011-11-11 15:19:27.000000000 -0500
2160+++ linux-3.1.1/arch/powerpc/kernel/signal_64.c 2011-11-16 18:39:07.000000000 -0500
2161@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct
2162 current->thread.fpscr.val = 0;
2163
2164 /* Set up to return from userspace. */
2165- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2166+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2167 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2168 } else {
2169 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2170diff -urNp linux-3.1.1/arch/powerpc/kernel/traps.c linux-3.1.1/arch/powerpc/kernel/traps.c
2171--- linux-3.1.1/arch/powerpc/kernel/traps.c 2011-11-11 15:19:27.000000000 -0500
2172+++ linux-3.1.1/arch/powerpc/kernel/traps.c 2011-11-16 18:40:08.000000000 -0500
2173@@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2174 static inline void pmac_backlight_unblank(void) { }
2175 #endif
2176
2177+extern void gr_handle_kernel_exploit(void);
2178+
2179 int die(const char *str, struct pt_regs *regs, long err)
2180 {
2181 static struct {
2182@@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs
2183 if (panic_on_oops)
2184 panic("Fatal exception");
2185
2186+ gr_handle_kernel_exploit();
2187+
2188 oops_exit();
2189 do_exit(err);
2190
2191diff -urNp linux-3.1.1/arch/powerpc/kernel/vdso.c linux-3.1.1/arch/powerpc/kernel/vdso.c
2192--- linux-3.1.1/arch/powerpc/kernel/vdso.c 2011-11-11 15:19:27.000000000 -0500
2193+++ linux-3.1.1/arch/powerpc/kernel/vdso.c 2011-11-16 18:39:07.000000000 -0500
2194@@ -36,6 +36,7 @@
2195 #include <asm/firmware.h>
2196 #include <asm/vdso.h>
2197 #include <asm/vdso_datapage.h>
2198+#include <asm/mman.h>
2199
2200 #include "setup.h"
2201
2202@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2203 vdso_base = VDSO32_MBASE;
2204 #endif
2205
2206- current->mm->context.vdso_base = 0;
2207+ current->mm->context.vdso_base = ~0UL;
2208
2209 /* vDSO has a problem and was disabled, just don't "enable" it for the
2210 * process
2211@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2212 vdso_base = get_unmapped_area(NULL, vdso_base,
2213 (vdso_pages << PAGE_SHIFT) +
2214 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2215- 0, 0);
2216+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2217 if (IS_ERR_VALUE(vdso_base)) {
2218 rc = vdso_base;
2219 goto fail_mmapsem;
2220diff -urNp linux-3.1.1/arch/powerpc/lib/usercopy_64.c linux-3.1.1/arch/powerpc/lib/usercopy_64.c
2221--- linux-3.1.1/arch/powerpc/lib/usercopy_64.c 2011-11-11 15:19:27.000000000 -0500
2222+++ linux-3.1.1/arch/powerpc/lib/usercopy_64.c 2011-11-16 18:39:07.000000000 -0500
2223@@ -9,22 +9,6 @@
2224 #include <linux/module.h>
2225 #include <asm/uaccess.h>
2226
2227-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2228-{
2229- if (likely(access_ok(VERIFY_READ, from, n)))
2230- n = __copy_from_user(to, from, n);
2231- else
2232- memset(to, 0, n);
2233- return n;
2234-}
2235-
2236-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2237-{
2238- if (likely(access_ok(VERIFY_WRITE, to, n)))
2239- n = __copy_to_user(to, from, n);
2240- return n;
2241-}
2242-
2243 unsigned long copy_in_user(void __user *to, const void __user *from,
2244 unsigned long n)
2245 {
2246@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2247 return n;
2248 }
2249
2250-EXPORT_SYMBOL(copy_from_user);
2251-EXPORT_SYMBOL(copy_to_user);
2252 EXPORT_SYMBOL(copy_in_user);
2253
2254diff -urNp linux-3.1.1/arch/powerpc/mm/fault.c linux-3.1.1/arch/powerpc/mm/fault.c
2255--- linux-3.1.1/arch/powerpc/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
2256+++ linux-3.1.1/arch/powerpc/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
2257@@ -32,6 +32,10 @@
2258 #include <linux/perf_event.h>
2259 #include <linux/magic.h>
2260 #include <linux/ratelimit.h>
2261+#include <linux/slab.h>
2262+#include <linux/pagemap.h>
2263+#include <linux/compiler.h>
2264+#include <linux/unistd.h>
2265
2266 #include <asm/firmware.h>
2267 #include <asm/page.h>
2268@@ -43,6 +47,7 @@
2269 #include <asm/tlbflush.h>
2270 #include <asm/siginfo.h>
2271 #include <mm/mmu_decl.h>
2272+#include <asm/ptrace.h>
2273
2274 #ifdef CONFIG_KPROBES
2275 static inline int notify_page_fault(struct pt_regs *regs)
2276@@ -66,6 +71,33 @@ static inline int notify_page_fault(stru
2277 }
2278 #endif
2279
2280+#ifdef CONFIG_PAX_PAGEEXEC
2281+/*
2282+ * PaX: decide what to do with offenders (regs->nip = fault address)
2283+ *
2284+ * returns 1 when task should be killed
2285+ */
2286+static int pax_handle_fetch_fault(struct pt_regs *regs)
2287+{
2288+ return 1;
2289+}
2290+
2291+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2292+{
2293+ unsigned long i;
2294+
2295+ printk(KERN_ERR "PAX: bytes at PC: ");
2296+ for (i = 0; i < 5; i++) {
2297+ unsigned int c;
2298+ if (get_user(c, (unsigned int __user *)pc+i))
2299+ printk(KERN_CONT "???????? ");
2300+ else
2301+ printk(KERN_CONT "%08x ", c);
2302+ }
2303+ printk("\n");
2304+}
2305+#endif
2306+
2307 /*
2308 * Check whether the instruction at regs->nip is a store using
2309 * an update addressing form which will update r1.
2310@@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_re
2311 * indicate errors in DSISR but can validly be set in SRR1.
2312 */
2313 if (trap == 0x400)
2314- error_code &= 0x48200000;
2315+ error_code &= 0x58200000;
2316 else
2317 is_write = error_code & DSISR_ISSTORE;
2318 #else
2319@@ -259,7 +291,7 @@ good_area:
2320 * "undefined". Of those that can be set, this is the only
2321 * one which seems bad.
2322 */
2323- if (error_code & 0x10000000)
2324+ if (error_code & DSISR_GUARDED)
2325 /* Guarded storage error. */
2326 goto bad_area;
2327 #endif /* CONFIG_8xx */
2328@@ -274,7 +306,7 @@ good_area:
2329 * processors use the same I/D cache coherency mechanism
2330 * as embedded.
2331 */
2332- if (error_code & DSISR_PROTFAULT)
2333+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2334 goto bad_area;
2335 #endif /* CONFIG_PPC_STD_MMU */
2336
2337@@ -343,6 +375,23 @@ bad_area:
2338 bad_area_nosemaphore:
2339 /* User mode accesses cause a SIGSEGV */
2340 if (user_mode(regs)) {
2341+
2342+#ifdef CONFIG_PAX_PAGEEXEC
2343+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2344+#ifdef CONFIG_PPC_STD_MMU
2345+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2346+#else
2347+ if (is_exec && regs->nip == address) {
2348+#endif
2349+ switch (pax_handle_fetch_fault(regs)) {
2350+ }
2351+
2352+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2353+ do_group_exit(SIGKILL);
2354+ }
2355+ }
2356+#endif
2357+
2358 _exception(SIGSEGV, regs, code, address);
2359 return 0;
2360 }
2361diff -urNp linux-3.1.1/arch/powerpc/mm/mmap_64.c linux-3.1.1/arch/powerpc/mm/mmap_64.c
2362--- linux-3.1.1/arch/powerpc/mm/mmap_64.c 2011-11-11 15:19:27.000000000 -0500
2363+++ linux-3.1.1/arch/powerpc/mm/mmap_64.c 2011-11-16 18:39:07.000000000 -0500
2364@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2365 */
2366 if (mmap_is_legacy()) {
2367 mm->mmap_base = TASK_UNMAPPED_BASE;
2368+
2369+#ifdef CONFIG_PAX_RANDMMAP
2370+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2371+ mm->mmap_base += mm->delta_mmap;
2372+#endif
2373+
2374 mm->get_unmapped_area = arch_get_unmapped_area;
2375 mm->unmap_area = arch_unmap_area;
2376 } else {
2377 mm->mmap_base = mmap_base();
2378+
2379+#ifdef CONFIG_PAX_RANDMMAP
2380+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2381+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2382+#endif
2383+
2384 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2385 mm->unmap_area = arch_unmap_area_topdown;
2386 }
2387diff -urNp linux-3.1.1/arch/powerpc/mm/slice.c linux-3.1.1/arch/powerpc/mm/slice.c
2388--- linux-3.1.1/arch/powerpc/mm/slice.c 2011-11-11 15:19:27.000000000 -0500
2389+++ linux-3.1.1/arch/powerpc/mm/slice.c 2011-11-16 18:39:07.000000000 -0500
2390@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2391 if ((mm->task_size - len) < addr)
2392 return 0;
2393 vma = find_vma(mm, addr);
2394- return (!vma || (addr + len) <= vma->vm_start);
2395+ return check_heap_stack_gap(vma, addr, len);
2396 }
2397
2398 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2399@@ -256,7 +256,7 @@ full_search:
2400 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2401 continue;
2402 }
2403- if (!vma || addr + len <= vma->vm_start) {
2404+ if (check_heap_stack_gap(vma, addr, len)) {
2405 /*
2406 * Remember the place where we stopped the search:
2407 */
2408@@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2409 }
2410 }
2411
2412- addr = mm->mmap_base;
2413- while (addr > len) {
2414+ if (mm->mmap_base < len)
2415+ addr = -ENOMEM;
2416+ else
2417+ addr = mm->mmap_base - len;
2418+
2419+ while (!IS_ERR_VALUE(addr)) {
2420 /* Go down by chunk size */
2421- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2422+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
2423
2424 /* Check for hit with different page size */
2425 mask = slice_range_to_mask(addr, len);
2426@@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2427 * return with success:
2428 */
2429 vma = find_vma(mm, addr);
2430- if (!vma || (addr + len) <= vma->vm_start) {
2431+ if (check_heap_stack_gap(vma, addr, len)) {
2432 /* remember the address as a hint for next time */
2433 if (use_cache)
2434 mm->free_area_cache = addr;
2435@@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2436 mm->cached_hole_size = vma->vm_start - addr;
2437
2438 /* try just below the current vma->vm_start */
2439- addr = vma->vm_start;
2440+ addr = skip_heap_stack_gap(vma, len);
2441 }
2442
2443 /*
2444@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2445 if (fixed && addr > (mm->task_size - len))
2446 return -EINVAL;
2447
2448+#ifdef CONFIG_PAX_RANDMMAP
2449+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2450+ addr = 0;
2451+#endif
2452+
2453 /* If hint, make sure it matches our alignment restrictions */
2454 if (!fixed && addr) {
2455 addr = _ALIGN_UP(addr, 1ul << pshift);
2456diff -urNp linux-3.1.1/arch/s390/include/asm/elf.h linux-3.1.1/arch/s390/include/asm/elf.h
2457--- linux-3.1.1/arch/s390/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
2458+++ linux-3.1.1/arch/s390/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
2459@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2460 the loader. We need to make sure that it is out of the way of the program
2461 that it will "exec", and that there is sufficient room for the brk. */
2462
2463-extern unsigned long randomize_et_dyn(unsigned long base);
2464-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2465+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2466+
2467+#ifdef CONFIG_PAX_ASLR
2468+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2469+
2470+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2471+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2472+#endif
2473
2474 /* This yields a mask that user programs can use to figure out what
2475 instruction set this CPU supports. */
2476@@ -211,7 +217,4 @@ struct linux_binprm;
2477 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2478 int arch_setup_additional_pages(struct linux_binprm *, int);
2479
2480-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2481-#define arch_randomize_brk arch_randomize_brk
2482-
2483 #endif
2484diff -urNp linux-3.1.1/arch/s390/include/asm/system.h linux-3.1.1/arch/s390/include/asm/system.h
2485--- linux-3.1.1/arch/s390/include/asm/system.h 2011-11-11 15:19:27.000000000 -0500
2486+++ linux-3.1.1/arch/s390/include/asm/system.h 2011-11-16 18:39:07.000000000 -0500
2487@@ -256,7 +256,7 @@ extern void (*_machine_restart)(char *co
2488 extern void (*_machine_halt)(void);
2489 extern void (*_machine_power_off)(void);
2490
2491-extern unsigned long arch_align_stack(unsigned long sp);
2492+#define arch_align_stack(x) ((x) & ~0xfUL)
2493
2494 static inline int tprot(unsigned long addr)
2495 {
2496diff -urNp linux-3.1.1/arch/s390/include/asm/uaccess.h linux-3.1.1/arch/s390/include/asm/uaccess.h
2497--- linux-3.1.1/arch/s390/include/asm/uaccess.h 2011-11-11 15:19:27.000000000 -0500
2498+++ linux-3.1.1/arch/s390/include/asm/uaccess.h 2011-11-16 18:39:07.000000000 -0500
2499@@ -235,6 +235,10 @@ static inline unsigned long __must_check
2500 copy_to_user(void __user *to, const void *from, unsigned long n)
2501 {
2502 might_fault();
2503+
2504+ if ((long)n < 0)
2505+ return n;
2506+
2507 if (access_ok(VERIFY_WRITE, to, n))
2508 n = __copy_to_user(to, from, n);
2509 return n;
2510@@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void
2511 static inline unsigned long __must_check
2512 __copy_from_user(void *to, const void __user *from, unsigned long n)
2513 {
2514+ if ((long)n < 0)
2515+ return n;
2516+
2517 if (__builtin_constant_p(n) && (n <= 256))
2518 return uaccess.copy_from_user_small(n, from, to);
2519 else
2520@@ -294,6 +301,10 @@ copy_from_user(void *to, const void __us
2521 unsigned int sz = __compiletime_object_size(to);
2522
2523 might_fault();
2524+
2525+ if ((long)n < 0)
2526+ return n;
2527+
2528 if (unlikely(sz != -1 && sz < n)) {
2529 copy_from_user_overflow();
2530 return n;
2531diff -urNp linux-3.1.1/arch/s390/kernel/module.c linux-3.1.1/arch/s390/kernel/module.c
2532--- linux-3.1.1/arch/s390/kernel/module.c 2011-11-11 15:19:27.000000000 -0500
2533+++ linux-3.1.1/arch/s390/kernel/module.c 2011-11-16 18:39:07.000000000 -0500
2534@@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
2535
2536 /* Increase core size by size of got & plt and set start
2537 offsets for got and plt. */
2538- me->core_size = ALIGN(me->core_size, 4);
2539- me->arch.got_offset = me->core_size;
2540- me->core_size += me->arch.got_size;
2541- me->arch.plt_offset = me->core_size;
2542- me->core_size += me->arch.plt_size;
2543+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
2544+ me->arch.got_offset = me->core_size_rw;
2545+ me->core_size_rw += me->arch.got_size;
2546+ me->arch.plt_offset = me->core_size_rx;
2547+ me->core_size_rx += me->arch.plt_size;
2548 return 0;
2549 }
2550
2551@@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2552 if (info->got_initialized == 0) {
2553 Elf_Addr *gotent;
2554
2555- gotent = me->module_core + me->arch.got_offset +
2556+ gotent = me->module_core_rw + me->arch.got_offset +
2557 info->got_offset;
2558 *gotent = val;
2559 info->got_initialized = 1;
2560@@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2561 else if (r_type == R_390_GOTENT ||
2562 r_type == R_390_GOTPLTENT)
2563 *(unsigned int *) loc =
2564- (val + (Elf_Addr) me->module_core - loc) >> 1;
2565+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
2566 else if (r_type == R_390_GOT64 ||
2567 r_type == R_390_GOTPLT64)
2568 *(unsigned long *) loc = val;
2569@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2570 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
2571 if (info->plt_initialized == 0) {
2572 unsigned int *ip;
2573- ip = me->module_core + me->arch.plt_offset +
2574+ ip = me->module_core_rx + me->arch.plt_offset +
2575 info->plt_offset;
2576 #ifndef CONFIG_64BIT
2577 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
2578@@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2579 val - loc + 0xffffUL < 0x1ffffeUL) ||
2580 (r_type == R_390_PLT32DBL &&
2581 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
2582- val = (Elf_Addr) me->module_core +
2583+ val = (Elf_Addr) me->module_core_rx +
2584 me->arch.plt_offset +
2585 info->plt_offset;
2586 val += rela->r_addend - loc;
2587@@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2588 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
2589 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
2590 val = val + rela->r_addend -
2591- ((Elf_Addr) me->module_core + me->arch.got_offset);
2592+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
2593 if (r_type == R_390_GOTOFF16)
2594 *(unsigned short *) loc = val;
2595 else if (r_type == R_390_GOTOFF32)
2596@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2597 break;
2598 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
2599 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
2600- val = (Elf_Addr) me->module_core + me->arch.got_offset +
2601+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
2602 rela->r_addend - loc;
2603 if (r_type == R_390_GOTPC)
2604 *(unsigned int *) loc = val;
2605diff -urNp linux-3.1.1/arch/s390/kernel/process.c linux-3.1.1/arch/s390/kernel/process.c
2606--- linux-3.1.1/arch/s390/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
2607+++ linux-3.1.1/arch/s390/kernel/process.c 2011-11-16 18:39:07.000000000 -0500
2608@@ -319,39 +319,3 @@ unsigned long get_wchan(struct task_stru
2609 }
2610 return 0;
2611 }
2612-
2613-unsigned long arch_align_stack(unsigned long sp)
2614-{
2615- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2616- sp -= get_random_int() & ~PAGE_MASK;
2617- return sp & ~0xf;
2618-}
2619-
2620-static inline unsigned long brk_rnd(void)
2621-{
2622- /* 8MB for 32bit, 1GB for 64bit */
2623- if (is_32bit_task())
2624- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
2625- else
2626- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
2627-}
2628-
2629-unsigned long arch_randomize_brk(struct mm_struct *mm)
2630-{
2631- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
2632-
2633- if (ret < mm->brk)
2634- return mm->brk;
2635- return ret;
2636-}
2637-
2638-unsigned long randomize_et_dyn(unsigned long base)
2639-{
2640- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2641-
2642- if (!(current->flags & PF_RANDOMIZE))
2643- return base;
2644- if (ret < base)
2645- return base;
2646- return ret;
2647-}
2648diff -urNp linux-3.1.1/arch/s390/kernel/setup.c linux-3.1.1/arch/s390/kernel/setup.c
2649--- linux-3.1.1/arch/s390/kernel/setup.c 2011-11-11 15:19:27.000000000 -0500
2650+++ linux-3.1.1/arch/s390/kernel/setup.c 2011-11-16 18:39:07.000000000 -0500
2651@@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
2652 }
2653 early_param("mem", early_parse_mem);
2654
2655-unsigned int user_mode = HOME_SPACE_MODE;
2656+unsigned int user_mode = SECONDARY_SPACE_MODE;
2657 EXPORT_SYMBOL_GPL(user_mode);
2658
2659 static int set_amode_and_uaccess(unsigned long user_amode,
2660diff -urNp linux-3.1.1/arch/s390/mm/mmap.c linux-3.1.1/arch/s390/mm/mmap.c
2661--- linux-3.1.1/arch/s390/mm/mmap.c 2011-11-11 15:19:27.000000000 -0500
2662+++ linux-3.1.1/arch/s390/mm/mmap.c 2011-11-16 18:39:07.000000000 -0500
2663@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
2664 */
2665 if (mmap_is_legacy()) {
2666 mm->mmap_base = TASK_UNMAPPED_BASE;
2667+
2668+#ifdef CONFIG_PAX_RANDMMAP
2669+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2670+ mm->mmap_base += mm->delta_mmap;
2671+#endif
2672+
2673 mm->get_unmapped_area = arch_get_unmapped_area;
2674 mm->unmap_area = arch_unmap_area;
2675 } else {
2676 mm->mmap_base = mmap_base();
2677+
2678+#ifdef CONFIG_PAX_RANDMMAP
2679+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2680+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2681+#endif
2682+
2683 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2684 mm->unmap_area = arch_unmap_area_topdown;
2685 }
2686@@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
2687 */
2688 if (mmap_is_legacy()) {
2689 mm->mmap_base = TASK_UNMAPPED_BASE;
2690+
2691+#ifdef CONFIG_PAX_RANDMMAP
2692+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2693+ mm->mmap_base += mm->delta_mmap;
2694+#endif
2695+
2696 mm->get_unmapped_area = s390_get_unmapped_area;
2697 mm->unmap_area = arch_unmap_area;
2698 } else {
2699 mm->mmap_base = mmap_base();
2700+
2701+#ifdef CONFIG_PAX_RANDMMAP
2702+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2703+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2704+#endif
2705+
2706 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
2707 mm->unmap_area = arch_unmap_area_topdown;
2708 }
2709diff -urNp linux-3.1.1/arch/score/include/asm/system.h linux-3.1.1/arch/score/include/asm/system.h
2710--- linux-3.1.1/arch/score/include/asm/system.h 2011-11-11 15:19:27.000000000 -0500
2711+++ linux-3.1.1/arch/score/include/asm/system.h 2011-11-16 18:39:07.000000000 -0500
2712@@ -17,7 +17,7 @@ do { \
2713 #define finish_arch_switch(prev) do {} while (0)
2714
2715 typedef void (*vi_handler_t)(void);
2716-extern unsigned long arch_align_stack(unsigned long sp);
2717+#define arch_align_stack(x) (x)
2718
2719 #define mb() barrier()
2720 #define rmb() barrier()
2721diff -urNp linux-3.1.1/arch/score/kernel/process.c linux-3.1.1/arch/score/kernel/process.c
2722--- linux-3.1.1/arch/score/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
2723+++ linux-3.1.1/arch/score/kernel/process.c 2011-11-16 18:39:07.000000000 -0500
2724@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
2725
2726 return task_pt_regs(task)->cp0_epc;
2727 }
2728-
2729-unsigned long arch_align_stack(unsigned long sp)
2730-{
2731- return sp;
2732-}
2733diff -urNp linux-3.1.1/arch/sh/mm/mmap.c linux-3.1.1/arch/sh/mm/mmap.c
2734--- linux-3.1.1/arch/sh/mm/mmap.c 2011-11-11 15:19:27.000000000 -0500
2735+++ linux-3.1.1/arch/sh/mm/mmap.c 2011-11-16 18:39:07.000000000 -0500
2736@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
2737 addr = PAGE_ALIGN(addr);
2738
2739 vma = find_vma(mm, addr);
2740- if (TASK_SIZE - len >= addr &&
2741- (!vma || addr + len <= vma->vm_start))
2742+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2743 return addr;
2744 }
2745
2746@@ -106,7 +105,7 @@ full_search:
2747 }
2748 return -ENOMEM;
2749 }
2750- if (likely(!vma || addr + len <= vma->vm_start)) {
2751+ if (likely(check_heap_stack_gap(vma, addr, len))) {
2752 /*
2753 * Remember the place where we stopped the search:
2754 */
2755@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
2756 addr = PAGE_ALIGN(addr);
2757
2758 vma = find_vma(mm, addr);
2759- if (TASK_SIZE - len >= addr &&
2760- (!vma || addr + len <= vma->vm_start))
2761+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2762 return addr;
2763 }
2764
2765@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
2766 /* make sure it can fit in the remaining address space */
2767 if (likely(addr > len)) {
2768 vma = find_vma(mm, addr-len);
2769- if (!vma || addr <= vma->vm_start) {
2770+ if (check_heap_stack_gap(vma, addr - len, len)) {
2771 /* remember the address as a hint for next time */
2772 return (mm->free_area_cache = addr-len);
2773 }
2774@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
2775 if (unlikely(mm->mmap_base < len))
2776 goto bottomup;
2777
2778- addr = mm->mmap_base-len;
2779- if (do_colour_align)
2780- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2781+ addr = mm->mmap_base - len;
2782
2783 do {
2784+ if (do_colour_align)
2785+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2786 /*
2787 * Lookup failure means no vma is above this address,
2788 * else if new region fits below vma->vm_start,
2789 * return with success:
2790 */
2791 vma = find_vma(mm, addr);
2792- if (likely(!vma || addr+len <= vma->vm_start)) {
2793+ if (likely(check_heap_stack_gap(vma, addr, len))) {
2794 /* remember the address as a hint for next time */
2795 return (mm->free_area_cache = addr);
2796 }
2797@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
2798 mm->cached_hole_size = vma->vm_start - addr;
2799
2800 /* try just below the current vma->vm_start */
2801- addr = vma->vm_start-len;
2802- if (do_colour_align)
2803- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2804- } while (likely(len < vma->vm_start));
2805+ addr = skip_heap_stack_gap(vma, len);
2806+ } while (!IS_ERR_VALUE(addr));
2807
2808 bottomup:
2809 /*
2810diff -urNp linux-3.1.1/arch/sparc/include/asm/atomic_64.h linux-3.1.1/arch/sparc/include/asm/atomic_64.h
2811--- linux-3.1.1/arch/sparc/include/asm/atomic_64.h 2011-11-11 15:19:27.000000000 -0500
2812+++ linux-3.1.1/arch/sparc/include/asm/atomic_64.h 2011-11-16 18:39:07.000000000 -0500
2813@@ -14,18 +14,40 @@
2814 #define ATOMIC64_INIT(i) { (i) }
2815
2816 #define atomic_read(v) (*(volatile int *)&(v)->counter)
2817+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
2818+{
2819+ return v->counter;
2820+}
2821 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
2822+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
2823+{
2824+ return v->counter;
2825+}
2826
2827 #define atomic_set(v, i) (((v)->counter) = i)
2828+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
2829+{
2830+ v->counter = i;
2831+}
2832 #define atomic64_set(v, i) (((v)->counter) = i)
2833+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
2834+{
2835+ v->counter = i;
2836+}
2837
2838 extern void atomic_add(int, atomic_t *);
2839+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
2840 extern void atomic64_add(long, atomic64_t *);
2841+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
2842 extern void atomic_sub(int, atomic_t *);
2843+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
2844 extern void atomic64_sub(long, atomic64_t *);
2845+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
2846
2847 extern int atomic_add_ret(int, atomic_t *);
2848+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
2849 extern long atomic64_add_ret(long, atomic64_t *);
2850+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
2851 extern int atomic_sub_ret(int, atomic_t *);
2852 extern long atomic64_sub_ret(long, atomic64_t *);
2853
2854@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
2855 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
2856
2857 #define atomic_inc_return(v) atomic_add_ret(1, v)
2858+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
2859+{
2860+ return atomic_add_ret_unchecked(1, v);
2861+}
2862 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
2863+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
2864+{
2865+ return atomic64_add_ret_unchecked(1, v);
2866+}
2867
2868 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
2869 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
2870
2871 #define atomic_add_return(i, v) atomic_add_ret(i, v)
2872+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
2873+{
2874+ return atomic_add_ret_unchecked(i, v);
2875+}
2876 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
2877+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
2878+{
2879+ return atomic64_add_ret_unchecked(i, v);
2880+}
2881
2882 /*
2883 * atomic_inc_and_test - increment and test
2884@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
2885 * other cases.
2886 */
2887 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
2888+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
2889+{
2890+ return atomic_inc_return_unchecked(v) == 0;
2891+}
2892 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2893
2894 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
2895@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomi
2896 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
2897
2898 #define atomic_inc(v) atomic_add(1, v)
2899+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
2900+{
2901+ atomic_add_unchecked(1, v);
2902+}
2903 #define atomic64_inc(v) atomic64_add(1, v)
2904+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
2905+{
2906+ atomic64_add_unchecked(1, v);
2907+}
2908
2909 #define atomic_dec(v) atomic_sub(1, v)
2910+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
2911+{
2912+ atomic_sub_unchecked(1, v);
2913+}
2914 #define atomic64_dec(v) atomic64_sub(1, v)
2915+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
2916+{
2917+ atomic64_sub_unchecked(1, v);
2918+}
2919
2920 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
2921 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
2922
2923 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2924+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
2925+{
2926+ return cmpxchg(&v->counter, old, new);
2927+}
2928 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
2929+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
2930+{
2931+ return xchg(&v->counter, new);
2932+}
2933
2934 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
2935 {
2936- int c, old;
2937+ int c, old, new;
2938 c = atomic_read(v);
2939 for (;;) {
2940- if (unlikely(c == (u)))
2941+ if (unlikely(c == u))
2942 break;
2943- old = atomic_cmpxchg((v), c, c + (a));
2944+
2945+ asm volatile("addcc %2, %0, %0\n"
2946+
2947+#ifdef CONFIG_PAX_REFCOUNT
2948+ "tvs %%icc, 6\n"
2949+#endif
2950+
2951+ : "=r" (new)
2952+ : "0" (c), "ir" (a)
2953+ : "cc");
2954+
2955+ old = atomic_cmpxchg(v, c, new);
2956 if (likely(old == c))
2957 break;
2958 c = old;
2959@@ -89,20 +166,35 @@ static inline int __atomic_add_unless(at
2960 #define atomic64_cmpxchg(v, o, n) \
2961 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
2962 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
2963+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
2964+{
2965+ return xchg(&v->counter, new);
2966+}
2967
2968 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
2969 {
2970- long c, old;
2971+ long c, old, new;
2972 c = atomic64_read(v);
2973 for (;;) {
2974- if (unlikely(c == (u)))
2975+ if (unlikely(c == u))
2976 break;
2977- old = atomic64_cmpxchg((v), c, c + (a));
2978+
2979+ asm volatile("addcc %2, %0, %0\n"
2980+
2981+#ifdef CONFIG_PAX_REFCOUNT
2982+ "tvs %%xcc, 6\n"
2983+#endif
2984+
2985+ : "=r" (new)
2986+ : "0" (c), "ir" (a)
2987+ : "cc");
2988+
2989+ old = atomic64_cmpxchg(v, c, new);
2990 if (likely(old == c))
2991 break;
2992 c = old;
2993 }
2994- return c != (u);
2995+ return c != u;
2996 }
2997
2998 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
2999diff -urNp linux-3.1.1/arch/sparc/include/asm/cache.h linux-3.1.1/arch/sparc/include/asm/cache.h
3000--- linux-3.1.1/arch/sparc/include/asm/cache.h 2011-11-11 15:19:27.000000000 -0500
3001+++ linux-3.1.1/arch/sparc/include/asm/cache.h 2011-11-16 18:39:07.000000000 -0500
3002@@ -10,7 +10,7 @@
3003 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3004
3005 #define L1_CACHE_SHIFT 5
3006-#define L1_CACHE_BYTES 32
3007+#define L1_CACHE_BYTES 32UL
3008
3009 #ifdef CONFIG_SPARC32
3010 #define SMP_CACHE_BYTES_SHIFT 5
3011diff -urNp linux-3.1.1/arch/sparc/include/asm/elf_32.h linux-3.1.1/arch/sparc/include/asm/elf_32.h
3012--- linux-3.1.1/arch/sparc/include/asm/elf_32.h 2011-11-11 15:19:27.000000000 -0500
3013+++ linux-3.1.1/arch/sparc/include/asm/elf_32.h 2011-11-16 18:39:07.000000000 -0500
3014@@ -114,6 +114,13 @@ typedef struct {
3015
3016 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3017
3018+#ifdef CONFIG_PAX_ASLR
3019+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3020+
3021+#define PAX_DELTA_MMAP_LEN 16
3022+#define PAX_DELTA_STACK_LEN 16
3023+#endif
3024+
3025 /* This yields a mask that user programs can use to figure out what
3026 instruction set this cpu supports. This can NOT be done in userspace
3027 on Sparc. */
3028diff -urNp linux-3.1.1/arch/sparc/include/asm/elf_64.h linux-3.1.1/arch/sparc/include/asm/elf_64.h
3029--- linux-3.1.1/arch/sparc/include/asm/elf_64.h 2011-11-11 15:19:27.000000000 -0500
3030+++ linux-3.1.1/arch/sparc/include/asm/elf_64.h 2011-11-16 18:39:07.000000000 -0500
3031@@ -180,6 +180,13 @@ typedef struct {
3032 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3033 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3034
3035+#ifdef CONFIG_PAX_ASLR
3036+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3037+
3038+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3039+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3040+#endif
3041+
3042 extern unsigned long sparc64_elf_hwcap;
3043 #define ELF_HWCAP sparc64_elf_hwcap
3044
3045diff -urNp linux-3.1.1/arch/sparc/include/asm/pgtable_32.h linux-3.1.1/arch/sparc/include/asm/pgtable_32.h
3046--- linux-3.1.1/arch/sparc/include/asm/pgtable_32.h 2011-11-11 15:19:27.000000000 -0500
3047+++ linux-3.1.1/arch/sparc/include/asm/pgtable_32.h 2011-11-16 18:39:07.000000000 -0500
3048@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3049 BTFIXUPDEF_INT(page_none)
3050 BTFIXUPDEF_INT(page_copy)
3051 BTFIXUPDEF_INT(page_readonly)
3052+
3053+#ifdef CONFIG_PAX_PAGEEXEC
3054+BTFIXUPDEF_INT(page_shared_noexec)
3055+BTFIXUPDEF_INT(page_copy_noexec)
3056+BTFIXUPDEF_INT(page_readonly_noexec)
3057+#endif
3058+
3059 BTFIXUPDEF_INT(page_kernel)
3060
3061 #define PMD_SHIFT SUN4C_PMD_SHIFT
3062@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3063 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3064 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3065
3066+#ifdef CONFIG_PAX_PAGEEXEC
3067+extern pgprot_t PAGE_SHARED_NOEXEC;
3068+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3069+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3070+#else
3071+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3072+# define PAGE_COPY_NOEXEC PAGE_COPY
3073+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3074+#endif
3075+
3076 extern unsigned long page_kernel;
3077
3078 #ifdef MODULE
3079diff -urNp linux-3.1.1/arch/sparc/include/asm/pgtsrmmu.h linux-3.1.1/arch/sparc/include/asm/pgtsrmmu.h
3080--- linux-3.1.1/arch/sparc/include/asm/pgtsrmmu.h 2011-11-11 15:19:27.000000000 -0500
3081+++ linux-3.1.1/arch/sparc/include/asm/pgtsrmmu.h 2011-11-16 18:39:07.000000000 -0500
3082@@ -115,6 +115,13 @@
3083 SRMMU_EXEC | SRMMU_REF)
3084 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3085 SRMMU_EXEC | SRMMU_REF)
3086+
3087+#ifdef CONFIG_PAX_PAGEEXEC
3088+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3089+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3090+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3091+#endif
3092+
3093 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3094 SRMMU_DIRTY | SRMMU_REF)
3095
3096diff -urNp linux-3.1.1/arch/sparc/include/asm/spinlock_64.h linux-3.1.1/arch/sparc/include/asm/spinlock_64.h
3097--- linux-3.1.1/arch/sparc/include/asm/spinlock_64.h 2011-11-11 15:19:27.000000000 -0500
3098+++ linux-3.1.1/arch/sparc/include/asm/spinlock_64.h 2011-11-16 18:39:07.000000000 -0500
3099@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
3100
3101 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3102
3103-static void inline arch_read_lock(arch_rwlock_t *lock)
3104+static inline void arch_read_lock(arch_rwlock_t *lock)
3105 {
3106 unsigned long tmp1, tmp2;
3107
3108 __asm__ __volatile__ (
3109 "1: ldsw [%2], %0\n"
3110 " brlz,pn %0, 2f\n"
3111-"4: add %0, 1, %1\n"
3112+"4: addcc %0, 1, %1\n"
3113+
3114+#ifdef CONFIG_PAX_REFCOUNT
3115+" tvs %%icc, 6\n"
3116+#endif
3117+
3118 " cas [%2], %0, %1\n"
3119 " cmp %0, %1\n"
3120 " bne,pn %%icc, 1b\n"
3121@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
3122 " .previous"
3123 : "=&r" (tmp1), "=&r" (tmp2)
3124 : "r" (lock)
3125- : "memory");
3126+ : "memory", "cc");
3127 }
3128
3129-static int inline arch_read_trylock(arch_rwlock_t *lock)
3130+static inline int arch_read_trylock(arch_rwlock_t *lock)
3131 {
3132 int tmp1, tmp2;
3133
3134@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3135 "1: ldsw [%2], %0\n"
3136 " brlz,a,pn %0, 2f\n"
3137 " mov 0, %0\n"
3138-" add %0, 1, %1\n"
3139+" addcc %0, 1, %1\n"
3140+
3141+#ifdef CONFIG_PAX_REFCOUNT
3142+" tvs %%icc, 6\n"
3143+#endif
3144+
3145 " cas [%2], %0, %1\n"
3146 " cmp %0, %1\n"
3147 " bne,pn %%icc, 1b\n"
3148@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
3149 return tmp1;
3150 }
3151
3152-static void inline arch_read_unlock(arch_rwlock_t *lock)
3153+static inline void arch_read_unlock(arch_rwlock_t *lock)
3154 {
3155 unsigned long tmp1, tmp2;
3156
3157 __asm__ __volatile__(
3158 "1: lduw [%2], %0\n"
3159-" sub %0, 1, %1\n"
3160+" subcc %0, 1, %1\n"
3161+
3162+#ifdef CONFIG_PAX_REFCOUNT
3163+" tvs %%icc, 6\n"
3164+#endif
3165+
3166 " cas [%2], %0, %1\n"
3167 " cmp %0, %1\n"
3168 " bne,pn %%xcc, 1b\n"
3169@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
3170 : "memory");
3171 }
3172
3173-static void inline arch_write_lock(arch_rwlock_t *lock)
3174+static inline void arch_write_lock(arch_rwlock_t *lock)
3175 {
3176 unsigned long mask, tmp1, tmp2;
3177
3178@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
3179 : "memory");
3180 }
3181
3182-static void inline arch_write_unlock(arch_rwlock_t *lock)
3183+static inline void arch_write_unlock(arch_rwlock_t *lock)
3184 {
3185 __asm__ __volatile__(
3186 " stw %%g0, [%0]"
3187@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
3188 : "memory");
3189 }
3190
3191-static int inline arch_write_trylock(arch_rwlock_t *lock)
3192+static inline int arch_write_trylock(arch_rwlock_t *lock)
3193 {
3194 unsigned long mask, tmp1, tmp2, result;
3195
3196diff -urNp linux-3.1.1/arch/sparc/include/asm/thread_info_32.h linux-3.1.1/arch/sparc/include/asm/thread_info_32.h
3197--- linux-3.1.1/arch/sparc/include/asm/thread_info_32.h 2011-11-11 15:19:27.000000000 -0500
3198+++ linux-3.1.1/arch/sparc/include/asm/thread_info_32.h 2011-11-16 18:39:07.000000000 -0500
3199@@ -50,6 +50,8 @@ struct thread_info {
3200 unsigned long w_saved;
3201
3202 struct restart_block restart_block;
3203+
3204+ unsigned long lowest_stack;
3205 };
3206
3207 /*
3208diff -urNp linux-3.1.1/arch/sparc/include/asm/thread_info_64.h linux-3.1.1/arch/sparc/include/asm/thread_info_64.h
3209--- linux-3.1.1/arch/sparc/include/asm/thread_info_64.h 2011-11-11 15:19:27.000000000 -0500
3210+++ linux-3.1.1/arch/sparc/include/asm/thread_info_64.h 2011-11-16 18:39:07.000000000 -0500
3211@@ -63,6 +63,8 @@ struct thread_info {
3212 struct pt_regs *kern_una_regs;
3213 unsigned int kern_una_insn;
3214
3215+ unsigned long lowest_stack;
3216+
3217 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3218 };
3219
3220diff -urNp linux-3.1.1/arch/sparc/include/asm/uaccess_32.h linux-3.1.1/arch/sparc/include/asm/uaccess_32.h
3221--- linux-3.1.1/arch/sparc/include/asm/uaccess_32.h 2011-11-11 15:19:27.000000000 -0500
3222+++ linux-3.1.1/arch/sparc/include/asm/uaccess_32.h 2011-11-16 18:39:07.000000000 -0500
3223@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3224
3225 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3226 {
3227- if (n && __access_ok((unsigned long) to, n))
3228+ if ((long)n < 0)
3229+ return n;
3230+
3231+ if (n && __access_ok((unsigned long) to, n)) {
3232+ if (!__builtin_constant_p(n))
3233+ check_object_size(from, n, true);
3234 return __copy_user(to, (__force void __user *) from, n);
3235- else
3236+ } else
3237 return n;
3238 }
3239
3240 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3241 {
3242+ if ((long)n < 0)
3243+ return n;
3244+
3245+ if (!__builtin_constant_p(n))
3246+ check_object_size(from, n, true);
3247+
3248 return __copy_user(to, (__force void __user *) from, n);
3249 }
3250
3251 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3252 {
3253- if (n && __access_ok((unsigned long) from, n))
3254+ if ((long)n < 0)
3255+ return n;
3256+
3257+ if (n && __access_ok((unsigned long) from, n)) {
3258+ if (!__builtin_constant_p(n))
3259+ check_object_size(to, n, false);
3260 return __copy_user((__force void __user *) to, from, n);
3261- else
3262+ } else
3263 return n;
3264 }
3265
3266 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3267 {
3268+ if ((long)n < 0)
3269+ return n;
3270+
3271 return __copy_user((__force void __user *) to, from, n);
3272 }
3273
3274diff -urNp linux-3.1.1/arch/sparc/include/asm/uaccess_64.h linux-3.1.1/arch/sparc/include/asm/uaccess_64.h
3275--- linux-3.1.1/arch/sparc/include/asm/uaccess_64.h 2011-11-11 15:19:27.000000000 -0500
3276+++ linux-3.1.1/arch/sparc/include/asm/uaccess_64.h 2011-11-16 18:39:07.000000000 -0500
3277@@ -10,6 +10,7 @@
3278 #include <linux/compiler.h>
3279 #include <linux/string.h>
3280 #include <linux/thread_info.h>
3281+#include <linux/kernel.h>
3282 #include <asm/asi.h>
3283 #include <asm/system.h>
3284 #include <asm/spitfire.h>
3285@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
3286 static inline unsigned long __must_check
3287 copy_from_user(void *to, const void __user *from, unsigned long size)
3288 {
3289- unsigned long ret = ___copy_from_user(to, from, size);
3290+ unsigned long ret;
3291
3292+ if ((long)size < 0 || size > INT_MAX)
3293+ return size;
3294+
3295+ if (!__builtin_constant_p(size))
3296+ check_object_size(to, size, false);
3297+
3298+ ret = ___copy_from_user(to, from, size);
3299 if (unlikely(ret))
3300 ret = copy_from_user_fixup(to, from, size);
3301
3302@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
3303 static inline unsigned long __must_check
3304 copy_to_user(void __user *to, const void *from, unsigned long size)
3305 {
3306- unsigned long ret = ___copy_to_user(to, from, size);
3307+ unsigned long ret;
3308+
3309+ if ((long)size < 0 || size > INT_MAX)
3310+ return size;
3311+
3312+ if (!__builtin_constant_p(size))
3313+ check_object_size(from, size, true);
3314
3315+ ret = ___copy_to_user(to, from, size);
3316 if (unlikely(ret))
3317 ret = copy_to_user_fixup(to, from, size);
3318 return ret;
3319diff -urNp linux-3.1.1/arch/sparc/include/asm/uaccess.h linux-3.1.1/arch/sparc/include/asm/uaccess.h
3320--- linux-3.1.1/arch/sparc/include/asm/uaccess.h 2011-11-11 15:19:27.000000000 -0500
3321+++ linux-3.1.1/arch/sparc/include/asm/uaccess.h 2011-11-16 18:39:07.000000000 -0500
3322@@ -1,5 +1,13 @@
3323 #ifndef ___ASM_SPARC_UACCESS_H
3324 #define ___ASM_SPARC_UACCESS_H
3325+
3326+#ifdef __KERNEL__
3327+#ifndef __ASSEMBLY__
3328+#include <linux/types.h>
3329+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3330+#endif
3331+#endif
3332+
3333 #if defined(__sparc__) && defined(__arch64__)
3334 #include <asm/uaccess_64.h>
3335 #else
3336diff -urNp linux-3.1.1/arch/sparc/kernel/Makefile linux-3.1.1/arch/sparc/kernel/Makefile
3337--- linux-3.1.1/arch/sparc/kernel/Makefile 2011-11-11 15:19:27.000000000 -0500
3338+++ linux-3.1.1/arch/sparc/kernel/Makefile 2011-11-16 18:39:07.000000000 -0500
3339@@ -3,7 +3,7 @@
3340 #
3341
3342 asflags-y := -ansi
3343-ccflags-y := -Werror
3344+#ccflags-y := -Werror
3345
3346 extra-y := head_$(BITS).o
3347 extra-y += init_task.o
3348diff -urNp linux-3.1.1/arch/sparc/kernel/process_32.c linux-3.1.1/arch/sparc/kernel/process_32.c
3349--- linux-3.1.1/arch/sparc/kernel/process_32.c 2011-11-11 15:19:27.000000000 -0500
3350+++ linux-3.1.1/arch/sparc/kernel/process_32.c 2011-11-16 18:40:08.000000000 -0500
3351@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3352 rw->ins[4], rw->ins[5],
3353 rw->ins[6],
3354 rw->ins[7]);
3355- printk("%pS\n", (void *) rw->ins[7]);
3356+ printk("%pA\n", (void *) rw->ins[7]);
3357 rw = (struct reg_window32 *) rw->ins[6];
3358 }
3359 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3360@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3361
3362 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3363 r->psr, r->pc, r->npc, r->y, print_tainted());
3364- printk("PC: <%pS>\n", (void *) r->pc);
3365+ printk("PC: <%pA>\n", (void *) r->pc);
3366 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3367 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3368 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3369 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3370 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3371 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3372- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3373+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3374
3375 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3376 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3377@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk,
3378 rw = (struct reg_window32 *) fp;
3379 pc = rw->ins[7];
3380 printk("[%08lx : ", pc);
3381- printk("%pS ] ", (void *) pc);
3382+ printk("%pA ] ", (void *) pc);
3383 fp = rw->ins[6];
3384 } while (++count < 16);
3385 printk("\n");
3386diff -urNp linux-3.1.1/arch/sparc/kernel/process_64.c linux-3.1.1/arch/sparc/kernel/process_64.c
3387--- linux-3.1.1/arch/sparc/kernel/process_64.c 2011-11-11 15:19:27.000000000 -0500
3388+++ linux-3.1.1/arch/sparc/kernel/process_64.c 2011-11-16 18:40:08.000000000 -0500
3389@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
3390 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3391 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3392 if (regs->tstate & TSTATE_PRIV)
3393- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3394+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3395 }
3396
3397 void show_regs(struct pt_regs *regs)
3398 {
3399 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3400 regs->tpc, regs->tnpc, regs->y, print_tainted());
3401- printk("TPC: <%pS>\n", (void *) regs->tpc);
3402+ printk("TPC: <%pA>\n", (void *) regs->tpc);
3403 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3404 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3405 regs->u_regs[3]);
3406@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3407 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3408 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3409 regs->u_regs[15]);
3410- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3411+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3412 show_regwindow(regs);
3413 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3414 }
3415@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
3416 ((tp && tp->task) ? tp->task->pid : -1));
3417
3418 if (gp->tstate & TSTATE_PRIV) {
3419- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3420+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3421 (void *) gp->tpc,
3422 (void *) gp->o7,
3423 (void *) gp->i7,
3424diff -urNp linux-3.1.1/arch/sparc/kernel/sys_sparc_32.c linux-3.1.1/arch/sparc/kernel/sys_sparc_32.c
3425--- linux-3.1.1/arch/sparc/kernel/sys_sparc_32.c 2011-11-11 15:19:27.000000000 -0500
3426+++ linux-3.1.1/arch/sparc/kernel/sys_sparc_32.c 2011-11-16 18:39:07.000000000 -0500
3427@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
3428 if (ARCH_SUN4C && len > 0x20000000)
3429 return -ENOMEM;
3430 if (!addr)
3431- addr = TASK_UNMAPPED_BASE;
3432+ addr = current->mm->mmap_base;
3433
3434 if (flags & MAP_SHARED)
3435 addr = COLOUR_ALIGN(addr);
3436@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
3437 }
3438 if (TASK_SIZE - PAGE_SIZE - len < addr)
3439 return -ENOMEM;
3440- if (!vmm || addr + len <= vmm->vm_start)
3441+ if (check_heap_stack_gap(vmm, addr, len))
3442 return addr;
3443 addr = vmm->vm_end;
3444 if (flags & MAP_SHARED)
3445diff -urNp linux-3.1.1/arch/sparc/kernel/sys_sparc_64.c linux-3.1.1/arch/sparc/kernel/sys_sparc_64.c
3446--- linux-3.1.1/arch/sparc/kernel/sys_sparc_64.c 2011-11-11 15:19:27.000000000 -0500
3447+++ linux-3.1.1/arch/sparc/kernel/sys_sparc_64.c 2011-11-16 18:39:07.000000000 -0500
3448@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3449 /* We do not accept a shared mapping if it would violate
3450 * cache aliasing constraints.
3451 */
3452- if ((flags & MAP_SHARED) &&
3453+ if ((filp || (flags & MAP_SHARED)) &&
3454 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3455 return -EINVAL;
3456 return addr;
3457@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3458 if (filp || (flags & MAP_SHARED))
3459 do_color_align = 1;
3460
3461+#ifdef CONFIG_PAX_RANDMMAP
3462+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3463+#endif
3464+
3465 if (addr) {
3466 if (do_color_align)
3467 addr = COLOUR_ALIGN(addr, pgoff);
3468@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
3469 addr = PAGE_ALIGN(addr);
3470
3471 vma = find_vma(mm, addr);
3472- if (task_size - len >= addr &&
3473- (!vma || addr + len <= vma->vm_start))
3474+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3475 return addr;
3476 }
3477
3478 if (len > mm->cached_hole_size) {
3479- start_addr = addr = mm->free_area_cache;
3480+ start_addr = addr = mm->free_area_cache;
3481 } else {
3482- start_addr = addr = TASK_UNMAPPED_BASE;
3483+ start_addr = addr = mm->mmap_base;
3484 mm->cached_hole_size = 0;
3485 }
3486
3487@@ -174,14 +177,14 @@ full_search:
3488 vma = find_vma(mm, VA_EXCLUDE_END);
3489 }
3490 if (unlikely(task_size < addr)) {
3491- if (start_addr != TASK_UNMAPPED_BASE) {
3492- start_addr = addr = TASK_UNMAPPED_BASE;
3493+ if (start_addr != mm->mmap_base) {
3494+ start_addr = addr = mm->mmap_base;
3495 mm->cached_hole_size = 0;
3496 goto full_search;
3497 }
3498 return -ENOMEM;
3499 }
3500- if (likely(!vma || addr + len <= vma->vm_start)) {
3501+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3502 /*
3503 * Remember the place where we stopped the search:
3504 */
3505@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
3506 /* We do not accept a shared mapping if it would violate
3507 * cache aliasing constraints.
3508 */
3509- if ((flags & MAP_SHARED) &&
3510+ if ((filp || (flags & MAP_SHARED)) &&
3511 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3512 return -EINVAL;
3513 return addr;
3514@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
3515 addr = PAGE_ALIGN(addr);
3516
3517 vma = find_vma(mm, addr);
3518- if (task_size - len >= addr &&
3519- (!vma || addr + len <= vma->vm_start))
3520+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3521 return addr;
3522 }
3523
3524@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
3525 /* make sure it can fit in the remaining address space */
3526 if (likely(addr > len)) {
3527 vma = find_vma(mm, addr-len);
3528- if (!vma || addr <= vma->vm_start) {
3529+ if (check_heap_stack_gap(vma, addr - len, len)) {
3530 /* remember the address as a hint for next time */
3531 return (mm->free_area_cache = addr-len);
3532 }
3533@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
3534 if (unlikely(mm->mmap_base < len))
3535 goto bottomup;
3536
3537- addr = mm->mmap_base-len;
3538- if (do_color_align)
3539- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3540+ addr = mm->mmap_base - len;
3541
3542 do {
3543+ if (do_color_align)
3544+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3545 /*
3546 * Lookup failure means no vma is above this address,
3547 * else if new region fits below vma->vm_start,
3548 * return with success:
3549 */
3550 vma = find_vma(mm, addr);
3551- if (likely(!vma || addr+len <= vma->vm_start)) {
3552+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3553 /* remember the address as a hint for next time */
3554 return (mm->free_area_cache = addr);
3555 }
3556@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
3557 mm->cached_hole_size = vma->vm_start - addr;
3558
3559 /* try just below the current vma->vm_start */
3560- addr = vma->vm_start-len;
3561- if (do_color_align)
3562- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3563- } while (likely(len < vma->vm_start));
3564+ addr = skip_heap_stack_gap(vma, len);
3565+ } while (!IS_ERR_VALUE(addr));
3566
3567 bottomup:
3568 /*
3569@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
3570 gap == RLIM_INFINITY ||
3571 sysctl_legacy_va_layout) {
3572 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3573+
3574+#ifdef CONFIG_PAX_RANDMMAP
3575+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3576+ mm->mmap_base += mm->delta_mmap;
3577+#endif
3578+
3579 mm->get_unmapped_area = arch_get_unmapped_area;
3580 mm->unmap_area = arch_unmap_area;
3581 } else {
3582@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
3583 gap = (task_size / 6 * 5);
3584
3585 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
3586+
3587+#ifdef CONFIG_PAX_RANDMMAP
3588+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3589+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3590+#endif
3591+
3592 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3593 mm->unmap_area = arch_unmap_area_topdown;
3594 }
3595diff -urNp linux-3.1.1/arch/sparc/kernel/traps_32.c linux-3.1.1/arch/sparc/kernel/traps_32.c
3596--- linux-3.1.1/arch/sparc/kernel/traps_32.c 2011-11-11 15:19:27.000000000 -0500
3597+++ linux-3.1.1/arch/sparc/kernel/traps_32.c 2011-11-16 18:40:08.000000000 -0500
3598@@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
3599 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
3600 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
3601
3602+extern void gr_handle_kernel_exploit(void);
3603+
3604 void die_if_kernel(char *str, struct pt_regs *regs)
3605 {
3606 static int die_counter;
3607@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
3608 count++ < 30 &&
3609 (((unsigned long) rw) >= PAGE_OFFSET) &&
3610 !(((unsigned long) rw) & 0x7)) {
3611- printk("Caller[%08lx]: %pS\n", rw->ins[7],
3612+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
3613 (void *) rw->ins[7]);
3614 rw = (struct reg_window32 *)rw->ins[6];
3615 }
3616 }
3617 printk("Instruction DUMP:");
3618 instruction_dump ((unsigned long *) regs->pc);
3619- if(regs->psr & PSR_PS)
3620+ if(regs->psr & PSR_PS) {
3621+ gr_handle_kernel_exploit();
3622 do_exit(SIGKILL);
3623+ }
3624 do_exit(SIGSEGV);
3625 }
3626
3627diff -urNp linux-3.1.1/arch/sparc/kernel/traps_64.c linux-3.1.1/arch/sparc/kernel/traps_64.c
3628--- linux-3.1.1/arch/sparc/kernel/traps_64.c 2011-11-11 15:19:27.000000000 -0500
3629+++ linux-3.1.1/arch/sparc/kernel/traps_64.c 2011-11-16 18:40:08.000000000 -0500
3630@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
3631 i + 1,
3632 p->trapstack[i].tstate, p->trapstack[i].tpc,
3633 p->trapstack[i].tnpc, p->trapstack[i].tt);
3634- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
3635+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
3636 }
3637 }
3638
3639@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
3640
3641 lvl -= 0x100;
3642 if (regs->tstate & TSTATE_PRIV) {
3643+
3644+#ifdef CONFIG_PAX_REFCOUNT
3645+ if (lvl == 6)
3646+ pax_report_refcount_overflow(regs);
3647+#endif
3648+
3649 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
3650 die_if_kernel(buffer, regs);
3651 }
3652@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
3653 void bad_trap_tl1(struct pt_regs *regs, long lvl)
3654 {
3655 char buffer[32];
3656-
3657+
3658 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
3659 0, lvl, SIGTRAP) == NOTIFY_STOP)
3660 return;
3661
3662+#ifdef CONFIG_PAX_REFCOUNT
3663+ if (lvl == 6)
3664+ pax_report_refcount_overflow(regs);
3665+#endif
3666+
3667 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
3668
3669 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
3670@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
3671 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
3672 printk("%s" "ERROR(%d): ",
3673 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
3674- printk("TPC<%pS>\n", (void *) regs->tpc);
3675+ printk("TPC<%pA>\n", (void *) regs->tpc);
3676 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
3677 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
3678 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
3679@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
3680 smp_processor_id(),
3681 (type & 0x1) ? 'I' : 'D',
3682 regs->tpc);
3683- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
3684+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
3685 panic("Irrecoverable Cheetah+ parity error.");
3686 }
3687
3688@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
3689 smp_processor_id(),
3690 (type & 0x1) ? 'I' : 'D',
3691 regs->tpc);
3692- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
3693+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
3694 }
3695
3696 struct sun4v_error_entry {
3697@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
3698
3699 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
3700 regs->tpc, tl);
3701- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
3702+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
3703 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3704- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
3705+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
3706 (void *) regs->u_regs[UREG_I7]);
3707 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
3708 "pte[%lx] error[%lx]\n",
3709@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
3710
3711 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
3712 regs->tpc, tl);
3713- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
3714+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
3715 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3716- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
3717+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
3718 (void *) regs->u_regs[UREG_I7]);
3719 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
3720 "pte[%lx] error[%lx]\n",
3721@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
3722 fp = (unsigned long)sf->fp + STACK_BIAS;
3723 }
3724
3725- printk(" [%016lx] %pS\n", pc, (void *) pc);
3726+ printk(" [%016lx] %pA\n", pc, (void *) pc);
3727 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3728 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
3729 int index = tsk->curr_ret_stack;
3730 if (tsk->ret_stack && index >= graph) {
3731 pc = tsk->ret_stack[index - graph].ret;
3732- printk(" [%016lx] %pS\n", pc, (void *) pc);
3733+ printk(" [%016lx] %pA\n", pc, (void *) pc);
3734 graph++;
3735 }
3736 }
3737@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
3738 return (struct reg_window *) (fp + STACK_BIAS);
3739 }
3740
3741+extern void gr_handle_kernel_exploit(void);
3742+
3743 void die_if_kernel(char *str, struct pt_regs *regs)
3744 {
3745 static int die_counter;
3746@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
3747 while (rw &&
3748 count++ < 30 &&
3749 kstack_valid(tp, (unsigned long) rw)) {
3750- printk("Caller[%016lx]: %pS\n", rw->ins[7],
3751+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
3752 (void *) rw->ins[7]);
3753
3754 rw = kernel_stack_up(rw);
3755@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
3756 }
3757 user_instruction_dump ((unsigned int __user *) regs->tpc);
3758 }
3759- if (regs->tstate & TSTATE_PRIV)
3760+ if (regs->tstate & TSTATE_PRIV) {
3761+ gr_handle_kernel_exploit();
3762 do_exit(SIGKILL);
3763+ }
3764 do_exit(SIGSEGV);
3765 }
3766 EXPORT_SYMBOL(die_if_kernel);
3767diff -urNp linux-3.1.1/arch/sparc/kernel/unaligned_64.c linux-3.1.1/arch/sparc/kernel/unaligned_64.c
3768--- linux-3.1.1/arch/sparc/kernel/unaligned_64.c 2011-11-11 15:19:27.000000000 -0500
3769+++ linux-3.1.1/arch/sparc/kernel/unaligned_64.c 2011-11-16 18:40:08.000000000 -0500
3770@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs
3771 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
3772
3773 if (__ratelimit(&ratelimit)) {
3774- printk("Kernel unaligned access at TPC[%lx] %pS\n",
3775+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
3776 regs->tpc, (void *) regs->tpc);
3777 }
3778 }
3779diff -urNp linux-3.1.1/arch/sparc/lib/atomic_64.S linux-3.1.1/arch/sparc/lib/atomic_64.S
3780--- linux-3.1.1/arch/sparc/lib/atomic_64.S 2011-11-11 15:19:27.000000000 -0500
3781+++ linux-3.1.1/arch/sparc/lib/atomic_64.S 2011-11-16 18:39:07.000000000 -0500
3782@@ -18,7 +18,12 @@
3783 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
3784 BACKOFF_SETUP(%o2)
3785 1: lduw [%o1], %g1
3786- add %g1, %o0, %g7
3787+ addcc %g1, %o0, %g7
3788+
3789+#ifdef CONFIG_PAX_REFCOUNT
3790+ tvs %icc, 6
3791+#endif
3792+
3793 cas [%o1], %g1, %g7
3794 cmp %g1, %g7
3795 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3796@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
3797 2: BACKOFF_SPIN(%o2, %o3, 1b)
3798 .size atomic_add, .-atomic_add
3799
3800+ .globl atomic_add_unchecked
3801+ .type atomic_add_unchecked,#function
3802+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3803+ BACKOFF_SETUP(%o2)
3804+1: lduw [%o1], %g1
3805+ add %g1, %o0, %g7
3806+ cas [%o1], %g1, %g7
3807+ cmp %g1, %g7
3808+ bne,pn %icc, 2f
3809+ nop
3810+ retl
3811+ nop
3812+2: BACKOFF_SPIN(%o2, %o3, 1b)
3813+ .size atomic_add_unchecked, .-atomic_add_unchecked
3814+
3815 .globl atomic_sub
3816 .type atomic_sub,#function
3817 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3818 BACKOFF_SETUP(%o2)
3819 1: lduw [%o1], %g1
3820- sub %g1, %o0, %g7
3821+ subcc %g1, %o0, %g7
3822+
3823+#ifdef CONFIG_PAX_REFCOUNT
3824+ tvs %icc, 6
3825+#endif
3826+
3827 cas [%o1], %g1, %g7
3828 cmp %g1, %g7
3829 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3830@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
3831 2: BACKOFF_SPIN(%o2, %o3, 1b)
3832 .size atomic_sub, .-atomic_sub
3833
3834+ .globl atomic_sub_unchecked
3835+ .type atomic_sub_unchecked,#function
3836+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3837+ BACKOFF_SETUP(%o2)
3838+1: lduw [%o1], %g1
3839+ sub %g1, %o0, %g7
3840+ cas [%o1], %g1, %g7
3841+ cmp %g1, %g7
3842+ bne,pn %icc, 2f
3843+ nop
3844+ retl
3845+ nop
3846+2: BACKOFF_SPIN(%o2, %o3, 1b)
3847+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
3848+
3849 .globl atomic_add_ret
3850 .type atomic_add_ret,#function
3851 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3852 BACKOFF_SETUP(%o2)
3853 1: lduw [%o1], %g1
3854- add %g1, %o0, %g7
3855+ addcc %g1, %o0, %g7
3856+
3857+#ifdef CONFIG_PAX_REFCOUNT
3858+ tvs %icc, 6
3859+#endif
3860+
3861 cas [%o1], %g1, %g7
3862 cmp %g1, %g7
3863 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3864@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
3865 2: BACKOFF_SPIN(%o2, %o3, 1b)
3866 .size atomic_add_ret, .-atomic_add_ret
3867
3868+ .globl atomic_add_ret_unchecked
3869+ .type atomic_add_ret_unchecked,#function
3870+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3871+ BACKOFF_SETUP(%o2)
3872+1: lduw [%o1], %g1
3873+ addcc %g1, %o0, %g7
3874+ cas [%o1], %g1, %g7
3875+ cmp %g1, %g7
3876+ bne,pn %icc, 2f
3877+ add %g7, %o0, %g7
3878+ sra %g7, 0, %o0
3879+ retl
3880+ nop
3881+2: BACKOFF_SPIN(%o2, %o3, 1b)
3882+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
3883+
3884 .globl atomic_sub_ret
3885 .type atomic_sub_ret,#function
3886 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
3887 BACKOFF_SETUP(%o2)
3888 1: lduw [%o1], %g1
3889- sub %g1, %o0, %g7
3890+ subcc %g1, %o0, %g7
3891+
3892+#ifdef CONFIG_PAX_REFCOUNT
3893+ tvs %icc, 6
3894+#endif
3895+
3896 cas [%o1], %g1, %g7
3897 cmp %g1, %g7
3898 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3899@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
3900 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
3901 BACKOFF_SETUP(%o2)
3902 1: ldx [%o1], %g1
3903- add %g1, %o0, %g7
3904+ addcc %g1, %o0, %g7
3905+
3906+#ifdef CONFIG_PAX_REFCOUNT
3907+ tvs %xcc, 6
3908+#endif
3909+
3910 casx [%o1], %g1, %g7
3911 cmp %g1, %g7
3912 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3913@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
3914 2: BACKOFF_SPIN(%o2, %o3, 1b)
3915 .size atomic64_add, .-atomic64_add
3916
3917+ .globl atomic64_add_unchecked
3918+ .type atomic64_add_unchecked,#function
3919+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3920+ BACKOFF_SETUP(%o2)
3921+1: ldx [%o1], %g1
3922+ addcc %g1, %o0, %g7
3923+ casx [%o1], %g1, %g7
3924+ cmp %g1, %g7
3925+ bne,pn %xcc, 2f
3926+ nop
3927+ retl
3928+ nop
3929+2: BACKOFF_SPIN(%o2, %o3, 1b)
3930+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
3931+
3932 .globl atomic64_sub
3933 .type atomic64_sub,#function
3934 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3935 BACKOFF_SETUP(%o2)
3936 1: ldx [%o1], %g1
3937- sub %g1, %o0, %g7
3938+ subcc %g1, %o0, %g7
3939+
3940+#ifdef CONFIG_PAX_REFCOUNT
3941+ tvs %xcc, 6
3942+#endif
3943+
3944 casx [%o1], %g1, %g7
3945 cmp %g1, %g7
3946 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3947@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
3948 2: BACKOFF_SPIN(%o2, %o3, 1b)
3949 .size atomic64_sub, .-atomic64_sub
3950
3951+ .globl atomic64_sub_unchecked
3952+ .type atomic64_sub_unchecked,#function
3953+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3954+ BACKOFF_SETUP(%o2)
3955+1: ldx [%o1], %g1
3956+ subcc %g1, %o0, %g7
3957+ casx [%o1], %g1, %g7
3958+ cmp %g1, %g7
3959+ bne,pn %xcc, 2f
3960+ nop
3961+ retl
3962+ nop
3963+2: BACKOFF_SPIN(%o2, %o3, 1b)
3964+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
3965+
3966 .globl atomic64_add_ret
3967 .type atomic64_add_ret,#function
3968 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3969 BACKOFF_SETUP(%o2)
3970 1: ldx [%o1], %g1
3971- add %g1, %o0, %g7
3972+ addcc %g1, %o0, %g7
3973+
3974+#ifdef CONFIG_PAX_REFCOUNT
3975+ tvs %xcc, 6
3976+#endif
3977+
3978 casx [%o1], %g1, %g7
3979 cmp %g1, %g7
3980 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3981@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
3982 2: BACKOFF_SPIN(%o2, %o3, 1b)
3983 .size atomic64_add_ret, .-atomic64_add_ret
3984
3985+ .globl atomic64_add_ret_unchecked
3986+ .type atomic64_add_ret_unchecked,#function
3987+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3988+ BACKOFF_SETUP(%o2)
3989+1: ldx [%o1], %g1
3990+ addcc %g1, %o0, %g7
3991+ casx [%o1], %g1, %g7
3992+ cmp %g1, %g7
3993+ bne,pn %xcc, 2f
3994+ add %g7, %o0, %g7
3995+ mov %g7, %o0
3996+ retl
3997+ nop
3998+2: BACKOFF_SPIN(%o2, %o3, 1b)
3999+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4000+
4001 .globl atomic64_sub_ret
4002 .type atomic64_sub_ret,#function
4003 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4004 BACKOFF_SETUP(%o2)
4005 1: ldx [%o1], %g1
4006- sub %g1, %o0, %g7
4007+ subcc %g1, %o0, %g7
4008+
4009+#ifdef CONFIG_PAX_REFCOUNT
4010+ tvs %xcc, 6
4011+#endif
4012+
4013 casx [%o1], %g1, %g7
4014 cmp %g1, %g7
4015 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4016diff -urNp linux-3.1.1/arch/sparc/lib/ksyms.c linux-3.1.1/arch/sparc/lib/ksyms.c
4017--- linux-3.1.1/arch/sparc/lib/ksyms.c 2011-11-11 15:19:27.000000000 -0500
4018+++ linux-3.1.1/arch/sparc/lib/ksyms.c 2011-11-16 18:39:07.000000000 -0500
4019@@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4020
4021 /* Atomic counter implementation. */
4022 EXPORT_SYMBOL(atomic_add);
4023+EXPORT_SYMBOL(atomic_add_unchecked);
4024 EXPORT_SYMBOL(atomic_add_ret);
4025+EXPORT_SYMBOL(atomic_add_ret_unchecked);
4026 EXPORT_SYMBOL(atomic_sub);
4027+EXPORT_SYMBOL(atomic_sub_unchecked);
4028 EXPORT_SYMBOL(atomic_sub_ret);
4029 EXPORT_SYMBOL(atomic64_add);
4030+EXPORT_SYMBOL(atomic64_add_unchecked);
4031 EXPORT_SYMBOL(atomic64_add_ret);
4032+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4033 EXPORT_SYMBOL(atomic64_sub);
4034+EXPORT_SYMBOL(atomic64_sub_unchecked);
4035 EXPORT_SYMBOL(atomic64_sub_ret);
4036
4037 /* Atomic bit operations. */
4038diff -urNp linux-3.1.1/arch/sparc/lib/Makefile linux-3.1.1/arch/sparc/lib/Makefile
4039--- linux-3.1.1/arch/sparc/lib/Makefile 2011-11-11 15:19:27.000000000 -0500
4040+++ linux-3.1.1/arch/sparc/lib/Makefile 2011-11-16 18:39:07.000000000 -0500
4041@@ -2,7 +2,7 @@
4042 #
4043
4044 asflags-y := -ansi -DST_DIV0=0x02
4045-ccflags-y := -Werror
4046+#ccflags-y := -Werror
4047
4048 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4049 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4050diff -urNp linux-3.1.1/arch/sparc/Makefile linux-3.1.1/arch/sparc/Makefile
4051--- linux-3.1.1/arch/sparc/Makefile 2011-11-11 15:19:27.000000000 -0500
4052+++ linux-3.1.1/arch/sparc/Makefile 2011-11-16 18:40:08.000000000 -0500
4053@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4054 # Export what is needed by arch/sparc/boot/Makefile
4055 export VMLINUX_INIT VMLINUX_MAIN
4056 VMLINUX_INIT := $(head-y) $(init-y)
4057-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4058+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4059 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4060 VMLINUX_MAIN += $(drivers-y) $(net-y)
4061
4062diff -urNp linux-3.1.1/arch/sparc/mm/fault_32.c linux-3.1.1/arch/sparc/mm/fault_32.c
4063--- linux-3.1.1/arch/sparc/mm/fault_32.c 2011-11-11 15:19:27.000000000 -0500
4064+++ linux-3.1.1/arch/sparc/mm/fault_32.c 2011-11-16 18:39:07.000000000 -0500
4065@@ -22,6 +22,9 @@
4066 #include <linux/interrupt.h>
4067 #include <linux/module.h>
4068 #include <linux/kdebug.h>
4069+#include <linux/slab.h>
4070+#include <linux/pagemap.h>
4071+#include <linux/compiler.h>
4072
4073 #include <asm/system.h>
4074 #include <asm/page.h>
4075@@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4076 return safe_compute_effective_address(regs, insn);
4077 }
4078
4079+#ifdef CONFIG_PAX_PAGEEXEC
4080+#ifdef CONFIG_PAX_DLRESOLVE
4081+static void pax_emuplt_close(struct vm_area_struct *vma)
4082+{
4083+ vma->vm_mm->call_dl_resolve = 0UL;
4084+}
4085+
4086+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4087+{
4088+ unsigned int *kaddr;
4089+
4090+ vmf->page = alloc_page(GFP_HIGHUSER);
4091+ if (!vmf->page)
4092+ return VM_FAULT_OOM;
4093+
4094+ kaddr = kmap(vmf->page);
4095+ memset(kaddr, 0, PAGE_SIZE);
4096+ kaddr[0] = 0x9DE3BFA8U; /* save */
4097+ flush_dcache_page(vmf->page);
4098+ kunmap(vmf->page);
4099+ return VM_FAULT_MAJOR;
4100+}
4101+
4102+static const struct vm_operations_struct pax_vm_ops = {
4103+ .close = pax_emuplt_close,
4104+ .fault = pax_emuplt_fault
4105+};
4106+
4107+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4108+{
4109+ int ret;
4110+
4111+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4112+ vma->vm_mm = current->mm;
4113+ vma->vm_start = addr;
4114+ vma->vm_end = addr + PAGE_SIZE;
4115+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4116+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4117+ vma->vm_ops = &pax_vm_ops;
4118+
4119+ ret = insert_vm_struct(current->mm, vma);
4120+ if (ret)
4121+ return ret;
4122+
4123+ ++current->mm->total_vm;
4124+ return 0;
4125+}
4126+#endif
4127+
4128+/*
4129+ * PaX: decide what to do with offenders (regs->pc = fault address)
4130+ *
4131+ * returns 1 when task should be killed
4132+ * 2 when patched PLT trampoline was detected
4133+ * 3 when unpatched PLT trampoline was detected
4134+ */
4135+static int pax_handle_fetch_fault(struct pt_regs *regs)
4136+{
4137+
4138+#ifdef CONFIG_PAX_EMUPLT
4139+ int err;
4140+
4141+ do { /* PaX: patched PLT emulation #1 */
4142+ unsigned int sethi1, sethi2, jmpl;
4143+
4144+ err = get_user(sethi1, (unsigned int *)regs->pc);
4145+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4146+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4147+
4148+ if (err)
4149+ break;
4150+
4151+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4152+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4153+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4154+ {
4155+ unsigned int addr;
4156+
4157+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4158+ addr = regs->u_regs[UREG_G1];
4159+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4160+ regs->pc = addr;
4161+ regs->npc = addr+4;
4162+ return 2;
4163+ }
4164+ } while (0);
4165+
4166+ { /* PaX: patched PLT emulation #2 */
4167+ unsigned int ba;
4168+
4169+ err = get_user(ba, (unsigned int *)regs->pc);
4170+
4171+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4172+ unsigned int addr;
4173+
4174+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4175+ regs->pc = addr;
4176+ regs->npc = addr+4;
4177+ return 2;
4178+ }
4179+ }
4180+
4181+ do { /* PaX: patched PLT emulation #3 */
4182+ unsigned int sethi, jmpl, nop;
4183+
4184+ err = get_user(sethi, (unsigned int *)regs->pc);
4185+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4186+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4187+
4188+ if (err)
4189+ break;
4190+
4191+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4192+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4193+ nop == 0x01000000U)
4194+ {
4195+ unsigned int addr;
4196+
4197+ addr = (sethi & 0x003FFFFFU) << 10;
4198+ regs->u_regs[UREG_G1] = addr;
4199+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4200+ regs->pc = addr;
4201+ regs->npc = addr+4;
4202+ return 2;
4203+ }
4204+ } while (0);
4205+
4206+ do { /* PaX: unpatched PLT emulation step 1 */
4207+ unsigned int sethi, ba, nop;
4208+
4209+ err = get_user(sethi, (unsigned int *)regs->pc);
4210+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
4211+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4212+
4213+ if (err)
4214+ break;
4215+
4216+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4217+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4218+ nop == 0x01000000U)
4219+ {
4220+ unsigned int addr, save, call;
4221+
4222+ if ((ba & 0xFFC00000U) == 0x30800000U)
4223+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4224+ else
4225+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4226+
4227+ err = get_user(save, (unsigned int *)addr);
4228+ err |= get_user(call, (unsigned int *)(addr+4));
4229+ err |= get_user(nop, (unsigned int *)(addr+8));
4230+ if (err)
4231+ break;
4232+
4233+#ifdef CONFIG_PAX_DLRESOLVE
4234+ if (save == 0x9DE3BFA8U &&
4235+ (call & 0xC0000000U) == 0x40000000U &&
4236+ nop == 0x01000000U)
4237+ {
4238+ struct vm_area_struct *vma;
4239+ unsigned long call_dl_resolve;
4240+
4241+ down_read(&current->mm->mmap_sem);
4242+ call_dl_resolve = current->mm->call_dl_resolve;
4243+ up_read(&current->mm->mmap_sem);
4244+ if (likely(call_dl_resolve))
4245+ goto emulate;
4246+
4247+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4248+
4249+ down_write(&current->mm->mmap_sem);
4250+ if (current->mm->call_dl_resolve) {
4251+ call_dl_resolve = current->mm->call_dl_resolve;
4252+ up_write(&current->mm->mmap_sem);
4253+ if (vma)
4254+ kmem_cache_free(vm_area_cachep, vma);
4255+ goto emulate;
4256+ }
4257+
4258+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4259+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4260+ up_write(&current->mm->mmap_sem);
4261+ if (vma)
4262+ kmem_cache_free(vm_area_cachep, vma);
4263+ return 1;
4264+ }
4265+
4266+ if (pax_insert_vma(vma, call_dl_resolve)) {
4267+ up_write(&current->mm->mmap_sem);
4268+ kmem_cache_free(vm_area_cachep, vma);
4269+ return 1;
4270+ }
4271+
4272+ current->mm->call_dl_resolve = call_dl_resolve;
4273+ up_write(&current->mm->mmap_sem);
4274+
4275+emulate:
4276+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4277+ regs->pc = call_dl_resolve;
4278+ regs->npc = addr+4;
4279+ return 3;
4280+ }
4281+#endif
4282+
4283+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4284+ if ((save & 0xFFC00000U) == 0x05000000U &&
4285+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4286+ nop == 0x01000000U)
4287+ {
4288+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4289+ regs->u_regs[UREG_G2] = addr + 4;
4290+ addr = (save & 0x003FFFFFU) << 10;
4291+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4292+ regs->pc = addr;
4293+ regs->npc = addr+4;
4294+ return 3;
4295+ }
4296+ }
4297+ } while (0);
4298+
4299+ do { /* PaX: unpatched PLT emulation step 2 */
4300+ unsigned int save, call, nop;
4301+
4302+ err = get_user(save, (unsigned int *)(regs->pc-4));
4303+ err |= get_user(call, (unsigned int *)regs->pc);
4304+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
4305+ if (err)
4306+ break;
4307+
4308+ if (save == 0x9DE3BFA8U &&
4309+ (call & 0xC0000000U) == 0x40000000U &&
4310+ nop == 0x01000000U)
4311+ {
4312+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4313+
4314+ regs->u_regs[UREG_RETPC] = regs->pc;
4315+ regs->pc = dl_resolve;
4316+ regs->npc = dl_resolve+4;
4317+ return 3;
4318+ }
4319+ } while (0);
4320+#endif
4321+
4322+ return 1;
4323+}
4324+
4325+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4326+{
4327+ unsigned long i;
4328+
4329+ printk(KERN_ERR "PAX: bytes at PC: ");
4330+ for (i = 0; i < 8; i++) {
4331+ unsigned int c;
4332+ if (get_user(c, (unsigned int *)pc+i))
4333+ printk(KERN_CONT "???????? ");
4334+ else
4335+ printk(KERN_CONT "%08x ", c);
4336+ }
4337+ printk("\n");
4338+}
4339+#endif
4340+
4341 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4342 int text_fault)
4343 {
4344@@ -281,6 +546,24 @@ good_area:
4345 if(!(vma->vm_flags & VM_WRITE))
4346 goto bad_area;
4347 } else {
4348+
4349+#ifdef CONFIG_PAX_PAGEEXEC
4350+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4351+ up_read(&mm->mmap_sem);
4352+ switch (pax_handle_fetch_fault(regs)) {
4353+
4354+#ifdef CONFIG_PAX_EMUPLT
4355+ case 2:
4356+ case 3:
4357+ return;
4358+#endif
4359+
4360+ }
4361+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4362+ do_group_exit(SIGKILL);
4363+ }
4364+#endif
4365+
4366 /* Allow reads even for write-only mappings */
4367 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4368 goto bad_area;
4369diff -urNp linux-3.1.1/arch/sparc/mm/fault_64.c linux-3.1.1/arch/sparc/mm/fault_64.c
4370--- linux-3.1.1/arch/sparc/mm/fault_64.c 2011-11-11 15:19:27.000000000 -0500
4371+++ linux-3.1.1/arch/sparc/mm/fault_64.c 2011-11-16 18:40:08.000000000 -0500
4372@@ -21,6 +21,9 @@
4373 #include <linux/kprobes.h>
4374 #include <linux/kdebug.h>
4375 #include <linux/percpu.h>
4376+#include <linux/slab.h>
4377+#include <linux/pagemap.h>
4378+#include <linux/compiler.h>
4379
4380 #include <asm/page.h>
4381 #include <asm/pgtable.h>
4382@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
4383 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4384 regs->tpc);
4385 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4386- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4387+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4388 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4389 dump_stack();
4390 unhandled_fault(regs->tpc, current, regs);
4391@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4392 show_regs(regs);
4393 }
4394
4395+#ifdef CONFIG_PAX_PAGEEXEC
4396+#ifdef CONFIG_PAX_DLRESOLVE
4397+static void pax_emuplt_close(struct vm_area_struct *vma)
4398+{
4399+ vma->vm_mm->call_dl_resolve = 0UL;
4400+}
4401+
4402+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4403+{
4404+ unsigned int *kaddr;
4405+
4406+ vmf->page = alloc_page(GFP_HIGHUSER);
4407+ if (!vmf->page)
4408+ return VM_FAULT_OOM;
4409+
4410+ kaddr = kmap(vmf->page);
4411+ memset(kaddr, 0, PAGE_SIZE);
4412+ kaddr[0] = 0x9DE3BFA8U; /* save */
4413+ flush_dcache_page(vmf->page);
4414+ kunmap(vmf->page);
4415+ return VM_FAULT_MAJOR;
4416+}
4417+
4418+static const struct vm_operations_struct pax_vm_ops = {
4419+ .close = pax_emuplt_close,
4420+ .fault = pax_emuplt_fault
4421+};
4422+
4423+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4424+{
4425+ int ret;
4426+
4427+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4428+ vma->vm_mm = current->mm;
4429+ vma->vm_start = addr;
4430+ vma->vm_end = addr + PAGE_SIZE;
4431+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4432+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4433+ vma->vm_ops = &pax_vm_ops;
4434+
4435+ ret = insert_vm_struct(current->mm, vma);
4436+ if (ret)
4437+ return ret;
4438+
4439+ ++current->mm->total_vm;
4440+ return 0;
4441+}
4442+#endif
4443+
4444+/*
4445+ * PaX: decide what to do with offenders (regs->tpc = fault address)
4446+ *
4447+ * returns 1 when task should be killed
4448+ * 2 when patched PLT trampoline was detected
4449+ * 3 when unpatched PLT trampoline was detected
4450+ */
4451+static int pax_handle_fetch_fault(struct pt_regs *regs)
4452+{
4453+
4454+#ifdef CONFIG_PAX_EMUPLT
4455+ int err;
4456+
4457+ do { /* PaX: patched PLT emulation #1 */
4458+ unsigned int sethi1, sethi2, jmpl;
4459+
4460+ err = get_user(sethi1, (unsigned int *)regs->tpc);
4461+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4462+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4463+
4464+ if (err)
4465+ break;
4466+
4467+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4468+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4469+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4470+ {
4471+ unsigned long addr;
4472+
4473+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4474+ addr = regs->u_regs[UREG_G1];
4475+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4476+
4477+ if (test_thread_flag(TIF_32BIT))
4478+ addr &= 0xFFFFFFFFUL;
4479+
4480+ regs->tpc = addr;
4481+ regs->tnpc = addr+4;
4482+ return 2;
4483+ }
4484+ } while (0);
4485+
4486+ { /* PaX: patched PLT emulation #2 */
4487+ unsigned int ba;
4488+
4489+ err = get_user(ba, (unsigned int *)regs->tpc);
4490+
4491+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4492+ unsigned long addr;
4493+
4494+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4495+
4496+ if (test_thread_flag(TIF_32BIT))
4497+ addr &= 0xFFFFFFFFUL;
4498+
4499+ regs->tpc = addr;
4500+ regs->tnpc = addr+4;
4501+ return 2;
4502+ }
4503+ }
4504+
4505+ do { /* PaX: patched PLT emulation #3 */
4506+ unsigned int sethi, jmpl, nop;
4507+
4508+ err = get_user(sethi, (unsigned int *)regs->tpc);
4509+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4510+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4511+
4512+ if (err)
4513+ break;
4514+
4515+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4516+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4517+ nop == 0x01000000U)
4518+ {
4519+ unsigned long addr;
4520+
4521+ addr = (sethi & 0x003FFFFFU) << 10;
4522+ regs->u_regs[UREG_G1] = addr;
4523+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4524+
4525+ if (test_thread_flag(TIF_32BIT))
4526+ addr &= 0xFFFFFFFFUL;
4527+
4528+ regs->tpc = addr;
4529+ regs->tnpc = addr+4;
4530+ return 2;
4531+ }
4532+ } while (0);
4533+
4534+ do { /* PaX: patched PLT emulation #4 */
4535+ unsigned int sethi, mov1, call, mov2;
4536+
4537+ err = get_user(sethi, (unsigned int *)regs->tpc);
4538+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4539+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
4540+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4541+
4542+ if (err)
4543+ break;
4544+
4545+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4546+ mov1 == 0x8210000FU &&
4547+ (call & 0xC0000000U) == 0x40000000U &&
4548+ mov2 == 0x9E100001U)
4549+ {
4550+ unsigned long addr;
4551+
4552+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4553+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4554+
4555+ if (test_thread_flag(TIF_32BIT))
4556+ addr &= 0xFFFFFFFFUL;
4557+
4558+ regs->tpc = addr;
4559+ regs->tnpc = addr+4;
4560+ return 2;
4561+ }
4562+ } while (0);
4563+
4564+ do { /* PaX: patched PLT emulation #5 */
4565+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4566+
4567+ err = get_user(sethi, (unsigned int *)regs->tpc);
4568+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4569+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4570+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4571+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4572+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4573+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4574+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4575+
4576+ if (err)
4577+ break;
4578+
4579+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4580+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4581+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4582+ (or1 & 0xFFFFE000U) == 0x82106000U &&
4583+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4584+ sllx == 0x83287020U &&
4585+ jmpl == 0x81C04005U &&
4586+ nop == 0x01000000U)
4587+ {
4588+ unsigned long addr;
4589+
4590+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4591+ regs->u_regs[UREG_G1] <<= 32;
4592+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4593+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4594+ regs->tpc = addr;
4595+ regs->tnpc = addr+4;
4596+ return 2;
4597+ }
4598+ } while (0);
4599+
4600+ do { /* PaX: patched PLT emulation #6 */
4601+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
4602+
4603+ err = get_user(sethi, (unsigned int *)regs->tpc);
4604+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4605+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4606+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4607+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
4608+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4609+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4610+
4611+ if (err)
4612+ break;
4613+
4614+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4615+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4616+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4617+ sllx == 0x83287020U &&
4618+ (or & 0xFFFFE000U) == 0x8A116000U &&
4619+ jmpl == 0x81C04005U &&
4620+ nop == 0x01000000U)
4621+ {
4622+ unsigned long addr;
4623+
4624+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4625+ regs->u_regs[UREG_G1] <<= 32;
4626+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4627+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4628+ regs->tpc = addr;
4629+ regs->tnpc = addr+4;
4630+ return 2;
4631+ }
4632+ } while (0);
4633+
4634+ do { /* PaX: unpatched PLT emulation step 1 */
4635+ unsigned int sethi, ba, nop;
4636+
4637+ err = get_user(sethi, (unsigned int *)regs->tpc);
4638+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4639+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4640+
4641+ if (err)
4642+ break;
4643+
4644+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4645+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4646+ nop == 0x01000000U)
4647+ {
4648+ unsigned long addr;
4649+ unsigned int save, call;
4650+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
4651+
4652+ if ((ba & 0xFFC00000U) == 0x30800000U)
4653+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4654+ else
4655+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4656+
4657+ if (test_thread_flag(TIF_32BIT))
4658+ addr &= 0xFFFFFFFFUL;
4659+
4660+ err = get_user(save, (unsigned int *)addr);
4661+ err |= get_user(call, (unsigned int *)(addr+4));
4662+ err |= get_user(nop, (unsigned int *)(addr+8));
4663+ if (err)
4664+ break;
4665+
4666+#ifdef CONFIG_PAX_DLRESOLVE
4667+ if (save == 0x9DE3BFA8U &&
4668+ (call & 0xC0000000U) == 0x40000000U &&
4669+ nop == 0x01000000U)
4670+ {
4671+ struct vm_area_struct *vma;
4672+ unsigned long call_dl_resolve;
4673+
4674+ down_read(&current->mm->mmap_sem);
4675+ call_dl_resolve = current->mm->call_dl_resolve;
4676+ up_read(&current->mm->mmap_sem);
4677+ if (likely(call_dl_resolve))
4678+ goto emulate;
4679+
4680+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4681+
4682+ down_write(&current->mm->mmap_sem);
4683+ if (current->mm->call_dl_resolve) {
4684+ call_dl_resolve = current->mm->call_dl_resolve;
4685+ up_write(&current->mm->mmap_sem);
4686+ if (vma)
4687+ kmem_cache_free(vm_area_cachep, vma);
4688+ goto emulate;
4689+ }
4690+
4691+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4692+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4693+ up_write(&current->mm->mmap_sem);
4694+ if (vma)
4695+ kmem_cache_free(vm_area_cachep, vma);
4696+ return 1;
4697+ }
4698+
4699+ if (pax_insert_vma(vma, call_dl_resolve)) {
4700+ up_write(&current->mm->mmap_sem);
4701+ kmem_cache_free(vm_area_cachep, vma);
4702+ return 1;
4703+ }
4704+
4705+ current->mm->call_dl_resolve = call_dl_resolve;
4706+ up_write(&current->mm->mmap_sem);
4707+
4708+emulate:
4709+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4710+ regs->tpc = call_dl_resolve;
4711+ regs->tnpc = addr+4;
4712+ return 3;
4713+ }
4714+#endif
4715+
4716+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4717+ if ((save & 0xFFC00000U) == 0x05000000U &&
4718+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4719+ nop == 0x01000000U)
4720+ {
4721+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4722+ regs->u_regs[UREG_G2] = addr + 4;
4723+ addr = (save & 0x003FFFFFU) << 10;
4724+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4725+
4726+ if (test_thread_flag(TIF_32BIT))
4727+ addr &= 0xFFFFFFFFUL;
4728+
4729+ regs->tpc = addr;
4730+ regs->tnpc = addr+4;
4731+ return 3;
4732+ }
4733+
4734+ /* PaX: 64-bit PLT stub */
4735+ err = get_user(sethi1, (unsigned int *)addr);
4736+ err |= get_user(sethi2, (unsigned int *)(addr+4));
4737+ err |= get_user(or1, (unsigned int *)(addr+8));
4738+ err |= get_user(or2, (unsigned int *)(addr+12));
4739+ err |= get_user(sllx, (unsigned int *)(addr+16));
4740+ err |= get_user(add, (unsigned int *)(addr+20));
4741+ err |= get_user(jmpl, (unsigned int *)(addr+24));
4742+ err |= get_user(nop, (unsigned int *)(addr+28));
4743+ if (err)
4744+ break;
4745+
4746+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
4747+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4748+ (or1 & 0xFFFFE000U) == 0x88112000U &&
4749+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4750+ sllx == 0x89293020U &&
4751+ add == 0x8A010005U &&
4752+ jmpl == 0x89C14000U &&
4753+ nop == 0x01000000U)
4754+ {
4755+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4756+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4757+ regs->u_regs[UREG_G4] <<= 32;
4758+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4759+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
4760+ regs->u_regs[UREG_G4] = addr + 24;
4761+ addr = regs->u_regs[UREG_G5];
4762+ regs->tpc = addr;
4763+ regs->tnpc = addr+4;
4764+ return 3;
4765+ }
4766+ }
4767+ } while (0);
4768+
4769+#ifdef CONFIG_PAX_DLRESOLVE
4770+ do { /* PaX: unpatched PLT emulation step 2 */
4771+ unsigned int save, call, nop;
4772+
4773+ err = get_user(save, (unsigned int *)(regs->tpc-4));
4774+ err |= get_user(call, (unsigned int *)regs->tpc);
4775+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
4776+ if (err)
4777+ break;
4778+
4779+ if (save == 0x9DE3BFA8U &&
4780+ (call & 0xC0000000U) == 0x40000000U &&
4781+ nop == 0x01000000U)
4782+ {
4783+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4784+
4785+ if (test_thread_flag(TIF_32BIT))
4786+ dl_resolve &= 0xFFFFFFFFUL;
4787+
4788+ regs->u_regs[UREG_RETPC] = regs->tpc;
4789+ regs->tpc = dl_resolve;
4790+ regs->tnpc = dl_resolve+4;
4791+ return 3;
4792+ }
4793+ } while (0);
4794+#endif
4795+
4796+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
4797+ unsigned int sethi, ba, nop;
4798+
4799+ err = get_user(sethi, (unsigned int *)regs->tpc);
4800+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4801+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4802+
4803+ if (err)
4804+ break;
4805+
4806+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4807+ (ba & 0xFFF00000U) == 0x30600000U &&
4808+ nop == 0x01000000U)
4809+ {
4810+ unsigned long addr;
4811+
4812+ addr = (sethi & 0x003FFFFFU) << 10;
4813+ regs->u_regs[UREG_G1] = addr;
4814+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4815+
4816+ if (test_thread_flag(TIF_32BIT))
4817+ addr &= 0xFFFFFFFFUL;
4818+
4819+ regs->tpc = addr;
4820+ regs->tnpc = addr+4;
4821+ return 2;
4822+ }
4823+ } while (0);
4824+
4825+#endif
4826+
4827+ return 1;
4828+}
4829+
4830+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4831+{
4832+ unsigned long i;
4833+
4834+ printk(KERN_ERR "PAX: bytes at PC: ");
4835+ for (i = 0; i < 8; i++) {
4836+ unsigned int c;
4837+ if (get_user(c, (unsigned int *)pc+i))
4838+ printk(KERN_CONT "???????? ");
4839+ else
4840+ printk(KERN_CONT "%08x ", c);
4841+ }
4842+ printk("\n");
4843+}
4844+#endif
4845+
4846 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
4847 {
4848 struct mm_struct *mm = current->mm;
4849@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
4850 if (!vma)
4851 goto bad_area;
4852
4853+#ifdef CONFIG_PAX_PAGEEXEC
4854+ /* PaX: detect ITLB misses on non-exec pages */
4855+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
4856+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
4857+ {
4858+ if (address != regs->tpc)
4859+ goto good_area;
4860+
4861+ up_read(&mm->mmap_sem);
4862+ switch (pax_handle_fetch_fault(regs)) {
4863+
4864+#ifdef CONFIG_PAX_EMUPLT
4865+ case 2:
4866+ case 3:
4867+ return;
4868+#endif
4869+
4870+ }
4871+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
4872+ do_group_exit(SIGKILL);
4873+ }
4874+#endif
4875+
4876 /* Pure DTLB misses do not tell us whether the fault causing
4877 * load/store/atomic was a write or not, it only says that there
4878 * was no match. So in such a case we (carefully) read the
4879diff -urNp linux-3.1.1/arch/sparc/mm/hugetlbpage.c linux-3.1.1/arch/sparc/mm/hugetlbpage.c
4880--- linux-3.1.1/arch/sparc/mm/hugetlbpage.c 2011-11-11 15:19:27.000000000 -0500
4881+++ linux-3.1.1/arch/sparc/mm/hugetlbpage.c 2011-11-16 18:39:07.000000000 -0500
4882@@ -68,7 +68,7 @@ full_search:
4883 }
4884 return -ENOMEM;
4885 }
4886- if (likely(!vma || addr + len <= vma->vm_start)) {
4887+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4888 /*
4889 * Remember the place where we stopped the search:
4890 */
4891@@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
4892 /* make sure it can fit in the remaining address space */
4893 if (likely(addr > len)) {
4894 vma = find_vma(mm, addr-len);
4895- if (!vma || addr <= vma->vm_start) {
4896+ if (check_heap_stack_gap(vma, addr - len, len)) {
4897 /* remember the address as a hint for next time */
4898 return (mm->free_area_cache = addr-len);
4899 }
4900@@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
4901 if (unlikely(mm->mmap_base < len))
4902 goto bottomup;
4903
4904- addr = (mm->mmap_base-len) & HPAGE_MASK;
4905+ addr = mm->mmap_base - len;
4906
4907 do {
4908+ addr &= HPAGE_MASK;
4909 /*
4910 * Lookup failure means no vma is above this address,
4911 * else if new region fits below vma->vm_start,
4912 * return with success:
4913 */
4914 vma = find_vma(mm, addr);
4915- if (likely(!vma || addr+len <= vma->vm_start)) {
4916+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4917 /* remember the address as a hint for next time */
4918 return (mm->free_area_cache = addr);
4919 }
4920@@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
4921 mm->cached_hole_size = vma->vm_start - addr;
4922
4923 /* try just below the current vma->vm_start */
4924- addr = (vma->vm_start-len) & HPAGE_MASK;
4925- } while (likely(len < vma->vm_start));
4926+ addr = skip_heap_stack_gap(vma, len);
4927+ } while (!IS_ERR_VALUE(addr));
4928
4929 bottomup:
4930 /*
4931@@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
4932 if (addr) {
4933 addr = ALIGN(addr, HPAGE_SIZE);
4934 vma = find_vma(mm, addr);
4935- if (task_size - len >= addr &&
4936- (!vma || addr + len <= vma->vm_start))
4937+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4938 return addr;
4939 }
4940 if (mm->get_unmapped_area == arch_get_unmapped_area)
4941diff -urNp linux-3.1.1/arch/sparc/mm/init_32.c linux-3.1.1/arch/sparc/mm/init_32.c
4942--- linux-3.1.1/arch/sparc/mm/init_32.c 2011-11-11 15:19:27.000000000 -0500
4943+++ linux-3.1.1/arch/sparc/mm/init_32.c 2011-11-16 18:39:07.000000000 -0500
4944@@ -316,6 +316,9 @@ extern void device_scan(void);
4945 pgprot_t PAGE_SHARED __read_mostly;
4946 EXPORT_SYMBOL(PAGE_SHARED);
4947
4948+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
4949+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
4950+
4951 void __init paging_init(void)
4952 {
4953 switch(sparc_cpu_model) {
4954@@ -344,17 +347,17 @@ void __init paging_init(void)
4955
4956 /* Initialize the protection map with non-constant, MMU dependent values. */
4957 protection_map[0] = PAGE_NONE;
4958- protection_map[1] = PAGE_READONLY;
4959- protection_map[2] = PAGE_COPY;
4960- protection_map[3] = PAGE_COPY;
4961+ protection_map[1] = PAGE_READONLY_NOEXEC;
4962+ protection_map[2] = PAGE_COPY_NOEXEC;
4963+ protection_map[3] = PAGE_COPY_NOEXEC;
4964 protection_map[4] = PAGE_READONLY;
4965 protection_map[5] = PAGE_READONLY;
4966 protection_map[6] = PAGE_COPY;
4967 protection_map[7] = PAGE_COPY;
4968 protection_map[8] = PAGE_NONE;
4969- protection_map[9] = PAGE_READONLY;
4970- protection_map[10] = PAGE_SHARED;
4971- protection_map[11] = PAGE_SHARED;
4972+ protection_map[9] = PAGE_READONLY_NOEXEC;
4973+ protection_map[10] = PAGE_SHARED_NOEXEC;
4974+ protection_map[11] = PAGE_SHARED_NOEXEC;
4975 protection_map[12] = PAGE_READONLY;
4976 protection_map[13] = PAGE_READONLY;
4977 protection_map[14] = PAGE_SHARED;
4978diff -urNp linux-3.1.1/arch/sparc/mm/Makefile linux-3.1.1/arch/sparc/mm/Makefile
4979--- linux-3.1.1/arch/sparc/mm/Makefile 2011-11-11 15:19:27.000000000 -0500
4980+++ linux-3.1.1/arch/sparc/mm/Makefile 2011-11-16 18:39:07.000000000 -0500
4981@@ -2,7 +2,7 @@
4982 #
4983
4984 asflags-y := -ansi
4985-ccflags-y := -Werror
4986+#ccflags-y := -Werror
4987
4988 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
4989 obj-y += fault_$(BITS).o
4990diff -urNp linux-3.1.1/arch/sparc/mm/srmmu.c linux-3.1.1/arch/sparc/mm/srmmu.c
4991--- linux-3.1.1/arch/sparc/mm/srmmu.c 2011-11-11 15:19:27.000000000 -0500
4992+++ linux-3.1.1/arch/sparc/mm/srmmu.c 2011-11-16 18:39:07.000000000 -0500
4993@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
4994 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
4995 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
4996 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
4997+
4998+#ifdef CONFIG_PAX_PAGEEXEC
4999+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5000+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5001+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5002+#endif
5003+
5004 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5005 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5006
5007diff -urNp linux-3.1.1/arch/um/include/asm/kmap_types.h linux-3.1.1/arch/um/include/asm/kmap_types.h
5008--- linux-3.1.1/arch/um/include/asm/kmap_types.h 2011-11-11 15:19:27.000000000 -0500
5009+++ linux-3.1.1/arch/um/include/asm/kmap_types.h 2011-11-16 18:39:07.000000000 -0500
5010@@ -23,6 +23,7 @@ enum km_type {
5011 KM_IRQ1,
5012 KM_SOFTIRQ0,
5013 KM_SOFTIRQ1,
5014+ KM_CLEARPAGE,
5015 KM_TYPE_NR
5016 };
5017
5018diff -urNp linux-3.1.1/arch/um/include/asm/page.h linux-3.1.1/arch/um/include/asm/page.h
5019--- linux-3.1.1/arch/um/include/asm/page.h 2011-11-11 15:19:27.000000000 -0500
5020+++ linux-3.1.1/arch/um/include/asm/page.h 2011-11-16 18:39:07.000000000 -0500
5021@@ -14,6 +14,9 @@
5022 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5023 #define PAGE_MASK (~(PAGE_SIZE-1))
5024
5025+#define ktla_ktva(addr) (addr)
5026+#define ktva_ktla(addr) (addr)
5027+
5028 #ifndef __ASSEMBLY__
5029
5030 struct page;
5031diff -urNp linux-3.1.1/arch/um/kernel/process.c linux-3.1.1/arch/um/kernel/process.c
5032--- linux-3.1.1/arch/um/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
5033+++ linux-3.1.1/arch/um/kernel/process.c 2011-11-16 18:39:07.000000000 -0500
5034@@ -404,22 +404,6 @@ int singlestepping(void * t)
5035 return 2;
5036 }
5037
5038-/*
5039- * Only x86 and x86_64 have an arch_align_stack().
5040- * All other arches have "#define arch_align_stack(x) (x)"
5041- * in their asm/system.h
5042- * As this is included in UML from asm-um/system-generic.h,
5043- * we can use it to behave as the subarch does.
5044- */
5045-#ifndef arch_align_stack
5046-unsigned long arch_align_stack(unsigned long sp)
5047-{
5048- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5049- sp -= get_random_int() % 8192;
5050- return sp & ~0xf;
5051-}
5052-#endif
5053-
5054 unsigned long get_wchan(struct task_struct *p)
5055 {
5056 unsigned long stack_page, sp, ip;
5057diff -urNp linux-3.1.1/arch/um/Makefile linux-3.1.1/arch/um/Makefile
5058--- linux-3.1.1/arch/um/Makefile 2011-11-11 15:19:27.000000000 -0500
5059+++ linux-3.1.1/arch/um/Makefile 2011-11-16 18:39:07.000000000 -0500
5060@@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINE
5061 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
5062 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
5063
5064+ifdef CONSTIFY_PLUGIN
5065+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5066+endif
5067+
5068 include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
5069
5070 #This will adjust *FLAGS accordingly to the platform.
5071diff -urNp linux-3.1.1/arch/um/sys-i386/shared/sysdep/system.h linux-3.1.1/arch/um/sys-i386/shared/sysdep/system.h
5072--- linux-3.1.1/arch/um/sys-i386/shared/sysdep/system.h 2011-11-11 15:19:27.000000000 -0500
5073+++ linux-3.1.1/arch/um/sys-i386/shared/sysdep/system.h 2011-11-16 18:39:07.000000000 -0500
5074@@ -17,7 +17,7 @@
5075 # define AT_VECTOR_SIZE_ARCH 1
5076 #endif
5077
5078-extern unsigned long arch_align_stack(unsigned long sp);
5079+#define arch_align_stack(x) ((x) & ~0xfUL)
5080
5081 void default_idle(void);
5082
5083diff -urNp linux-3.1.1/arch/um/sys-i386/syscalls.c linux-3.1.1/arch/um/sys-i386/syscalls.c
5084--- linux-3.1.1/arch/um/sys-i386/syscalls.c 2011-11-11 15:19:27.000000000 -0500
5085+++ linux-3.1.1/arch/um/sys-i386/syscalls.c 2011-11-16 18:39:07.000000000 -0500
5086@@ -11,6 +11,21 @@
5087 #include "asm/uaccess.h"
5088 #include "asm/unistd.h"
5089
5090+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5091+{
5092+ unsigned long pax_task_size = TASK_SIZE;
5093+
5094+#ifdef CONFIG_PAX_SEGMEXEC
5095+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5096+ pax_task_size = SEGMEXEC_TASK_SIZE;
5097+#endif
5098+
5099+ if (len > pax_task_size || addr > pax_task_size - len)
5100+ return -EINVAL;
5101+
5102+ return 0;
5103+}
5104+
5105 /*
5106 * The prototype on i386 is:
5107 *
5108diff -urNp linux-3.1.1/arch/um/sys-x86_64/shared/sysdep/system.h linux-3.1.1/arch/um/sys-x86_64/shared/sysdep/system.h
5109--- linux-3.1.1/arch/um/sys-x86_64/shared/sysdep/system.h 2011-11-11 15:19:27.000000000 -0500
5110+++ linux-3.1.1/arch/um/sys-x86_64/shared/sysdep/system.h 2011-11-16 18:39:07.000000000 -0500
5111@@ -17,7 +17,7 @@
5112 # define AT_VECTOR_SIZE_ARCH 1
5113 #endif
5114
5115-extern unsigned long arch_align_stack(unsigned long sp);
5116+#define arch_align_stack(x) ((x) & ~0xfUL)
5117
5118 void default_idle(void);
5119
5120diff -urNp linux-3.1.1/arch/x86/boot/bitops.h linux-3.1.1/arch/x86/boot/bitops.h
5121--- linux-3.1.1/arch/x86/boot/bitops.h 2011-11-11 15:19:27.000000000 -0500
5122+++ linux-3.1.1/arch/x86/boot/bitops.h 2011-11-16 18:39:07.000000000 -0500
5123@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5124 u8 v;
5125 const u32 *p = (const u32 *)addr;
5126
5127- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5128+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5129 return v;
5130 }
5131
5132@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5133
5134 static inline void set_bit(int nr, void *addr)
5135 {
5136- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5137+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5138 }
5139
5140 #endif /* BOOT_BITOPS_H */
5141diff -urNp linux-3.1.1/arch/x86/boot/boot.h linux-3.1.1/arch/x86/boot/boot.h
5142--- linux-3.1.1/arch/x86/boot/boot.h 2011-11-11 15:19:27.000000000 -0500
5143+++ linux-3.1.1/arch/x86/boot/boot.h 2011-11-16 18:39:07.000000000 -0500
5144@@ -85,7 +85,7 @@ static inline void io_delay(void)
5145 static inline u16 ds(void)
5146 {
5147 u16 seg;
5148- asm("movw %%ds,%0" : "=rm" (seg));
5149+ asm volatile("movw %%ds,%0" : "=rm" (seg));
5150 return seg;
5151 }
5152
5153@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
5154 static inline int memcmp(const void *s1, const void *s2, size_t len)
5155 {
5156 u8 diff;
5157- asm("repe; cmpsb; setnz %0"
5158+ asm volatile("repe; cmpsb; setnz %0"
5159 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5160 return diff;
5161 }
5162diff -urNp linux-3.1.1/arch/x86/boot/compressed/head_32.S linux-3.1.1/arch/x86/boot/compressed/head_32.S
5163--- linux-3.1.1/arch/x86/boot/compressed/head_32.S 2011-11-11 15:19:27.000000000 -0500
5164+++ linux-3.1.1/arch/x86/boot/compressed/head_32.S 2011-11-16 18:39:07.000000000 -0500
5165@@ -76,7 +76,7 @@ ENTRY(startup_32)
5166 notl %eax
5167 andl %eax, %ebx
5168 #else
5169- movl $LOAD_PHYSICAL_ADDR, %ebx
5170+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5171 #endif
5172
5173 /* Target address to relocate to for decompression */
5174@@ -162,7 +162,7 @@ relocated:
5175 * and where it was actually loaded.
5176 */
5177 movl %ebp, %ebx
5178- subl $LOAD_PHYSICAL_ADDR, %ebx
5179+ subl $____LOAD_PHYSICAL_ADDR, %ebx
5180 jz 2f /* Nothing to be done if loaded at compiled addr. */
5181 /*
5182 * Process relocations.
5183@@ -170,8 +170,7 @@ relocated:
5184
5185 1: subl $4, %edi
5186 movl (%edi), %ecx
5187- testl %ecx, %ecx
5188- jz 2f
5189+ jecxz 2f
5190 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5191 jmp 1b
5192 2:
5193diff -urNp linux-3.1.1/arch/x86/boot/compressed/head_64.S linux-3.1.1/arch/x86/boot/compressed/head_64.S
5194--- linux-3.1.1/arch/x86/boot/compressed/head_64.S 2011-11-11 15:19:27.000000000 -0500
5195+++ linux-3.1.1/arch/x86/boot/compressed/head_64.S 2011-11-16 18:39:07.000000000 -0500
5196@@ -91,7 +91,7 @@ ENTRY(startup_32)
5197 notl %eax
5198 andl %eax, %ebx
5199 #else
5200- movl $LOAD_PHYSICAL_ADDR, %ebx
5201+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5202 #endif
5203
5204 /* Target address to relocate to for decompression */
5205@@ -233,7 +233,7 @@ ENTRY(startup_64)
5206 notq %rax
5207 andq %rax, %rbp
5208 #else
5209- movq $LOAD_PHYSICAL_ADDR, %rbp
5210+ movq $____LOAD_PHYSICAL_ADDR, %rbp
5211 #endif
5212
5213 /* Target address to relocate to for decompression */
5214diff -urNp linux-3.1.1/arch/x86/boot/compressed/Makefile linux-3.1.1/arch/x86/boot/compressed/Makefile
5215--- linux-3.1.1/arch/x86/boot/compressed/Makefile 2011-11-11 15:19:27.000000000 -0500
5216+++ linux-3.1.1/arch/x86/boot/compressed/Makefile 2011-11-16 18:39:07.000000000 -0500
5217@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
5218 KBUILD_CFLAGS += $(cflags-y)
5219 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5220 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5221+ifdef CONSTIFY_PLUGIN
5222+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5223+endif
5224
5225 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5226 GCOV_PROFILE := n
5227diff -urNp linux-3.1.1/arch/x86/boot/compressed/misc.c linux-3.1.1/arch/x86/boot/compressed/misc.c
5228--- linux-3.1.1/arch/x86/boot/compressed/misc.c 2011-11-11 15:19:27.000000000 -0500
5229+++ linux-3.1.1/arch/x86/boot/compressed/misc.c 2011-11-16 18:39:07.000000000 -0500
5230@@ -310,7 +310,7 @@ static void parse_elf(void *output)
5231 case PT_LOAD:
5232 #ifdef CONFIG_RELOCATABLE
5233 dest = output;
5234- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5235+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5236 #else
5237 dest = (void *)(phdr->p_paddr);
5238 #endif
5239@@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
5240 error("Destination address too large");
5241 #endif
5242 #ifndef CONFIG_RELOCATABLE
5243- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5244+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5245 error("Wrong destination address");
5246 #endif
5247
5248diff -urNp linux-3.1.1/arch/x86/boot/compressed/relocs.c linux-3.1.1/arch/x86/boot/compressed/relocs.c
5249--- linux-3.1.1/arch/x86/boot/compressed/relocs.c 2011-11-11 15:19:27.000000000 -0500
5250+++ linux-3.1.1/arch/x86/boot/compressed/relocs.c 2011-11-16 18:39:07.000000000 -0500
5251@@ -13,8 +13,11 @@
5252
5253 static void die(char *fmt, ...);
5254
5255+#include "../../../../include/generated/autoconf.h"
5256+
5257 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5258 static Elf32_Ehdr ehdr;
5259+static Elf32_Phdr *phdr;
5260 static unsigned long reloc_count, reloc_idx;
5261 static unsigned long *relocs;
5262
5263@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5264 }
5265 }
5266
5267+static void read_phdrs(FILE *fp)
5268+{
5269+ unsigned int i;
5270+
5271+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5272+ if (!phdr) {
5273+ die("Unable to allocate %d program headers\n",
5274+ ehdr.e_phnum);
5275+ }
5276+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5277+ die("Seek to %d failed: %s\n",
5278+ ehdr.e_phoff, strerror(errno));
5279+ }
5280+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5281+ die("Cannot read ELF program headers: %s\n",
5282+ strerror(errno));
5283+ }
5284+ for(i = 0; i < ehdr.e_phnum; i++) {
5285+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5286+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5287+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5288+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5289+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5290+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5291+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5292+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5293+ }
5294+
5295+}
5296+
5297 static void read_shdrs(FILE *fp)
5298 {
5299- int i;
5300+ unsigned int i;
5301 Elf32_Shdr shdr;
5302
5303 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5304@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5305
5306 static void read_strtabs(FILE *fp)
5307 {
5308- int i;
5309+ unsigned int i;
5310 for (i = 0; i < ehdr.e_shnum; i++) {
5311 struct section *sec = &secs[i];
5312 if (sec->shdr.sh_type != SHT_STRTAB) {
5313@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5314
5315 static void read_symtabs(FILE *fp)
5316 {
5317- int i,j;
5318+ unsigned int i,j;
5319 for (i = 0; i < ehdr.e_shnum; i++) {
5320 struct section *sec = &secs[i];
5321 if (sec->shdr.sh_type != SHT_SYMTAB) {
5322@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5323
5324 static void read_relocs(FILE *fp)
5325 {
5326- int i,j;
5327+ unsigned int i,j;
5328+ uint32_t base;
5329+
5330 for (i = 0; i < ehdr.e_shnum; i++) {
5331 struct section *sec = &secs[i];
5332 if (sec->shdr.sh_type != SHT_REL) {
5333@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5334 die("Cannot read symbol table: %s\n",
5335 strerror(errno));
5336 }
5337+ base = 0;
5338+ for (j = 0; j < ehdr.e_phnum; j++) {
5339+ if (phdr[j].p_type != PT_LOAD )
5340+ continue;
5341+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5342+ continue;
5343+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5344+ break;
5345+ }
5346 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5347 Elf32_Rel *rel = &sec->reltab[j];
5348- rel->r_offset = elf32_to_cpu(rel->r_offset);
5349+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5350 rel->r_info = elf32_to_cpu(rel->r_info);
5351 }
5352 }
5353@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5354
5355 static void print_absolute_symbols(void)
5356 {
5357- int i;
5358+ unsigned int i;
5359 printf("Absolute symbols\n");
5360 printf(" Num: Value Size Type Bind Visibility Name\n");
5361 for (i = 0; i < ehdr.e_shnum; i++) {
5362 struct section *sec = &secs[i];
5363 char *sym_strtab;
5364 Elf32_Sym *sh_symtab;
5365- int j;
5366+ unsigned int j;
5367
5368 if (sec->shdr.sh_type != SHT_SYMTAB) {
5369 continue;
5370@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5371
5372 static void print_absolute_relocs(void)
5373 {
5374- int i, printed = 0;
5375+ unsigned int i, printed = 0;
5376
5377 for (i = 0; i < ehdr.e_shnum; i++) {
5378 struct section *sec = &secs[i];
5379 struct section *sec_applies, *sec_symtab;
5380 char *sym_strtab;
5381 Elf32_Sym *sh_symtab;
5382- int j;
5383+ unsigned int j;
5384 if (sec->shdr.sh_type != SHT_REL) {
5385 continue;
5386 }
5387@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5388
5389 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5390 {
5391- int i;
5392+ unsigned int i;
5393 /* Walk through the relocations */
5394 for (i = 0; i < ehdr.e_shnum; i++) {
5395 char *sym_strtab;
5396 Elf32_Sym *sh_symtab;
5397 struct section *sec_applies, *sec_symtab;
5398- int j;
5399+ unsigned int j;
5400 struct section *sec = &secs[i];
5401
5402 if (sec->shdr.sh_type != SHT_REL) {
5403@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5404 !is_rel_reloc(sym_name(sym_strtab, sym))) {
5405 continue;
5406 }
5407+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5408+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5409+ continue;
5410+
5411+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5412+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5413+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5414+ continue;
5415+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5416+ continue;
5417+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5418+ continue;
5419+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5420+ continue;
5421+#endif
5422+
5423 switch (r_type) {
5424 case R_386_NONE:
5425 case R_386_PC32:
5426@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5427
5428 static void emit_relocs(int as_text)
5429 {
5430- int i;
5431+ unsigned int i;
5432 /* Count how many relocations I have and allocate space for them. */
5433 reloc_count = 0;
5434 walk_relocs(count_reloc);
5435@@ -665,6 +725,7 @@ int main(int argc, char **argv)
5436 fname, strerror(errno));
5437 }
5438 read_ehdr(fp);
5439+ read_phdrs(fp);
5440 read_shdrs(fp);
5441 read_strtabs(fp);
5442 read_symtabs(fp);
5443diff -urNp linux-3.1.1/arch/x86/boot/cpucheck.c linux-3.1.1/arch/x86/boot/cpucheck.c
5444--- linux-3.1.1/arch/x86/boot/cpucheck.c 2011-11-11 15:19:27.000000000 -0500
5445+++ linux-3.1.1/arch/x86/boot/cpucheck.c 2011-11-16 18:39:07.000000000 -0500
5446@@ -74,7 +74,7 @@ static int has_fpu(void)
5447 u16 fcw = -1, fsw = -1;
5448 u32 cr0;
5449
5450- asm("movl %%cr0,%0" : "=r" (cr0));
5451+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
5452 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5453 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5454 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5455@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5456 {
5457 u32 f0, f1;
5458
5459- asm("pushfl ; "
5460+ asm volatile("pushfl ; "
5461 "pushfl ; "
5462 "popl %0 ; "
5463 "movl %0,%1 ; "
5464@@ -115,7 +115,7 @@ static void get_flags(void)
5465 set_bit(X86_FEATURE_FPU, cpu.flags);
5466
5467 if (has_eflag(X86_EFLAGS_ID)) {
5468- asm("cpuid"
5469+ asm volatile("cpuid"
5470 : "=a" (max_intel_level),
5471 "=b" (cpu_vendor[0]),
5472 "=d" (cpu_vendor[1]),
5473@@ -124,7 +124,7 @@ static void get_flags(void)
5474
5475 if (max_intel_level >= 0x00000001 &&
5476 max_intel_level <= 0x0000ffff) {
5477- asm("cpuid"
5478+ asm volatile("cpuid"
5479 : "=a" (tfms),
5480 "=c" (cpu.flags[4]),
5481 "=d" (cpu.flags[0])
5482@@ -136,7 +136,7 @@ static void get_flags(void)
5483 cpu.model += ((tfms >> 16) & 0xf) << 4;
5484 }
5485
5486- asm("cpuid"
5487+ asm volatile("cpuid"
5488 : "=a" (max_amd_level)
5489 : "a" (0x80000000)
5490 : "ebx", "ecx", "edx");
5491@@ -144,7 +144,7 @@ static void get_flags(void)
5492 if (max_amd_level >= 0x80000001 &&
5493 max_amd_level <= 0x8000ffff) {
5494 u32 eax = 0x80000001;
5495- asm("cpuid"
5496+ asm volatile("cpuid"
5497 : "+a" (eax),
5498 "=c" (cpu.flags[6]),
5499 "=d" (cpu.flags[1])
5500@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5501 u32 ecx = MSR_K7_HWCR;
5502 u32 eax, edx;
5503
5504- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5505+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5506 eax &= ~(1 << 15);
5507- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5508+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5509
5510 get_flags(); /* Make sure it really did something */
5511 err = check_flags();
5512@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5513 u32 ecx = MSR_VIA_FCR;
5514 u32 eax, edx;
5515
5516- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5517+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5518 eax |= (1<<1)|(1<<7);
5519- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5520+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5521
5522 set_bit(X86_FEATURE_CX8, cpu.flags);
5523 err = check_flags();
5524@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5525 u32 eax, edx;
5526 u32 level = 1;
5527
5528- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5529- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5530- asm("cpuid"
5531+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5532+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5533+ asm volatile("cpuid"
5534 : "+a" (level), "=d" (cpu.flags[0])
5535 : : "ecx", "ebx");
5536- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5537+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5538
5539 err = check_flags();
5540 }
5541diff -urNp linux-3.1.1/arch/x86/boot/header.S linux-3.1.1/arch/x86/boot/header.S
5542--- linux-3.1.1/arch/x86/boot/header.S 2011-11-11 15:19:27.000000000 -0500
5543+++ linux-3.1.1/arch/x86/boot/header.S 2011-11-16 18:39:07.000000000 -0500
5544@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
5545 # single linked list of
5546 # struct setup_data
5547
5548-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
5549+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
5550
5551 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5552 #define VO_INIT_SIZE (VO__end - VO__text)
5553diff -urNp linux-3.1.1/arch/x86/boot/Makefile linux-3.1.1/arch/x86/boot/Makefile
5554--- linux-3.1.1/arch/x86/boot/Makefile 2011-11-11 15:19:27.000000000 -0500
5555+++ linux-3.1.1/arch/x86/boot/Makefile 2011-11-16 18:39:07.000000000 -0500
5556@@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
5557 $(call cc-option, -fno-stack-protector) \
5558 $(call cc-option, -mpreferred-stack-boundary=2)
5559 KBUILD_CFLAGS += $(call cc-option, -m32)
5560+ifdef CONSTIFY_PLUGIN
5561+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5562+endif
5563 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5564 GCOV_PROFILE := n
5565
5566diff -urNp linux-3.1.1/arch/x86/boot/memory.c linux-3.1.1/arch/x86/boot/memory.c
5567--- linux-3.1.1/arch/x86/boot/memory.c 2011-11-11 15:19:27.000000000 -0500
5568+++ linux-3.1.1/arch/x86/boot/memory.c 2011-11-16 18:39:07.000000000 -0500
5569@@ -19,7 +19,7 @@
5570
5571 static int detect_memory_e820(void)
5572 {
5573- int count = 0;
5574+ unsigned int count = 0;
5575 struct biosregs ireg, oreg;
5576 struct e820entry *desc = boot_params.e820_map;
5577 static struct e820entry buf; /* static so it is zeroed */
5578diff -urNp linux-3.1.1/arch/x86/boot/video.c linux-3.1.1/arch/x86/boot/video.c
5579--- linux-3.1.1/arch/x86/boot/video.c 2011-11-11 15:19:27.000000000 -0500
5580+++ linux-3.1.1/arch/x86/boot/video.c 2011-11-16 18:39:07.000000000 -0500
5581@@ -96,7 +96,7 @@ static void store_mode_params(void)
5582 static unsigned int get_entry(void)
5583 {
5584 char entry_buf[4];
5585- int i, len = 0;
5586+ unsigned int i, len = 0;
5587 int key;
5588 unsigned int v;
5589
5590diff -urNp linux-3.1.1/arch/x86/boot/video-vesa.c linux-3.1.1/arch/x86/boot/video-vesa.c
5591--- linux-3.1.1/arch/x86/boot/video-vesa.c 2011-11-11 15:19:27.000000000 -0500
5592+++ linux-3.1.1/arch/x86/boot/video-vesa.c 2011-11-16 18:39:07.000000000 -0500
5593@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
5594
5595 boot_params.screen_info.vesapm_seg = oreg.es;
5596 boot_params.screen_info.vesapm_off = oreg.di;
5597+ boot_params.screen_info.vesapm_size = oreg.cx;
5598 }
5599
5600 /*
5601diff -urNp linux-3.1.1/arch/x86/crypto/aes-x86_64-asm_64.S linux-3.1.1/arch/x86/crypto/aes-x86_64-asm_64.S
5602--- linux-3.1.1/arch/x86/crypto/aes-x86_64-asm_64.S 2011-11-11 15:19:27.000000000 -0500
5603+++ linux-3.1.1/arch/x86/crypto/aes-x86_64-asm_64.S 2011-11-16 18:39:07.000000000 -0500
5604@@ -8,6 +8,8 @@
5605 * including this sentence is retained in full.
5606 */
5607
5608+#include <asm/alternative-asm.h>
5609+
5610 .extern crypto_ft_tab
5611 .extern crypto_it_tab
5612 .extern crypto_fl_tab
5613@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
5614 je B192; \
5615 leaq 32(r9),r9;
5616
5617+#define ret pax_force_retaddr; ret
5618+
5619 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
5620 movq r1,r2; \
5621 movq r3,r4; \
5622diff -urNp linux-3.1.1/arch/x86/crypto/salsa20-x86_64-asm_64.S linux-3.1.1/arch/x86/crypto/salsa20-x86_64-asm_64.S
5623--- linux-3.1.1/arch/x86/crypto/salsa20-x86_64-asm_64.S 2011-11-11 15:19:27.000000000 -0500
5624+++ linux-3.1.1/arch/x86/crypto/salsa20-x86_64-asm_64.S 2011-11-16 18:39:07.000000000 -0500
5625@@ -1,3 +1,5 @@
5626+#include <asm/alternative-asm.h>
5627+
5628 # enter ECRYPT_encrypt_bytes
5629 .text
5630 .p2align 5
5631@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
5632 add %r11,%rsp
5633 mov %rdi,%rax
5634 mov %rsi,%rdx
5635+ pax_force_retaddr
5636 ret
5637 # bytesatleast65:
5638 ._bytesatleast65:
5639@@ -891,6 +894,7 @@ ECRYPT_keysetup:
5640 add %r11,%rsp
5641 mov %rdi,%rax
5642 mov %rsi,%rdx
5643+ pax_force_retaddr
5644 ret
5645 # enter ECRYPT_ivsetup
5646 .text
5647@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
5648 add %r11,%rsp
5649 mov %rdi,%rax
5650 mov %rsi,%rdx
5651+ pax_force_retaddr
5652 ret
5653diff -urNp linux-3.1.1/arch/x86/crypto/twofish-x86_64-asm_64.S linux-3.1.1/arch/x86/crypto/twofish-x86_64-asm_64.S
5654--- linux-3.1.1/arch/x86/crypto/twofish-x86_64-asm_64.S 2011-11-11 15:19:27.000000000 -0500
5655+++ linux-3.1.1/arch/x86/crypto/twofish-x86_64-asm_64.S 2011-11-16 18:39:07.000000000 -0500
5656@@ -21,6 +21,7 @@
5657 .text
5658
5659 #include <asm/asm-offsets.h>
5660+#include <asm/alternative-asm.h>
5661
5662 #define a_offset 0
5663 #define b_offset 4
5664@@ -269,6 +270,7 @@ twofish_enc_blk:
5665
5666 popq R1
5667 movq $1,%rax
5668+ pax_force_retaddr
5669 ret
5670
5671 twofish_dec_blk:
5672@@ -321,4 +323,5 @@ twofish_dec_blk:
5673
5674 popq R1
5675 movq $1,%rax
5676+ pax_force_retaddr
5677 ret
5678diff -urNp linux-3.1.1/arch/x86/ia32/ia32_aout.c linux-3.1.1/arch/x86/ia32/ia32_aout.c
5679--- linux-3.1.1/arch/x86/ia32/ia32_aout.c 2011-11-11 15:19:27.000000000 -0500
5680+++ linux-3.1.1/arch/x86/ia32/ia32_aout.c 2011-11-16 18:40:08.000000000 -0500
5681@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
5682 unsigned long dump_start, dump_size;
5683 struct user32 dump;
5684
5685+ memset(&dump, 0, sizeof(dump));
5686+
5687 fs = get_fs();
5688 set_fs(KERNEL_DS);
5689 has_dumped = 1;
5690diff -urNp linux-3.1.1/arch/x86/ia32/ia32entry.S linux-3.1.1/arch/x86/ia32/ia32entry.S
5691--- linux-3.1.1/arch/x86/ia32/ia32entry.S 2011-11-11 15:19:27.000000000 -0500
5692+++ linux-3.1.1/arch/x86/ia32/ia32entry.S 2011-11-16 18:40:08.000000000 -0500
5693@@ -13,7 +13,9 @@
5694 #include <asm/thread_info.h>
5695 #include <asm/segment.h>
5696 #include <asm/irqflags.h>
5697+#include <asm/pgtable.h>
5698 #include <linux/linkage.h>
5699+#include <asm/alternative-asm.h>
5700
5701 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
5702 #include <linux/elf-em.h>
5703@@ -95,6 +97,29 @@ ENTRY(native_irq_enable_sysexit)
5704 ENDPROC(native_irq_enable_sysexit)
5705 #endif
5706
5707+ .macro pax_enter_kernel_user
5708+#ifdef CONFIG_PAX_MEMORY_UDEREF
5709+ call pax_enter_kernel_user
5710+#endif
5711+ .endm
5712+
5713+ .macro pax_exit_kernel_user
5714+#ifdef CONFIG_PAX_MEMORY_UDEREF
5715+ call pax_exit_kernel_user
5716+#endif
5717+#ifdef CONFIG_PAX_RANDKSTACK
5718+ pushq %rax
5719+ call pax_randomize_kstack
5720+ popq %rax
5721+#endif
5722+ .endm
5723+
5724+ .macro pax_erase_kstack
5725+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5726+ call pax_erase_kstack
5727+#endif
5728+ .endm
5729+
5730 /*
5731 * 32bit SYSENTER instruction entry.
5732 *
5733@@ -121,7 +146,7 @@ ENTRY(ia32_sysenter_target)
5734 CFI_REGISTER rsp,rbp
5735 SWAPGS_UNSAFE_STACK
5736 movq PER_CPU_VAR(kernel_stack), %rsp
5737- addq $(KERNEL_STACK_OFFSET),%rsp
5738+ pax_enter_kernel_user
5739 /*
5740 * No need to follow this irqs on/off section: the syscall
5741 * disabled irqs, here we enable it straight after entry:
5742@@ -134,7 +159,8 @@ ENTRY(ia32_sysenter_target)
5743 CFI_REL_OFFSET rsp,0
5744 pushfq_cfi
5745 /*CFI_REL_OFFSET rflags,0*/
5746- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
5747+ GET_THREAD_INFO(%r10)
5748+ movl TI_sysenter_return(%r10), %r10d
5749 CFI_REGISTER rip,r10
5750 pushq_cfi $__USER32_CS
5751 /*CFI_REL_OFFSET cs,0*/
5752@@ -146,6 +172,12 @@ ENTRY(ia32_sysenter_target)
5753 SAVE_ARGS 0,1,0
5754 /* no need to do an access_ok check here because rbp has been
5755 32bit zero extended */
5756+
5757+#ifdef CONFIG_PAX_MEMORY_UDEREF
5758+ mov $PAX_USER_SHADOW_BASE,%r10
5759+ add %r10,%rbp
5760+#endif
5761+
5762 1: movl (%rbp),%ebp
5763 .section __ex_table,"a"
5764 .quad 1b,ia32_badarg
5765@@ -168,6 +200,8 @@ sysenter_dispatch:
5766 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5767 jnz sysexit_audit
5768 sysexit_from_sys_call:
5769+ pax_exit_kernel_user
5770+ pax_erase_kstack
5771 andl $~TS_COMPAT,TI_status(%r10)
5772 /* clear IF, that popfq doesn't enable interrupts early */
5773 andl $~0x200,EFLAGS-R11(%rsp)
5774@@ -194,6 +228,9 @@ sysexit_from_sys_call:
5775 movl %eax,%esi /* 2nd arg: syscall number */
5776 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
5777 call audit_syscall_entry
5778+
5779+ pax_erase_kstack
5780+
5781 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
5782 cmpq $(IA32_NR_syscalls-1),%rax
5783 ja ia32_badsys
5784@@ -246,6 +283,9 @@ sysenter_tracesys:
5785 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
5786 movq %rsp,%rdi /* &pt_regs -> arg1 */
5787 call syscall_trace_enter
5788+
5789+ pax_erase_kstack
5790+
5791 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5792 RESTORE_REST
5793 cmpq $(IA32_NR_syscalls-1),%rax
5794@@ -277,19 +317,24 @@ ENDPROC(ia32_sysenter_target)
5795 ENTRY(ia32_cstar_target)
5796 CFI_STARTPROC32 simple
5797 CFI_SIGNAL_FRAME
5798- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
5799+ CFI_DEF_CFA rsp,0
5800 CFI_REGISTER rip,rcx
5801 /*CFI_REGISTER rflags,r11*/
5802 SWAPGS_UNSAFE_STACK
5803 movl %esp,%r8d
5804 CFI_REGISTER rsp,r8
5805 movq PER_CPU_VAR(kernel_stack),%rsp
5806+
5807+#ifdef CONFIG_PAX_MEMORY_UDEREF
5808+ pax_enter_kernel_user
5809+#endif
5810+
5811 /*
5812 * No need to follow this irqs on/off section: the syscall
5813 * disabled irqs and here we enable it straight after entry:
5814 */
5815 ENABLE_INTERRUPTS(CLBR_NONE)
5816- SAVE_ARGS 8,0,0
5817+ SAVE_ARGS 8*6,0,0
5818 movl %eax,%eax /* zero extension */
5819 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
5820 movq %rcx,RIP-ARGOFFSET(%rsp)
5821@@ -305,6 +350,12 @@ ENTRY(ia32_cstar_target)
5822 /* no need to do an access_ok check here because r8 has been
5823 32bit zero extended */
5824 /* hardware stack frame is complete now */
5825+
5826+#ifdef CONFIG_PAX_MEMORY_UDEREF
5827+ mov $PAX_USER_SHADOW_BASE,%r10
5828+ add %r10,%r8
5829+#endif
5830+
5831 1: movl (%r8),%r9d
5832 .section __ex_table,"a"
5833 .quad 1b,ia32_badarg
5834@@ -327,6 +378,8 @@ cstar_dispatch:
5835 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5836 jnz sysretl_audit
5837 sysretl_from_sys_call:
5838+ pax_exit_kernel_user
5839+ pax_erase_kstack
5840 andl $~TS_COMPAT,TI_status(%r10)
5841 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
5842 movl RIP-ARGOFFSET(%rsp),%ecx
5843@@ -364,6 +417,9 @@ cstar_tracesys:
5844 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5845 movq %rsp,%rdi /* &pt_regs -> arg1 */
5846 call syscall_trace_enter
5847+
5848+ pax_erase_kstack
5849+
5850 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
5851 RESTORE_REST
5852 xchgl %ebp,%r9d
5853@@ -409,6 +465,7 @@ ENTRY(ia32_syscall)
5854 CFI_REL_OFFSET rip,RIP-RIP
5855 PARAVIRT_ADJUST_EXCEPTION_FRAME
5856 SWAPGS
5857+ pax_enter_kernel_user
5858 /*
5859 * No need to follow this irqs on/off section: the syscall
5860 * disabled irqs and here we enable it straight after entry:
5861@@ -441,6 +498,9 @@ ia32_tracesys:
5862 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5863 movq %rsp,%rdi /* &pt_regs -> arg1 */
5864 call syscall_trace_enter
5865+
5866+ pax_erase_kstack
5867+
5868 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5869 RESTORE_REST
5870 cmpq $(IA32_NR_syscalls-1),%rax
5871@@ -455,6 +515,7 @@ ia32_badsys:
5872
5873 quiet_ni_syscall:
5874 movq $-ENOSYS,%rax
5875+ pax_force_retaddr
5876 ret
5877 CFI_ENDPROC
5878
5879diff -urNp linux-3.1.1/arch/x86/ia32/ia32_signal.c linux-3.1.1/arch/x86/ia32/ia32_signal.c
5880--- linux-3.1.1/arch/x86/ia32/ia32_signal.c 2011-11-11 15:19:27.000000000 -0500
5881+++ linux-3.1.1/arch/x86/ia32/ia32_signal.c 2011-11-16 18:39:07.000000000 -0500
5882@@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const
5883 }
5884 seg = get_fs();
5885 set_fs(KERNEL_DS);
5886- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
5887+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
5888 set_fs(seg);
5889 if (ret >= 0 && uoss_ptr) {
5890 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
5891@@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct
5892 */
5893 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
5894 size_t frame_size,
5895- void **fpstate)
5896+ void __user **fpstate)
5897 {
5898 unsigned long sp;
5899
5900@@ -391,7 +391,7 @@ static void __user *get_sigframe(struct
5901
5902 if (used_math()) {
5903 sp = sp - sig_xstate_ia32_size;
5904- *fpstate = (struct _fpstate_ia32 *) sp;
5905+ *fpstate = (struct _fpstate_ia32 __user *) sp;
5906 if (save_i387_xstate_ia32(*fpstate) < 0)
5907 return (void __user *) -1L;
5908 }
5909@@ -399,7 +399,7 @@ static void __user *get_sigframe(struct
5910 sp -= frame_size;
5911 /* Align the stack pointer according to the i386 ABI,
5912 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
5913- sp = ((sp + 4) & -16ul) - 4;
5914+ sp = ((sp - 12) & -16ul) - 4;
5915 return (void __user *) sp;
5916 }
5917
5918@@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_s
5919 * These are actually not used anymore, but left because some
5920 * gdb versions depend on them as a marker.
5921 */
5922- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5923+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
5924 } put_user_catch(err);
5925
5926 if (err)
5927@@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct
5928 0xb8,
5929 __NR_ia32_rt_sigreturn,
5930 0x80cd,
5931- 0,
5932+ 0
5933 };
5934
5935 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
5936@@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct
5937
5938 if (ka->sa.sa_flags & SA_RESTORER)
5939 restorer = ka->sa.sa_restorer;
5940+ else if (current->mm->context.vdso)
5941+ /* Return stub is in 32bit vsyscall page */
5942+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
5943 else
5944- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
5945- rt_sigreturn);
5946+ restorer = &frame->retcode;
5947 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
5948
5949 /*
5950 * Not actually used anymore, but left because some gdb
5951 * versions need it.
5952 */
5953- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5954+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
5955 } put_user_catch(err);
5956
5957 if (err)
5958diff -urNp linux-3.1.1/arch/x86/ia32/sys_ia32.c linux-3.1.1/arch/x86/ia32/sys_ia32.c
5959--- linux-3.1.1/arch/x86/ia32/sys_ia32.c 2011-11-11 15:19:27.000000000 -0500
5960+++ linux-3.1.1/arch/x86/ia32/sys_ia32.c 2011-11-16 18:39:07.000000000 -0500
5961@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsign
5962 */
5963 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
5964 {
5965- typeof(ubuf->st_uid) uid = 0;
5966- typeof(ubuf->st_gid) gid = 0;
5967+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
5968+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
5969 SET_UID(uid, stat->uid);
5970 SET_GID(gid, stat->gid);
5971 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
5972@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int
5973 }
5974 set_fs(KERNEL_DS);
5975 ret = sys_rt_sigprocmask(how,
5976- set ? (sigset_t __user *)&s : NULL,
5977- oset ? (sigset_t __user *)&s : NULL,
5978+ set ? (sigset_t __force_user *)&s : NULL,
5979+ oset ? (sigset_t __force_user *)&s : NULL,
5980 sigsetsize);
5981 set_fs(old_fs);
5982 if (ret)
5983@@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int
5984 return alarm_setitimer(seconds);
5985 }
5986
5987-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
5988+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
5989 int options)
5990 {
5991 return compat_sys_wait4(pid, stat_addr, options, NULL);
5992@@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_inter
5993 mm_segment_t old_fs = get_fs();
5994
5995 set_fs(KERNEL_DS);
5996- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
5997+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
5998 set_fs(old_fs);
5999 if (put_compat_timespec(&t, interval))
6000 return -EFAULT;
6001@@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(comp
6002 mm_segment_t old_fs = get_fs();
6003
6004 set_fs(KERNEL_DS);
6005- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
6006+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
6007 set_fs(old_fs);
6008 if (!ret) {
6009 switch (_NSIG_WORDS) {
6010@@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(in
6011 if (copy_siginfo_from_user32(&info, uinfo))
6012 return -EFAULT;
6013 set_fs(KERNEL_DS);
6014- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
6015+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
6016 set_fs(old_fs);
6017 return ret;
6018 }
6019@@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_f
6020 return -EFAULT;
6021
6022 set_fs(KERNEL_DS);
6023- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
6024+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
6025 count);
6026 set_fs(old_fs);
6027
6028diff -urNp linux-3.1.1/arch/x86/include/asm/alternative-asm.h linux-3.1.1/arch/x86/include/asm/alternative-asm.h
6029--- linux-3.1.1/arch/x86/include/asm/alternative-asm.h 2011-11-11 15:19:27.000000000 -0500
6030+++ linux-3.1.1/arch/x86/include/asm/alternative-asm.h 2011-11-16 18:39:07.000000000 -0500
6031@@ -15,6 +15,20 @@
6032 .endm
6033 #endif
6034
6035+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
6036+ .macro pax_force_retaddr rip=0
6037+ btsq $63,\rip(%rsp)
6038+ .endm
6039+ .macro pax_force_fptr ptr
6040+ btsq $63,\ptr
6041+ .endm
6042+#else
6043+ .macro pax_force_retaddr rip=0
6044+ .endm
6045+ .macro pax_force_fptr ptr
6046+ .endm
6047+#endif
6048+
6049 .macro altinstruction_entry orig alt feature orig_len alt_len
6050 .long \orig - .
6051 .long \alt - .
6052diff -urNp linux-3.1.1/arch/x86/include/asm/alternative.h linux-3.1.1/arch/x86/include/asm/alternative.h
6053--- linux-3.1.1/arch/x86/include/asm/alternative.h 2011-11-11 15:19:27.000000000 -0500
6054+++ linux-3.1.1/arch/x86/include/asm/alternative.h 2011-11-16 18:39:07.000000000 -0500
6055@@ -89,7 +89,7 @@ static inline int alternatives_text_rese
6056 ".section .discard,\"aw\",@progbits\n" \
6057 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
6058 ".previous\n" \
6059- ".section .altinstr_replacement, \"ax\"\n" \
6060+ ".section .altinstr_replacement, \"a\"\n" \
6061 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6062 ".previous"
6063
6064diff -urNp linux-3.1.1/arch/x86/include/asm/apic.h linux-3.1.1/arch/x86/include/asm/apic.h
6065--- linux-3.1.1/arch/x86/include/asm/apic.h 2011-11-11 15:19:27.000000000 -0500
6066+++ linux-3.1.1/arch/x86/include/asm/apic.h 2011-11-16 18:39:07.000000000 -0500
6067@@ -45,7 +45,7 @@ static inline void generic_apic_probe(vo
6068
6069 #ifdef CONFIG_X86_LOCAL_APIC
6070
6071-extern unsigned int apic_verbosity;
6072+extern int apic_verbosity;
6073 extern int local_apic_timer_c2_ok;
6074
6075 extern int disable_apic;
6076diff -urNp linux-3.1.1/arch/x86/include/asm/apm.h linux-3.1.1/arch/x86/include/asm/apm.h
6077--- linux-3.1.1/arch/x86/include/asm/apm.h 2011-11-11 15:19:27.000000000 -0500
6078+++ linux-3.1.1/arch/x86/include/asm/apm.h 2011-11-16 18:39:07.000000000 -0500
6079@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
6080 __asm__ __volatile__(APM_DO_ZERO_SEGS
6081 "pushl %%edi\n\t"
6082 "pushl %%ebp\n\t"
6083- "lcall *%%cs:apm_bios_entry\n\t"
6084+ "lcall *%%ss:apm_bios_entry\n\t"
6085 "setc %%al\n\t"
6086 "popl %%ebp\n\t"
6087 "popl %%edi\n\t"
6088@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
6089 __asm__ __volatile__(APM_DO_ZERO_SEGS
6090 "pushl %%edi\n\t"
6091 "pushl %%ebp\n\t"
6092- "lcall *%%cs:apm_bios_entry\n\t"
6093+ "lcall *%%ss:apm_bios_entry\n\t"
6094 "setc %%bl\n\t"
6095 "popl %%ebp\n\t"
6096 "popl %%edi\n\t"
6097diff -urNp linux-3.1.1/arch/x86/include/asm/atomic64_32.h linux-3.1.1/arch/x86/include/asm/atomic64_32.h
6098--- linux-3.1.1/arch/x86/include/asm/atomic64_32.h 2011-11-11 15:19:27.000000000 -0500
6099+++ linux-3.1.1/arch/x86/include/asm/atomic64_32.h 2011-11-16 18:39:07.000000000 -0500
6100@@ -12,6 +12,14 @@ typedef struct {
6101 u64 __aligned(8) counter;
6102 } atomic64_t;
6103
6104+#ifdef CONFIG_PAX_REFCOUNT
6105+typedef struct {
6106+ u64 __aligned(8) counter;
6107+} atomic64_unchecked_t;
6108+#else
6109+typedef atomic64_t atomic64_unchecked_t;
6110+#endif
6111+
6112 #define ATOMIC64_INIT(val) { (val) }
6113
6114 #ifdef CONFIG_X86_CMPXCHG64
6115@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
6116 }
6117
6118 /**
6119+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
6120+ * @p: pointer to type atomic64_unchecked_t
6121+ * @o: expected value
6122+ * @n: new value
6123+ *
6124+ * Atomically sets @v to @n if it was equal to @o and returns
6125+ * the old value.
6126+ */
6127+
6128+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
6129+{
6130+ return cmpxchg64(&v->counter, o, n);
6131+}
6132+
6133+/**
6134 * atomic64_xchg - xchg atomic64 variable
6135 * @v: pointer to type atomic64_t
6136 * @n: value to assign
6137@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
6138 }
6139
6140 /**
6141+ * atomic64_set_unchecked - set atomic64 variable
6142+ * @v: pointer to type atomic64_unchecked_t
6143+ * @n: value to assign
6144+ *
6145+ * Atomically sets the value of @v to @n.
6146+ */
6147+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
6148+{
6149+ unsigned high = (unsigned)(i >> 32);
6150+ unsigned low = (unsigned)i;
6151+ asm volatile(ATOMIC64_ALTERNATIVE(set)
6152+ : "+b" (low), "+c" (high)
6153+ : "S" (v)
6154+ : "eax", "edx", "memory"
6155+ );
6156+}
6157+
6158+/**
6159 * atomic64_read - read atomic64 variable
6160 * @v: pointer to type atomic64_t
6161 *
6162@@ -93,6 +134,22 @@ static inline long long atomic64_read(at
6163 }
6164
6165 /**
6166+ * atomic64_read_unchecked - read atomic64 variable
6167+ * @v: pointer to type atomic64_unchecked_t
6168+ *
6169+ * Atomically reads the value of @v and returns it.
6170+ */
6171+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
6172+{
6173+ long long r;
6174+ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
6175+ : "=A" (r), "+c" (v)
6176+ : : "memory"
6177+ );
6178+ return r;
6179+ }
6180+
6181+/**
6182 * atomic64_add_return - add and return
6183 * @i: integer value to add
6184 * @v: pointer to type atomic64_t
6185@@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
6186 return i;
6187 }
6188
6189+/**
6190+ * atomic64_add_return_unchecked - add and return
6191+ * @i: integer value to add
6192+ * @v: pointer to type atomic64_unchecked_t
6193+ *
6194+ * Atomically adds @i to @v and returns @i + *@v
6195+ */
6196+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
6197+{
6198+ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
6199+ : "+A" (i), "+c" (v)
6200+ : : "memory"
6201+ );
6202+ return i;
6203+}
6204+
6205 /*
6206 * Other variants with different arithmetic operators:
6207 */
6208@@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
6209 return a;
6210 }
6211
6212+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6213+{
6214+ long long a;
6215+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
6216+ : "=A" (a)
6217+ : "S" (v)
6218+ : "memory", "ecx"
6219+ );
6220+ return a;
6221+}
6222+
6223 static inline long long atomic64_dec_return(atomic64_t *v)
6224 {
6225 long long a;
6226@@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
6227 }
6228
6229 /**
6230+ * atomic64_add_unchecked - add integer to atomic64 variable
6231+ * @i: integer value to add
6232+ * @v: pointer to type atomic64_unchecked_t
6233+ *
6234+ * Atomically adds @i to @v.
6235+ */
6236+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
6237+{
6238+ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
6239+ : "+A" (i), "+c" (v)
6240+ : : "memory"
6241+ );
6242+ return i;
6243+}
6244+
6245+/**
6246 * atomic64_sub - subtract the atomic64 variable
6247 * @i: integer value to subtract
6248 * @v: pointer to type atomic64_t
6249diff -urNp linux-3.1.1/arch/x86/include/asm/atomic64_64.h linux-3.1.1/arch/x86/include/asm/atomic64_64.h
6250--- linux-3.1.1/arch/x86/include/asm/atomic64_64.h 2011-11-11 15:19:27.000000000 -0500
6251+++ linux-3.1.1/arch/x86/include/asm/atomic64_64.h 2011-11-16 18:39:07.000000000 -0500
6252@@ -18,7 +18,19 @@
6253 */
6254 static inline long atomic64_read(const atomic64_t *v)
6255 {
6256- return (*(volatile long *)&(v)->counter);
6257+ return (*(volatile const long *)&(v)->counter);
6258+}
6259+
6260+/**
6261+ * atomic64_read_unchecked - read atomic64 variable
6262+ * @v: pointer of type atomic64_unchecked_t
6263+ *
6264+ * Atomically reads the value of @v.
6265+ * Doesn't imply a read memory barrier.
6266+ */
6267+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6268+{
6269+ return (*(volatile const long *)&(v)->counter);
6270 }
6271
6272 /**
6273@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6274 }
6275
6276 /**
6277+ * atomic64_set_unchecked - set atomic64 variable
6278+ * @v: pointer to type atomic64_unchecked_t
6279+ * @i: required value
6280+ *
6281+ * Atomically sets the value of @v to @i.
6282+ */
6283+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6284+{
6285+ v->counter = i;
6286+}
6287+
6288+/**
6289 * atomic64_add - add integer to atomic64 variable
6290 * @i: integer value to add
6291 * @v: pointer to type atomic64_t
6292@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6293 */
6294 static inline void atomic64_add(long i, atomic64_t *v)
6295 {
6296+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
6297+
6298+#ifdef CONFIG_PAX_REFCOUNT
6299+ "jno 0f\n"
6300+ LOCK_PREFIX "subq %1,%0\n"
6301+ "int $4\n0:\n"
6302+ _ASM_EXTABLE(0b, 0b)
6303+#endif
6304+
6305+ : "=m" (v->counter)
6306+ : "er" (i), "m" (v->counter));
6307+}
6308+
6309+/**
6310+ * atomic64_add_unchecked - add integer to atomic64 variable
6311+ * @i: integer value to add
6312+ * @v: pointer to type atomic64_unchecked_t
6313+ *
6314+ * Atomically adds @i to @v.
6315+ */
6316+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6317+{
6318 asm volatile(LOCK_PREFIX "addq %1,%0"
6319 : "=m" (v->counter)
6320 : "er" (i), "m" (v->counter));
6321@@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
6322 */
6323 static inline void atomic64_sub(long i, atomic64_t *v)
6324 {
6325- asm volatile(LOCK_PREFIX "subq %1,%0"
6326+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6327+
6328+#ifdef CONFIG_PAX_REFCOUNT
6329+ "jno 0f\n"
6330+ LOCK_PREFIX "addq %1,%0\n"
6331+ "int $4\n0:\n"
6332+ _ASM_EXTABLE(0b, 0b)
6333+#endif
6334+
6335+ : "=m" (v->counter)
6336+ : "er" (i), "m" (v->counter));
6337+}
6338+
6339+/**
6340+ * atomic64_sub_unchecked - subtract the atomic64 variable
6341+ * @i: integer value to subtract
6342+ * @v: pointer to type atomic64_unchecked_t
6343+ *
6344+ * Atomically subtracts @i from @v.
6345+ */
6346+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6347+{
6348+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6349 : "=m" (v->counter)
6350 : "er" (i), "m" (v->counter));
6351 }
6352@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
6353 {
6354 unsigned char c;
6355
6356- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6357+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
6358+
6359+#ifdef CONFIG_PAX_REFCOUNT
6360+ "jno 0f\n"
6361+ LOCK_PREFIX "addq %2,%0\n"
6362+ "int $4\n0:\n"
6363+ _ASM_EXTABLE(0b, 0b)
6364+#endif
6365+
6366+ "sete %1\n"
6367 : "=m" (v->counter), "=qm" (c)
6368 : "er" (i), "m" (v->counter) : "memory");
6369 return c;
6370@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
6371 */
6372 static inline void atomic64_inc(atomic64_t *v)
6373 {
6374+ asm volatile(LOCK_PREFIX "incq %0\n"
6375+
6376+#ifdef CONFIG_PAX_REFCOUNT
6377+ "jno 0f\n"
6378+ LOCK_PREFIX "decq %0\n"
6379+ "int $4\n0:\n"
6380+ _ASM_EXTABLE(0b, 0b)
6381+#endif
6382+
6383+ : "=m" (v->counter)
6384+ : "m" (v->counter));
6385+}
6386+
6387+/**
6388+ * atomic64_inc_unchecked - increment atomic64 variable
6389+ * @v: pointer to type atomic64_unchecked_t
6390+ *
6391+ * Atomically increments @v by 1.
6392+ */
6393+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6394+{
6395 asm volatile(LOCK_PREFIX "incq %0"
6396 : "=m" (v->counter)
6397 : "m" (v->counter));
6398@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
6399 */
6400 static inline void atomic64_dec(atomic64_t *v)
6401 {
6402- asm volatile(LOCK_PREFIX "decq %0"
6403+ asm volatile(LOCK_PREFIX "decq %0\n"
6404+
6405+#ifdef CONFIG_PAX_REFCOUNT
6406+ "jno 0f\n"
6407+ LOCK_PREFIX "incq %0\n"
6408+ "int $4\n0:\n"
6409+ _ASM_EXTABLE(0b, 0b)
6410+#endif
6411+
6412+ : "=m" (v->counter)
6413+ : "m" (v->counter));
6414+}
6415+
6416+/**
6417+ * atomic64_dec_unchecked - decrement atomic64 variable
6418+ * @v: pointer to type atomic64_t
6419+ *
6420+ * Atomically decrements @v by 1.
6421+ */
6422+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6423+{
6424+ asm volatile(LOCK_PREFIX "decq %0\n"
6425 : "=m" (v->counter)
6426 : "m" (v->counter));
6427 }
6428@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
6429 {
6430 unsigned char c;
6431
6432- asm volatile(LOCK_PREFIX "decq %0; sete %1"
6433+ asm volatile(LOCK_PREFIX "decq %0\n"
6434+
6435+#ifdef CONFIG_PAX_REFCOUNT
6436+ "jno 0f\n"
6437+ LOCK_PREFIX "incq %0\n"
6438+ "int $4\n0:\n"
6439+ _ASM_EXTABLE(0b, 0b)
6440+#endif
6441+
6442+ "sete %1\n"
6443 : "=m" (v->counter), "=qm" (c)
6444 : "m" (v->counter) : "memory");
6445 return c != 0;
6446@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
6447 {
6448 unsigned char c;
6449
6450- asm volatile(LOCK_PREFIX "incq %0; sete %1"
6451+ asm volatile(LOCK_PREFIX "incq %0\n"
6452+
6453+#ifdef CONFIG_PAX_REFCOUNT
6454+ "jno 0f\n"
6455+ LOCK_PREFIX "decq %0\n"
6456+ "int $4\n0:\n"
6457+ _ASM_EXTABLE(0b, 0b)
6458+#endif
6459+
6460+ "sete %1\n"
6461 : "=m" (v->counter), "=qm" (c)
6462 : "m" (v->counter) : "memory");
6463 return c != 0;
6464@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
6465 {
6466 unsigned char c;
6467
6468- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6469+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
6470+
6471+#ifdef CONFIG_PAX_REFCOUNT
6472+ "jno 0f\n"
6473+ LOCK_PREFIX "subq %2,%0\n"
6474+ "int $4\n0:\n"
6475+ _ASM_EXTABLE(0b, 0b)
6476+#endif
6477+
6478+ "sets %1\n"
6479 : "=m" (v->counter), "=qm" (c)
6480 : "er" (i), "m" (v->counter) : "memory");
6481 return c;
6482@@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
6483 static inline long atomic64_add_return(long i, atomic64_t *v)
6484 {
6485 long __i = i;
6486- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6487+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6488+
6489+#ifdef CONFIG_PAX_REFCOUNT
6490+ "jno 0f\n"
6491+ "movq %0, %1\n"
6492+ "int $4\n0:\n"
6493+ _ASM_EXTABLE(0b, 0b)
6494+#endif
6495+
6496+ : "+r" (i), "+m" (v->counter)
6497+ : : "memory");
6498+ return i + __i;
6499+}
6500+
6501+/**
6502+ * atomic64_add_return_unchecked - add and return
6503+ * @i: integer value to add
6504+ * @v: pointer to type atomic64_unchecked_t
6505+ *
6506+ * Atomically adds @i to @v and returns @i + @v
6507+ */
6508+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6509+{
6510+ long __i = i;
6511+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
6512 : "+r" (i), "+m" (v->counter)
6513 : : "memory");
6514 return i + __i;
6515@@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
6516 }
6517
6518 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
6519+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6520+{
6521+ return atomic64_add_return_unchecked(1, v);
6522+}
6523 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
6524
6525 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6526@@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
6527 return cmpxchg(&v->counter, old, new);
6528 }
6529
6530+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
6531+{
6532+ return cmpxchg(&v->counter, old, new);
6533+}
6534+
6535 static inline long atomic64_xchg(atomic64_t *v, long new)
6536 {
6537 return xchg(&v->counter, new);
6538@@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
6539 */
6540 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6541 {
6542- long c, old;
6543+ long c, old, new;
6544 c = atomic64_read(v);
6545 for (;;) {
6546- if (unlikely(c == (u)))
6547+ if (unlikely(c == u))
6548 break;
6549- old = atomic64_cmpxchg((v), c, c + (a));
6550+
6551+ asm volatile("add %2,%0\n"
6552+
6553+#ifdef CONFIG_PAX_REFCOUNT
6554+ "jno 0f\n"
6555+ "sub %2,%0\n"
6556+ "int $4\n0:\n"
6557+ _ASM_EXTABLE(0b, 0b)
6558+#endif
6559+
6560+ : "=r" (new)
6561+ : "0" (c), "ir" (a));
6562+
6563+ old = atomic64_cmpxchg(v, c, new);
6564 if (likely(old == c))
6565 break;
6566 c = old;
6567 }
6568- return c != (u);
6569+ return c != u;
6570 }
6571
6572 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6573diff -urNp linux-3.1.1/arch/x86/include/asm/atomic.h linux-3.1.1/arch/x86/include/asm/atomic.h
6574--- linux-3.1.1/arch/x86/include/asm/atomic.h 2011-11-11 15:19:27.000000000 -0500
6575+++ linux-3.1.1/arch/x86/include/asm/atomic.h 2011-11-16 18:39:07.000000000 -0500
6576@@ -22,7 +22,18 @@
6577 */
6578 static inline int atomic_read(const atomic_t *v)
6579 {
6580- return (*(volatile int *)&(v)->counter);
6581+ return (*(volatile const int *)&(v)->counter);
6582+}
6583+
6584+/**
6585+ * atomic_read_unchecked - read atomic variable
6586+ * @v: pointer of type atomic_unchecked_t
6587+ *
6588+ * Atomically reads the value of @v.
6589+ */
6590+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6591+{
6592+ return (*(volatile const int *)&(v)->counter);
6593 }
6594
6595 /**
6596@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6597 }
6598
6599 /**
6600+ * atomic_set_unchecked - set atomic variable
6601+ * @v: pointer of type atomic_unchecked_t
6602+ * @i: required value
6603+ *
6604+ * Atomically sets the value of @v to @i.
6605+ */
6606+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6607+{
6608+ v->counter = i;
6609+}
6610+
6611+/**
6612 * atomic_add - add integer to atomic variable
6613 * @i: integer value to add
6614 * @v: pointer of type atomic_t
6615@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6616 */
6617 static inline void atomic_add(int i, atomic_t *v)
6618 {
6619- asm volatile(LOCK_PREFIX "addl %1,%0"
6620+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6621+
6622+#ifdef CONFIG_PAX_REFCOUNT
6623+ "jno 0f\n"
6624+ LOCK_PREFIX "subl %1,%0\n"
6625+ "int $4\n0:\n"
6626+ _ASM_EXTABLE(0b, 0b)
6627+#endif
6628+
6629+ : "+m" (v->counter)
6630+ : "ir" (i));
6631+}
6632+
6633+/**
6634+ * atomic_add_unchecked - add integer to atomic variable
6635+ * @i: integer value to add
6636+ * @v: pointer of type atomic_unchecked_t
6637+ *
6638+ * Atomically adds @i to @v.
6639+ */
6640+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6641+{
6642+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6643 : "+m" (v->counter)
6644 : "ir" (i));
6645 }
6646@@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6647 */
6648 static inline void atomic_sub(int i, atomic_t *v)
6649 {
6650- asm volatile(LOCK_PREFIX "subl %1,%0"
6651+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6652+
6653+#ifdef CONFIG_PAX_REFCOUNT
6654+ "jno 0f\n"
6655+ LOCK_PREFIX "addl %1,%0\n"
6656+ "int $4\n0:\n"
6657+ _ASM_EXTABLE(0b, 0b)
6658+#endif
6659+
6660+ : "+m" (v->counter)
6661+ : "ir" (i));
6662+}
6663+
6664+/**
6665+ * atomic_sub_unchecked - subtract integer from atomic variable
6666+ * @i: integer value to subtract
6667+ * @v: pointer of type atomic_unchecked_t
6668+ *
6669+ * Atomically subtracts @i from @v.
6670+ */
6671+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6672+{
6673+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6674 : "+m" (v->counter)
6675 : "ir" (i));
6676 }
6677@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6678 {
6679 unsigned char c;
6680
6681- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6682+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
6683+
6684+#ifdef CONFIG_PAX_REFCOUNT
6685+ "jno 0f\n"
6686+ LOCK_PREFIX "addl %2,%0\n"
6687+ "int $4\n0:\n"
6688+ _ASM_EXTABLE(0b, 0b)
6689+#endif
6690+
6691+ "sete %1\n"
6692 : "+m" (v->counter), "=qm" (c)
6693 : "ir" (i) : "memory");
6694 return c;
6695@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6696 */
6697 static inline void atomic_inc(atomic_t *v)
6698 {
6699- asm volatile(LOCK_PREFIX "incl %0"
6700+ asm volatile(LOCK_PREFIX "incl %0\n"
6701+
6702+#ifdef CONFIG_PAX_REFCOUNT
6703+ "jno 0f\n"
6704+ LOCK_PREFIX "decl %0\n"
6705+ "int $4\n0:\n"
6706+ _ASM_EXTABLE(0b, 0b)
6707+#endif
6708+
6709+ : "+m" (v->counter));
6710+}
6711+
6712+/**
6713+ * atomic_inc_unchecked - increment atomic variable
6714+ * @v: pointer of type atomic_unchecked_t
6715+ *
6716+ * Atomically increments @v by 1.
6717+ */
6718+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6719+{
6720+ asm volatile(LOCK_PREFIX "incl %0\n"
6721 : "+m" (v->counter));
6722 }
6723
6724@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6725 */
6726 static inline void atomic_dec(atomic_t *v)
6727 {
6728- asm volatile(LOCK_PREFIX "decl %0"
6729+ asm volatile(LOCK_PREFIX "decl %0\n"
6730+
6731+#ifdef CONFIG_PAX_REFCOUNT
6732+ "jno 0f\n"
6733+ LOCK_PREFIX "incl %0\n"
6734+ "int $4\n0:\n"
6735+ _ASM_EXTABLE(0b, 0b)
6736+#endif
6737+
6738+ : "+m" (v->counter));
6739+}
6740+
6741+/**
6742+ * atomic_dec_unchecked - decrement atomic variable
6743+ * @v: pointer of type atomic_unchecked_t
6744+ *
6745+ * Atomically decrements @v by 1.
6746+ */
6747+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6748+{
6749+ asm volatile(LOCK_PREFIX "decl %0\n"
6750 : "+m" (v->counter));
6751 }
6752
6753@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6754 {
6755 unsigned char c;
6756
6757- asm volatile(LOCK_PREFIX "decl %0; sete %1"
6758+ asm volatile(LOCK_PREFIX "decl %0\n"
6759+
6760+#ifdef CONFIG_PAX_REFCOUNT
6761+ "jno 0f\n"
6762+ LOCK_PREFIX "incl %0\n"
6763+ "int $4\n0:\n"
6764+ _ASM_EXTABLE(0b, 0b)
6765+#endif
6766+
6767+ "sete %1\n"
6768 : "+m" (v->counter), "=qm" (c)
6769 : : "memory");
6770 return c != 0;
6771@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
6772 {
6773 unsigned char c;
6774
6775- asm volatile(LOCK_PREFIX "incl %0; sete %1"
6776+ asm volatile(LOCK_PREFIX "incl %0\n"
6777+
6778+#ifdef CONFIG_PAX_REFCOUNT
6779+ "jno 0f\n"
6780+ LOCK_PREFIX "decl %0\n"
6781+ "int $4\n0:\n"
6782+ _ASM_EXTABLE(0b, 0b)
6783+#endif
6784+
6785+ "sete %1\n"
6786+ : "+m" (v->counter), "=qm" (c)
6787+ : : "memory");
6788+ return c != 0;
6789+}
6790+
6791+/**
6792+ * atomic_inc_and_test_unchecked - increment and test
6793+ * @v: pointer of type atomic_unchecked_t
6794+ *
6795+ * Atomically increments @v by 1
6796+ * and returns true if the result is zero, or false for all
6797+ * other cases.
6798+ */
6799+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6800+{
6801+ unsigned char c;
6802+
6803+ asm volatile(LOCK_PREFIX "incl %0\n"
6804+ "sete %1\n"
6805 : "+m" (v->counter), "=qm" (c)
6806 : : "memory");
6807 return c != 0;
6808@@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
6809 {
6810 unsigned char c;
6811
6812- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6813+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
6814+
6815+#ifdef CONFIG_PAX_REFCOUNT
6816+ "jno 0f\n"
6817+ LOCK_PREFIX "subl %2,%0\n"
6818+ "int $4\n0:\n"
6819+ _ASM_EXTABLE(0b, 0b)
6820+#endif
6821+
6822+ "sets %1\n"
6823 : "+m" (v->counter), "=qm" (c)
6824 : "ir" (i) : "memory");
6825 return c;
6826@@ -180,6 +342,46 @@ static inline int atomic_add_return(int
6827 #endif
6828 /* Modern 486+ processor */
6829 __i = i;
6830+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6831+
6832+#ifdef CONFIG_PAX_REFCOUNT
6833+ "jno 0f\n"
6834+ "movl %0, %1\n"
6835+ "int $4\n0:\n"
6836+ _ASM_EXTABLE(0b, 0b)
6837+#endif
6838+
6839+ : "+r" (i), "+m" (v->counter)
6840+ : : "memory");
6841+ return i + __i;
6842+
6843+#ifdef CONFIG_M386
6844+no_xadd: /* Legacy 386 processor */
6845+ local_irq_save(flags);
6846+ __i = atomic_read(v);
6847+ atomic_set(v, i + __i);
6848+ local_irq_restore(flags);
6849+ return i + __i;
6850+#endif
6851+}
6852+
6853+/**
6854+ * atomic_add_return_unchecked - add integer and return
6855+ * @v: pointer of type atomic_unchecked_t
6856+ * @i: integer value to add
6857+ *
6858+ * Atomically adds @i to @v and returns @i + @v
6859+ */
6860+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6861+{
6862+ int __i;
6863+#ifdef CONFIG_M386
6864+ unsigned long flags;
6865+ if (unlikely(boot_cpu_data.x86 <= 3))
6866+ goto no_xadd;
6867+#endif
6868+ /* Modern 486+ processor */
6869+ __i = i;
6870 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6871 : "+r" (i), "+m" (v->counter)
6872 : : "memory");
6873@@ -208,6 +410,10 @@ static inline int atomic_sub_return(int
6874 }
6875
6876 #define atomic_inc_return(v) (atomic_add_return(1, v))
6877+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6878+{
6879+ return atomic_add_return_unchecked(1, v);
6880+}
6881 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6882
6883 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6884@@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
6885 return cmpxchg(&v->counter, old, new);
6886 }
6887
6888+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6889+{
6890+ return cmpxchg(&v->counter, old, new);
6891+}
6892+
6893 static inline int atomic_xchg(atomic_t *v, int new)
6894 {
6895 return xchg(&v->counter, new);
6896 }
6897
6898+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6899+{
6900+ return xchg(&v->counter, new);
6901+}
6902+
6903 /**
6904 * __atomic_add_unless - add unless the number is already a given value
6905 * @v: pointer of type atomic_t
6906@@ -231,12 +447,25 @@ static inline int atomic_xchg(atomic_t *
6907 */
6908 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
6909 {
6910- int c, old;
6911+ int c, old, new;
6912 c = atomic_read(v);
6913 for (;;) {
6914- if (unlikely(c == (u)))
6915+ if (unlikely(c == u))
6916 break;
6917- old = atomic_cmpxchg((v), c, c + (a));
6918+
6919+ asm volatile("addl %2,%0\n"
6920+
6921+#ifdef CONFIG_PAX_REFCOUNT
6922+ "jno 0f\n"
6923+ "subl %2,%0\n"
6924+ "int $4\n0:\n"
6925+ _ASM_EXTABLE(0b, 0b)
6926+#endif
6927+
6928+ : "=r" (new)
6929+ : "0" (c), "ir" (a));
6930+
6931+ old = atomic_cmpxchg(v, c, new);
6932 if (likely(old == c))
6933 break;
6934 c = old;
6935@@ -244,6 +473,48 @@ static inline int __atomic_add_unless(at
6936 return c;
6937 }
6938
6939+/**
6940+ * atomic_inc_not_zero_hint - increment if not null
6941+ * @v: pointer of type atomic_t
6942+ * @hint: probable value of the atomic before the increment
6943+ *
6944+ * This version of atomic_inc_not_zero() gives a hint of probable
6945+ * value of the atomic. This helps processor to not read the memory
6946+ * before doing the atomic read/modify/write cycle, lowering
6947+ * number of bus transactions on some arches.
6948+ *
6949+ * Returns: 0 if increment was not done, 1 otherwise.
6950+ */
6951+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
6952+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
6953+{
6954+ int val, c = hint, new;
6955+
6956+ /* sanity test, should be removed by compiler if hint is a constant */
6957+ if (!hint)
6958+ return __atomic_add_unless(v, 1, 0);
6959+
6960+ do {
6961+ asm volatile("incl %0\n"
6962+
6963+#ifdef CONFIG_PAX_REFCOUNT
6964+ "jno 0f\n"
6965+ "decl %0\n"
6966+ "int $4\n0:\n"
6967+ _ASM_EXTABLE(0b, 0b)
6968+#endif
6969+
6970+ : "=r" (new)
6971+ : "0" (c));
6972+
6973+ val = atomic_cmpxchg(v, c, new);
6974+ if (val == c)
6975+ return 1;
6976+ c = val;
6977+ } while (c);
6978+
6979+ return 0;
6980+}
6981
6982 /*
6983 * atomic_dec_if_positive - decrement by 1 if old value positive
6984diff -urNp linux-3.1.1/arch/x86/include/asm/bitops.h linux-3.1.1/arch/x86/include/asm/bitops.h
6985--- linux-3.1.1/arch/x86/include/asm/bitops.h 2011-11-11 15:19:27.000000000 -0500
6986+++ linux-3.1.1/arch/x86/include/asm/bitops.h 2011-11-16 18:39:07.000000000 -0500
6987@@ -38,7 +38,7 @@
6988 * a mask operation on a byte.
6989 */
6990 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
6991-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
6992+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
6993 #define CONST_MASK(nr) (1 << ((nr) & 7))
6994
6995 /**
6996diff -urNp linux-3.1.1/arch/x86/include/asm/boot.h linux-3.1.1/arch/x86/include/asm/boot.h
6997--- linux-3.1.1/arch/x86/include/asm/boot.h 2011-11-11 15:19:27.000000000 -0500
6998+++ linux-3.1.1/arch/x86/include/asm/boot.h 2011-11-16 18:39:07.000000000 -0500
6999@@ -11,10 +11,15 @@
7000 #include <asm/pgtable_types.h>
7001
7002 /* Physical address where kernel should be loaded. */
7003-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
7004+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
7005 + (CONFIG_PHYSICAL_ALIGN - 1)) \
7006 & ~(CONFIG_PHYSICAL_ALIGN - 1))
7007
7008+#ifndef __ASSEMBLY__
7009+extern unsigned char __LOAD_PHYSICAL_ADDR[];
7010+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
7011+#endif
7012+
7013 /* Minimum kernel alignment, as a power of two */
7014 #ifdef CONFIG_X86_64
7015 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
7016diff -urNp linux-3.1.1/arch/x86/include/asm/cacheflush.h linux-3.1.1/arch/x86/include/asm/cacheflush.h
7017--- linux-3.1.1/arch/x86/include/asm/cacheflush.h 2011-11-11 15:19:27.000000000 -0500
7018+++ linux-3.1.1/arch/x86/include/asm/cacheflush.h 2011-11-16 18:39:07.000000000 -0500
7019@@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
7020 unsigned long pg_flags = pg->flags & _PGMT_MASK;
7021
7022 if (pg_flags == _PGMT_DEFAULT)
7023- return -1;
7024+ return ~0UL;
7025 else if (pg_flags == _PGMT_WC)
7026 return _PAGE_CACHE_WC;
7027 else if (pg_flags == _PGMT_UC_MINUS)
7028diff -urNp linux-3.1.1/arch/x86/include/asm/cache.h linux-3.1.1/arch/x86/include/asm/cache.h
7029--- linux-3.1.1/arch/x86/include/asm/cache.h 2011-11-11 15:19:27.000000000 -0500
7030+++ linux-3.1.1/arch/x86/include/asm/cache.h 2011-11-16 18:39:07.000000000 -0500
7031@@ -5,12 +5,13 @@
7032
7033 /* L1 cache line size */
7034 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7035-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7036+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7037
7038 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7039+#define __read_only __attribute__((__section__(".data..read_only")))
7040
7041 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
7042-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
7043+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
7044
7045 #ifdef CONFIG_X86_VSMP
7046 #ifdef CONFIG_SMP
7047diff -urNp linux-3.1.1/arch/x86/include/asm/checksum_32.h linux-3.1.1/arch/x86/include/asm/checksum_32.h
7048--- linux-3.1.1/arch/x86/include/asm/checksum_32.h 2011-11-11 15:19:27.000000000 -0500
7049+++ linux-3.1.1/arch/x86/include/asm/checksum_32.h 2011-11-16 18:39:07.000000000 -0500
7050@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
7051 int len, __wsum sum,
7052 int *src_err_ptr, int *dst_err_ptr);
7053
7054+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
7055+ int len, __wsum sum,
7056+ int *src_err_ptr, int *dst_err_ptr);
7057+
7058+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
7059+ int len, __wsum sum,
7060+ int *src_err_ptr, int *dst_err_ptr);
7061+
7062 /*
7063 * Note: when you get a NULL pointer exception here this means someone
7064 * passed in an incorrect kernel address to one of these functions.
7065@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
7066 int *err_ptr)
7067 {
7068 might_sleep();
7069- return csum_partial_copy_generic((__force void *)src, dst,
7070+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
7071 len, sum, err_ptr, NULL);
7072 }
7073
7074@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
7075 {
7076 might_sleep();
7077 if (access_ok(VERIFY_WRITE, dst, len))
7078- return csum_partial_copy_generic(src, (__force void *)dst,
7079+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
7080 len, sum, NULL, err_ptr);
7081
7082 if (len)
7083diff -urNp linux-3.1.1/arch/x86/include/asm/cpufeature.h linux-3.1.1/arch/x86/include/asm/cpufeature.h
7084--- linux-3.1.1/arch/x86/include/asm/cpufeature.h 2011-11-11 15:19:27.000000000 -0500
7085+++ linux-3.1.1/arch/x86/include/asm/cpufeature.h 2011-11-16 18:39:07.000000000 -0500
7086@@ -358,7 +358,7 @@ static __always_inline __pure bool __sta
7087 ".section .discard,\"aw\",@progbits\n"
7088 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
7089 ".previous\n"
7090- ".section .altinstr_replacement,\"ax\"\n"
7091+ ".section .altinstr_replacement,\"a\"\n"
7092 "3: movb $1,%0\n"
7093 "4:\n"
7094 ".previous\n"
7095diff -urNp linux-3.1.1/arch/x86/include/asm/desc_defs.h linux-3.1.1/arch/x86/include/asm/desc_defs.h
7096--- linux-3.1.1/arch/x86/include/asm/desc_defs.h 2011-11-11 15:19:27.000000000 -0500
7097+++ linux-3.1.1/arch/x86/include/asm/desc_defs.h 2011-11-16 18:39:07.000000000 -0500
7098@@ -31,6 +31,12 @@ struct desc_struct {
7099 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
7100 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
7101 };
7102+ struct {
7103+ u16 offset_low;
7104+ u16 seg;
7105+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
7106+ unsigned offset_high: 16;
7107+ } gate;
7108 };
7109 } __attribute__((packed));
7110
7111diff -urNp linux-3.1.1/arch/x86/include/asm/desc.h linux-3.1.1/arch/x86/include/asm/desc.h
7112--- linux-3.1.1/arch/x86/include/asm/desc.h 2011-11-11 15:19:27.000000000 -0500
7113+++ linux-3.1.1/arch/x86/include/asm/desc.h 2011-11-16 18:39:07.000000000 -0500
7114@@ -4,6 +4,7 @@
7115 #include <asm/desc_defs.h>
7116 #include <asm/ldt.h>
7117 #include <asm/mmu.h>
7118+#include <asm/pgtable.h>
7119
7120 #include <linux/smp.h>
7121
7122@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_
7123
7124 desc->type = (info->read_exec_only ^ 1) << 1;
7125 desc->type |= info->contents << 2;
7126+ desc->type |= info->seg_not_present ^ 1;
7127
7128 desc->s = 1;
7129 desc->dpl = 0x3;
7130@@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_
7131 }
7132
7133 extern struct desc_ptr idt_descr;
7134-extern gate_desc idt_table[];
7135-
7136-struct gdt_page {
7137- struct desc_struct gdt[GDT_ENTRIES];
7138-} __attribute__((aligned(PAGE_SIZE)));
7139-
7140-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
7141+extern gate_desc idt_table[256];
7142
7143+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
7144 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
7145 {
7146- return per_cpu(gdt_page, cpu).gdt;
7147+ return cpu_gdt_table[cpu];
7148 }
7149
7150 #ifdef CONFIG_X86_64
7151@@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *
7152 unsigned long base, unsigned dpl, unsigned flags,
7153 unsigned short seg)
7154 {
7155- gate->a = (seg << 16) | (base & 0xffff);
7156- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
7157+ gate->gate.offset_low = base;
7158+ gate->gate.seg = seg;
7159+ gate->gate.reserved = 0;
7160+ gate->gate.type = type;
7161+ gate->gate.s = 0;
7162+ gate->gate.dpl = dpl;
7163+ gate->gate.p = 1;
7164+ gate->gate.offset_high = base >> 16;
7165 }
7166
7167 #endif
7168@@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(str
7169
7170 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
7171 {
7172+ pax_open_kernel();
7173 memcpy(&idt[entry], gate, sizeof(*gate));
7174+ pax_close_kernel();
7175 }
7176
7177 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
7178 {
7179+ pax_open_kernel();
7180 memcpy(&ldt[entry], desc, 8);
7181+ pax_close_kernel();
7182 }
7183
7184 static inline void
7185@@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struc
7186 default: size = sizeof(*gdt); break;
7187 }
7188
7189+ pax_open_kernel();
7190 memcpy(&gdt[entry], desc, size);
7191+ pax_close_kernel();
7192 }
7193
7194 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
7195@@ -207,7 +216,9 @@ static inline void native_set_ldt(const
7196
7197 static inline void native_load_tr_desc(void)
7198 {
7199+ pax_open_kernel();
7200 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
7201+ pax_close_kernel();
7202 }
7203
7204 static inline void native_load_gdt(const struct desc_ptr *dtr)
7205@@ -244,8 +255,10 @@ static inline void native_load_tls(struc
7206 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
7207 unsigned int i;
7208
7209+ pax_open_kernel();
7210 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
7211 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
7212+ pax_close_kernel();
7213 }
7214
7215 #define _LDT_empty(info) \
7216@@ -307,7 +320,7 @@ static inline void set_desc_limit(struct
7217 desc->limit = (limit >> 16) & 0xf;
7218 }
7219
7220-static inline void _set_gate(int gate, unsigned type, void *addr,
7221+static inline void _set_gate(int gate, unsigned type, const void *addr,
7222 unsigned dpl, unsigned ist, unsigned seg)
7223 {
7224 gate_desc s;
7225@@ -326,7 +339,7 @@ static inline void _set_gate(int gate, u
7226 * Pentium F0 0F bugfix can have resulted in the mapped
7227 * IDT being write-protected.
7228 */
7229-static inline void set_intr_gate(unsigned int n, void *addr)
7230+static inline void set_intr_gate(unsigned int n, const void *addr)
7231 {
7232 BUG_ON((unsigned)n > 0xFF);
7233 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7234@@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsig
7235 /*
7236 * This routine sets up an interrupt gate at directory privilege level 3.
7237 */
7238-static inline void set_system_intr_gate(unsigned int n, void *addr)
7239+static inline void set_system_intr_gate(unsigned int n, const void *addr)
7240 {
7241 BUG_ON((unsigned)n > 0xFF);
7242 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7243 }
7244
7245-static inline void set_system_trap_gate(unsigned int n, void *addr)
7246+static inline void set_system_trap_gate(unsigned int n, const void *addr)
7247 {
7248 BUG_ON((unsigned)n > 0xFF);
7249 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7250 }
7251
7252-static inline void set_trap_gate(unsigned int n, void *addr)
7253+static inline void set_trap_gate(unsigned int n, const void *addr)
7254 {
7255 BUG_ON((unsigned)n > 0xFF);
7256 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7257@@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigne
7258 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7259 {
7260 BUG_ON((unsigned)n > 0xFF);
7261- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7262+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7263 }
7264
7265-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7266+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7267 {
7268 BUG_ON((unsigned)n > 0xFF);
7269 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7270 }
7271
7272-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7273+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7274 {
7275 BUG_ON((unsigned)n > 0xFF);
7276 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7277 }
7278
7279+#ifdef CONFIG_X86_32
7280+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7281+{
7282+ struct desc_struct d;
7283+
7284+ if (likely(limit))
7285+ limit = (limit - 1UL) >> PAGE_SHIFT;
7286+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
7287+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7288+}
7289+#endif
7290+
7291 #endif /* _ASM_X86_DESC_H */
7292diff -urNp linux-3.1.1/arch/x86/include/asm/e820.h linux-3.1.1/arch/x86/include/asm/e820.h
7293--- linux-3.1.1/arch/x86/include/asm/e820.h 2011-11-11 15:19:27.000000000 -0500
7294+++ linux-3.1.1/arch/x86/include/asm/e820.h 2011-11-16 18:39:07.000000000 -0500
7295@@ -69,7 +69,7 @@ struct e820map {
7296 #define ISA_START_ADDRESS 0xa0000
7297 #define ISA_END_ADDRESS 0x100000
7298
7299-#define BIOS_BEGIN 0x000a0000
7300+#define BIOS_BEGIN 0x000c0000
7301 #define BIOS_END 0x00100000
7302
7303 #define BIOS_ROM_BASE 0xffe00000
7304diff -urNp linux-3.1.1/arch/x86/include/asm/elf.h linux-3.1.1/arch/x86/include/asm/elf.h
7305--- linux-3.1.1/arch/x86/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
7306+++ linux-3.1.1/arch/x86/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
7307@@ -237,7 +237,25 @@ extern int force_personality32;
7308 the loader. We need to make sure that it is out of the way of the program
7309 that it will "exec", and that there is sufficient room for the brk. */
7310
7311+#ifdef CONFIG_PAX_SEGMEXEC
7312+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7313+#else
7314 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7315+#endif
7316+
7317+#ifdef CONFIG_PAX_ASLR
7318+#ifdef CONFIG_X86_32
7319+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7320+
7321+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7322+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7323+#else
7324+#define PAX_ELF_ET_DYN_BASE 0x400000UL
7325+
7326+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7327+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7328+#endif
7329+#endif
7330
7331 /* This yields a mask that user programs can use to figure out what
7332 instruction set this CPU supports. This could be done in user space,
7333@@ -290,9 +308,7 @@ do { \
7334
7335 #define ARCH_DLINFO \
7336 do { \
7337- if (vdso_enabled) \
7338- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7339- (unsigned long)current->mm->context.vdso); \
7340+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
7341 } while (0)
7342
7343 #define AT_SYSINFO 32
7344@@ -303,7 +319,7 @@ do { \
7345
7346 #endif /* !CONFIG_X86_32 */
7347
7348-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7349+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7350
7351 #define VDSO_ENTRY \
7352 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7353@@ -317,7 +333,4 @@ extern int arch_setup_additional_pages(s
7354 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7355 #define compat_arch_setup_additional_pages syscall32_setup_pages
7356
7357-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7358-#define arch_randomize_brk arch_randomize_brk
7359-
7360 #endif /* _ASM_X86_ELF_H */
7361diff -urNp linux-3.1.1/arch/x86/include/asm/emergency-restart.h linux-3.1.1/arch/x86/include/asm/emergency-restart.h
7362--- linux-3.1.1/arch/x86/include/asm/emergency-restart.h 2011-11-11 15:19:27.000000000 -0500
7363+++ linux-3.1.1/arch/x86/include/asm/emergency-restart.h 2011-11-16 18:39:07.000000000 -0500
7364@@ -15,6 +15,6 @@ enum reboot_type {
7365
7366 extern enum reboot_type reboot_type;
7367
7368-extern void machine_emergency_restart(void);
7369+extern void machine_emergency_restart(void) __noreturn;
7370
7371 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
7372diff -urNp linux-3.1.1/arch/x86/include/asm/futex.h linux-3.1.1/arch/x86/include/asm/futex.h
7373--- linux-3.1.1/arch/x86/include/asm/futex.h 2011-11-11 15:19:27.000000000 -0500
7374+++ linux-3.1.1/arch/x86/include/asm/futex.h 2011-11-16 18:39:07.000000000 -0500
7375@@ -12,16 +12,18 @@
7376 #include <asm/system.h>
7377
7378 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7379+ typecheck(u32 __user *, uaddr); \
7380 asm volatile("1:\t" insn "\n" \
7381 "2:\t.section .fixup,\"ax\"\n" \
7382 "3:\tmov\t%3, %1\n" \
7383 "\tjmp\t2b\n" \
7384 "\t.previous\n" \
7385 _ASM_EXTABLE(1b, 3b) \
7386- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7387+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
7388 : "i" (-EFAULT), "0" (oparg), "1" (0))
7389
7390 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7391+ typecheck(u32 __user *, uaddr); \
7392 asm volatile("1:\tmovl %2, %0\n" \
7393 "\tmovl\t%0, %3\n" \
7394 "\t" insn "\n" \
7395@@ -34,7 +36,7 @@
7396 _ASM_EXTABLE(1b, 4b) \
7397 _ASM_EXTABLE(2b, 4b) \
7398 : "=&a" (oldval), "=&r" (ret), \
7399- "+m" (*uaddr), "=&r" (tem) \
7400+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
7401 : "r" (oparg), "i" (-EFAULT), "1" (0))
7402
7403 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7404@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
7405
7406 switch (op) {
7407 case FUTEX_OP_SET:
7408- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7409+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
7410 break;
7411 case FUTEX_OP_ADD:
7412- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7413+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
7414 uaddr, oparg);
7415 break;
7416 case FUTEX_OP_OR:
7417@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
7418 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7419 return -EFAULT;
7420
7421- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
7422+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
7423 "2:\t.section .fixup, \"ax\"\n"
7424 "3:\tmov %3, %0\n"
7425 "\tjmp 2b\n"
7426 "\t.previous\n"
7427 _ASM_EXTABLE(1b, 3b)
7428- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
7429+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
7430 : "i" (-EFAULT), "r" (newval), "1" (oldval)
7431 : "memory"
7432 );
7433diff -urNp linux-3.1.1/arch/x86/include/asm/hw_irq.h linux-3.1.1/arch/x86/include/asm/hw_irq.h
7434--- linux-3.1.1/arch/x86/include/asm/hw_irq.h 2011-11-11 15:19:27.000000000 -0500
7435+++ linux-3.1.1/arch/x86/include/asm/hw_irq.h 2011-11-16 18:39:07.000000000 -0500
7436@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
7437 extern void enable_IO_APIC(void);
7438
7439 /* Statistics */
7440-extern atomic_t irq_err_count;
7441-extern atomic_t irq_mis_count;
7442+extern atomic_unchecked_t irq_err_count;
7443+extern atomic_unchecked_t irq_mis_count;
7444
7445 /* EISA */
7446 extern void eisa_set_level_irq(unsigned int irq);
7447diff -urNp linux-3.1.1/arch/x86/include/asm/i387.h linux-3.1.1/arch/x86/include/asm/i387.h
7448--- linux-3.1.1/arch/x86/include/asm/i387.h 2011-11-11 15:19:27.000000000 -0500
7449+++ linux-3.1.1/arch/x86/include/asm/i387.h 2011-11-16 18:39:07.000000000 -0500
7450@@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
7451 {
7452 int err;
7453
7454+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7455+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7456+ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
7457+#endif
7458+
7459 /* See comment in fxsave() below. */
7460 #ifdef CONFIG_AS_FXSAVEQ
7461 asm volatile("1: fxrstorq %[fx]\n\t"
7462@@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
7463 {
7464 int err;
7465
7466+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7467+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7468+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7469+#endif
7470+
7471 /*
7472 * Clear the bytes not touched by the fxsave and reserved
7473 * for the SW usage.
7474@@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
7475 #endif /* CONFIG_X86_64 */
7476
7477 /* We need a safe address that is cheap to find and that is already
7478- in L1 during context switch. The best choices are unfortunately
7479- different for UP and SMP */
7480-#ifdef CONFIG_SMP
7481-#define safe_address (__per_cpu_offset[0])
7482-#else
7483-#define safe_address (kstat_cpu(0).cpustat.user)
7484-#endif
7485+ in L1 during context switch. */
7486+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7487
7488 /*
7489 * These must be called with preempt disabled
7490@@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
7491 struct thread_info *me = current_thread_info();
7492 preempt_disable();
7493 if (me->status & TS_USEDFPU)
7494- __save_init_fpu(me->task);
7495+ __save_init_fpu(current);
7496 else
7497 clts();
7498 }
7499diff -urNp linux-3.1.1/arch/x86/include/asm/io.h linux-3.1.1/arch/x86/include/asm/io.h
7500--- linux-3.1.1/arch/x86/include/asm/io.h 2011-11-11 15:19:27.000000000 -0500
7501+++ linux-3.1.1/arch/x86/include/asm/io.h 2011-11-16 18:39:07.000000000 -0500
7502@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
7503
7504 #include <linux/vmalloc.h>
7505
7506+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7507+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7508+{
7509+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7510+}
7511+
7512+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7513+{
7514+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7515+}
7516+
7517 /*
7518 * Convert a virtual cached pointer to an uncached pointer
7519 */
7520diff -urNp linux-3.1.1/arch/x86/include/asm/irqflags.h linux-3.1.1/arch/x86/include/asm/irqflags.h
7521--- linux-3.1.1/arch/x86/include/asm/irqflags.h 2011-11-11 15:19:27.000000000 -0500
7522+++ linux-3.1.1/arch/x86/include/asm/irqflags.h 2011-11-16 18:39:07.000000000 -0500
7523@@ -141,6 +141,11 @@ static inline notrace unsigned long arch
7524 sti; \
7525 sysexit
7526
7527+#define GET_CR0_INTO_RDI mov %cr0, %rdi
7528+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7529+#define GET_CR3_INTO_RDI mov %cr3, %rdi
7530+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7531+
7532 #else
7533 #define INTERRUPT_RETURN iret
7534 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
7535diff -urNp linux-3.1.1/arch/x86/include/asm/kprobes.h linux-3.1.1/arch/x86/include/asm/kprobes.h
7536--- linux-3.1.1/arch/x86/include/asm/kprobes.h 2011-11-11 15:19:27.000000000 -0500
7537+++ linux-3.1.1/arch/x86/include/asm/kprobes.h 2011-11-16 18:39:07.000000000 -0500
7538@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
7539 #define RELATIVEJUMP_SIZE 5
7540 #define RELATIVECALL_OPCODE 0xe8
7541 #define RELATIVE_ADDR_SIZE 4
7542-#define MAX_STACK_SIZE 64
7543-#define MIN_STACK_SIZE(ADDR) \
7544- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
7545- THREAD_SIZE - (unsigned long)(ADDR))) \
7546- ? (MAX_STACK_SIZE) \
7547- : (((unsigned long)current_thread_info()) + \
7548- THREAD_SIZE - (unsigned long)(ADDR)))
7549+#define MAX_STACK_SIZE 64UL
7550+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
7551
7552 #define flush_insn_slot(p) do { } while (0)
7553
7554diff -urNp linux-3.1.1/arch/x86/include/asm/kvm_host.h linux-3.1.1/arch/x86/include/asm/kvm_host.h
7555--- linux-3.1.1/arch/x86/include/asm/kvm_host.h 2011-11-11 15:19:27.000000000 -0500
7556+++ linux-3.1.1/arch/x86/include/asm/kvm_host.h 2011-11-16 18:39:07.000000000 -0500
7557@@ -456,7 +456,7 @@ struct kvm_arch {
7558 unsigned int n_requested_mmu_pages;
7559 unsigned int n_max_mmu_pages;
7560 unsigned int indirect_shadow_pages;
7561- atomic_t invlpg_counter;
7562+ atomic_unchecked_t invlpg_counter;
7563 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
7564 /*
7565 * Hash table of struct kvm_mmu_page.
7566@@ -636,7 +636,7 @@ struct kvm_x86_ops {
7567 enum x86_intercept_stage stage);
7568
7569 const struct trace_print_flags *exit_reasons_str;
7570-};
7571+} __do_const;
7572
7573 struct kvm_arch_async_pf {
7574 u32 token;
7575diff -urNp linux-3.1.1/arch/x86/include/asm/local.h linux-3.1.1/arch/x86/include/asm/local.h
7576--- linux-3.1.1/arch/x86/include/asm/local.h 2011-11-11 15:19:27.000000000 -0500
7577+++ linux-3.1.1/arch/x86/include/asm/local.h 2011-11-16 18:39:07.000000000 -0500
7578@@ -18,26 +18,58 @@ typedef struct {
7579
7580 static inline void local_inc(local_t *l)
7581 {
7582- asm volatile(_ASM_INC "%0"
7583+ asm volatile(_ASM_INC "%0\n"
7584+
7585+#ifdef CONFIG_PAX_REFCOUNT
7586+ "jno 0f\n"
7587+ _ASM_DEC "%0\n"
7588+ "int $4\n0:\n"
7589+ _ASM_EXTABLE(0b, 0b)
7590+#endif
7591+
7592 : "+m" (l->a.counter));
7593 }
7594
7595 static inline void local_dec(local_t *l)
7596 {
7597- asm volatile(_ASM_DEC "%0"
7598+ asm volatile(_ASM_DEC "%0\n"
7599+
7600+#ifdef CONFIG_PAX_REFCOUNT
7601+ "jno 0f\n"
7602+ _ASM_INC "%0\n"
7603+ "int $4\n0:\n"
7604+ _ASM_EXTABLE(0b, 0b)
7605+#endif
7606+
7607 : "+m" (l->a.counter));
7608 }
7609
7610 static inline void local_add(long i, local_t *l)
7611 {
7612- asm volatile(_ASM_ADD "%1,%0"
7613+ asm volatile(_ASM_ADD "%1,%0\n"
7614+
7615+#ifdef CONFIG_PAX_REFCOUNT
7616+ "jno 0f\n"
7617+ _ASM_SUB "%1,%0\n"
7618+ "int $4\n0:\n"
7619+ _ASM_EXTABLE(0b, 0b)
7620+#endif
7621+
7622 : "+m" (l->a.counter)
7623 : "ir" (i));
7624 }
7625
7626 static inline void local_sub(long i, local_t *l)
7627 {
7628- asm volatile(_ASM_SUB "%1,%0"
7629+ asm volatile(_ASM_SUB "%1,%0\n"
7630+
7631+#ifdef CONFIG_PAX_REFCOUNT
7632+ "jno 0f\n"
7633+ _ASM_ADD "%1,%0\n"
7634+ "int $4\n0:\n"
7635+ _ASM_EXTABLE(0b, 0b)
7636+#endif
7637+
7638 : "+m" (l->a.counter)
7639 : "ir" (i));
7640 }
7641@@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
7642 {
7643 unsigned char c;
7644
7645- asm volatile(_ASM_SUB "%2,%0; sete %1"
7646+ asm volatile(_ASM_SUB "%2,%0\n"
7647+
7648+#ifdef CONFIG_PAX_REFCOUNT
7649+ "jno 0f\n"
7650+ _ASM_ADD "%2,%0\n"
7651+ "int $4\n0:\n"
7652+ _ASM_EXTABLE(0b, 0b)
7653+#endif
7654+
7655+ "sete %1\n"
7656 : "+m" (l->a.counter), "=qm" (c)
7657 : "ir" (i) : "memory");
7658 return c;
7659@@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
7660 {
7661 unsigned char c;
7662
7663- asm volatile(_ASM_DEC "%0; sete %1"
7664+ asm volatile(_ASM_DEC "%0\n"
7665+
7666+#ifdef CONFIG_PAX_REFCOUNT
7667+ "jno 0f\n"
7668+ _ASM_INC "%0\n"
7669+ "int $4\n0:\n"
7670+ _ASM_EXTABLE(0b, 0b)
7671+#endif
7672+
7673+ "sete %1\n"
7674 : "+m" (l->a.counter), "=qm" (c)
7675 : : "memory");
7676 return c != 0;
7677@@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
7678 {
7679 unsigned char c;
7680
7681- asm volatile(_ASM_INC "%0; sete %1"
7682+ asm volatile(_ASM_INC "%0\n"
7683+
7684+#ifdef CONFIG_PAX_REFCOUNT
7685+ "jno 0f\n"
7686+ _ASM_DEC "%0\n"
7687+ "int $4\n0:\n"
7688+ _ASM_EXTABLE(0b, 0b)
7689+#endif
7690+
7691+ "sete %1\n"
7692 : "+m" (l->a.counter), "=qm" (c)
7693 : : "memory");
7694 return c != 0;
7695@@ -110,7 +169,16 @@ static inline int local_add_negative(lon
7696 {
7697 unsigned char c;
7698
7699- asm volatile(_ASM_ADD "%2,%0; sets %1"
7700+ asm volatile(_ASM_ADD "%2,%0\n"
7701+
7702+#ifdef CONFIG_PAX_REFCOUNT
7703+ "jno 0f\n"
7704+ _ASM_SUB "%2,%0\n"
7705+ "int $4\n0:\n"
7706+ _ASM_EXTABLE(0b, 0b)
7707+#endif
7708+
7709+ "sets %1\n"
7710 : "+m" (l->a.counter), "=qm" (c)
7711 : "ir" (i) : "memory");
7712 return c;
7713@@ -133,7 +201,15 @@ static inline long local_add_return(long
7714 #endif
7715 /* Modern 486+ processor */
7716 __i = i;
7717- asm volatile(_ASM_XADD "%0, %1;"
7718+ asm volatile(_ASM_XADD "%0, %1\n"
7719+
7720+#ifdef CONFIG_PAX_REFCOUNT
7721+ "jno 0f\n"
7722+ _ASM_MOV "%0,%1\n"
7723+ "int $4\n0:\n"
7724+ _ASM_EXTABLE(0b, 0b)
7725+#endif
7726+
7727 : "+r" (i), "+m" (l->a.counter)
7728 : : "memory");
7729 return i + __i;
7730diff -urNp linux-3.1.1/arch/x86/include/asm/mman.h linux-3.1.1/arch/x86/include/asm/mman.h
7731--- linux-3.1.1/arch/x86/include/asm/mman.h 2011-11-11 15:19:27.000000000 -0500
7732+++ linux-3.1.1/arch/x86/include/asm/mman.h 2011-11-16 18:39:07.000000000 -0500
7733@@ -5,4 +5,14 @@
7734
7735 #include <asm-generic/mman.h>
7736
7737+#ifdef __KERNEL__
7738+#ifndef __ASSEMBLY__
7739+#ifdef CONFIG_X86_32
7740+#define arch_mmap_check i386_mmap_check
7741+int i386_mmap_check(unsigned long addr, unsigned long len,
7742+ unsigned long flags);
7743+#endif
7744+#endif
7745+#endif
7746+
7747 #endif /* _ASM_X86_MMAN_H */
7748diff -urNp linux-3.1.1/arch/x86/include/asm/mmu_context.h linux-3.1.1/arch/x86/include/asm/mmu_context.h
7749--- linux-3.1.1/arch/x86/include/asm/mmu_context.h 2011-11-11 15:19:27.000000000 -0500
7750+++ linux-3.1.1/arch/x86/include/asm/mmu_context.h 2011-11-16 18:39:07.000000000 -0500
7751@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *m
7752
7753 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7754 {
7755+
7756+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7757+ unsigned int i;
7758+ pgd_t *pgd;
7759+
7760+ pax_open_kernel();
7761+ pgd = get_cpu_pgd(smp_processor_id());
7762+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
7763+ set_pgd_batched(pgd+i, native_make_pgd(0));
7764+ pax_close_kernel();
7765+#endif
7766+
7767 #ifdef CONFIG_SMP
7768 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
7769 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
7770@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_s
7771 struct task_struct *tsk)
7772 {
7773 unsigned cpu = smp_processor_id();
7774+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7775+ int tlbstate = TLBSTATE_OK;
7776+#endif
7777
7778 if (likely(prev != next)) {
7779 #ifdef CONFIG_SMP
7780+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7781+ tlbstate = percpu_read(cpu_tlbstate.state);
7782+#endif
7783 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7784 percpu_write(cpu_tlbstate.active_mm, next);
7785 #endif
7786 cpumask_set_cpu(cpu, mm_cpumask(next));
7787
7788 /* Re-load page tables */
7789+#ifdef CONFIG_PAX_PER_CPU_PGD
7790+ pax_open_kernel();
7791+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7792+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7793+ pax_close_kernel();
7794+ load_cr3(get_cpu_pgd(cpu));
7795+#else
7796 load_cr3(next->pgd);
7797+#endif
7798
7799 /* stop flush ipis for the previous mm */
7800 cpumask_clear_cpu(cpu, mm_cpumask(prev));
7801@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_s
7802 */
7803 if (unlikely(prev->context.ldt != next->context.ldt))
7804 load_LDT_nolock(&next->context);
7805- }
7806+
7807+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7808+ if (!(__supported_pte_mask & _PAGE_NX)) {
7809+ smp_mb__before_clear_bit();
7810+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
7811+ smp_mb__after_clear_bit();
7812+ cpu_set(cpu, next->context.cpu_user_cs_mask);
7813+ }
7814+#endif
7815+
7816+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7817+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
7818+ prev->context.user_cs_limit != next->context.user_cs_limit))
7819+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7820 #ifdef CONFIG_SMP
7821+ else if (unlikely(tlbstate != TLBSTATE_OK))
7822+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7823+#endif
7824+#endif
7825+
7826+ }
7827 else {
7828+
7829+#ifdef CONFIG_PAX_PER_CPU_PGD
7830+ pax_open_kernel();
7831+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7832+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7833+ pax_close_kernel();
7834+ load_cr3(get_cpu_pgd(cpu));
7835+#endif
7836+
7837+#ifdef CONFIG_SMP
7838 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7839 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
7840
7841@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_s
7842 * tlb flush IPI delivery. We must reload CR3
7843 * to make sure to use no freed page tables.
7844 */
7845+
7846+#ifndef CONFIG_PAX_PER_CPU_PGD
7847 load_cr3(next->pgd);
7848+#endif
7849+
7850 load_LDT_nolock(&next->context);
7851+
7852+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
7853+ if (!(__supported_pte_mask & _PAGE_NX))
7854+ cpu_set(cpu, next->context.cpu_user_cs_mask);
7855+#endif
7856+
7857+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7858+#ifdef CONFIG_PAX_PAGEEXEC
7859+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
7860+#endif
7861+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7862+#endif
7863+
7864 }
7865- }
7866 #endif
7867+ }
7868 }
7869
7870 #define activate_mm(prev, next) \
7871diff -urNp linux-3.1.1/arch/x86/include/asm/mmu.h linux-3.1.1/arch/x86/include/asm/mmu.h
7872--- linux-3.1.1/arch/x86/include/asm/mmu.h 2011-11-11 15:19:27.000000000 -0500
7873+++ linux-3.1.1/arch/x86/include/asm/mmu.h 2011-11-16 18:39:07.000000000 -0500
7874@@ -9,7 +9,7 @@
7875 * we put the segment information here.
7876 */
7877 typedef struct {
7878- void *ldt;
7879+ struct desc_struct *ldt;
7880 int size;
7881
7882 #ifdef CONFIG_X86_64
7883@@ -18,7 +18,19 @@ typedef struct {
7884 #endif
7885
7886 struct mutex lock;
7887- void *vdso;
7888+ unsigned long vdso;
7889+
7890+#ifdef CONFIG_X86_32
7891+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
7892+ unsigned long user_cs_base;
7893+ unsigned long user_cs_limit;
7894+
7895+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7896+ cpumask_t cpu_user_cs_mask;
7897+#endif
7898+
7899+#endif
7900+#endif
7901 } mm_context_t;
7902
7903 #ifdef CONFIG_SMP
7904diff -urNp linux-3.1.1/arch/x86/include/asm/module.h linux-3.1.1/arch/x86/include/asm/module.h
7905--- linux-3.1.1/arch/x86/include/asm/module.h 2011-11-11 15:19:27.000000000 -0500
7906+++ linux-3.1.1/arch/x86/include/asm/module.h 2011-11-16 18:39:07.000000000 -0500
7907@@ -5,6 +5,7 @@
7908
7909 #ifdef CONFIG_X86_64
7910 /* X86_64 does not define MODULE_PROC_FAMILY */
7911+#define MODULE_PROC_FAMILY ""
7912 #elif defined CONFIG_M386
7913 #define MODULE_PROC_FAMILY "386 "
7914 #elif defined CONFIG_M486
7915@@ -59,8 +60,18 @@
7916 #error unknown processor family
7917 #endif
7918
7919-#ifdef CONFIG_X86_32
7920-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
7921+#ifdef CONFIG_PAX_KERNEXEC
7922+#define MODULE_PAX_KERNEXEC "KERNEXEC "
7923+#else
7924+#define MODULE_PAX_KERNEXEC ""
7925 #endif
7926
7927+#ifdef CONFIG_PAX_MEMORY_UDEREF
7928+#define MODULE_PAX_UDEREF "UDEREF "
7929+#else
7930+#define MODULE_PAX_UDEREF ""
7931+#endif
7932+
7933+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
7934+
7935 #endif /* _ASM_X86_MODULE_H */
7936diff -urNp linux-3.1.1/arch/x86/include/asm/page_64_types.h linux-3.1.1/arch/x86/include/asm/page_64_types.h
7937--- linux-3.1.1/arch/x86/include/asm/page_64_types.h 2011-11-11 15:19:27.000000000 -0500
7938+++ linux-3.1.1/arch/x86/include/asm/page_64_types.h 2011-11-16 18:39:07.000000000 -0500
7939@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
7940
7941 /* duplicated to the one in bootmem.h */
7942 extern unsigned long max_pfn;
7943-extern unsigned long phys_base;
7944+extern const unsigned long phys_base;
7945
7946 extern unsigned long __phys_addr(unsigned long);
7947 #define __phys_reloc_hide(x) (x)
7948diff -urNp linux-3.1.1/arch/x86/include/asm/paravirt.h linux-3.1.1/arch/x86/include/asm/paravirt.h
7949--- linux-3.1.1/arch/x86/include/asm/paravirt.h 2011-11-11 15:19:27.000000000 -0500
7950+++ linux-3.1.1/arch/x86/include/asm/paravirt.h 2011-11-16 18:39:07.000000000 -0500
7951@@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp,
7952 val);
7953 }
7954
7955+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
7956+{
7957+ pgdval_t val = native_pgd_val(pgd);
7958+
7959+ if (sizeof(pgdval_t) > sizeof(long))
7960+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
7961+ val, (u64)val >> 32);
7962+ else
7963+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
7964+ val);
7965+}
7966+
7967 static inline void pgd_clear(pgd_t *pgdp)
7968 {
7969 set_pgd(pgdp, __pgd(0));
7970@@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned
7971 pv_mmu_ops.set_fixmap(idx, phys, flags);
7972 }
7973
7974+#ifdef CONFIG_PAX_KERNEXEC
7975+static inline unsigned long pax_open_kernel(void)
7976+{
7977+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
7978+}
7979+
7980+static inline unsigned long pax_close_kernel(void)
7981+{
7982+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
7983+}
7984+#else
7985+static inline unsigned long pax_open_kernel(void) { return 0; }
7986+static inline unsigned long pax_close_kernel(void) { return 0; }
7987+#endif
7988+
7989 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
7990
7991 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
7992@@ -964,7 +991,7 @@ extern void default_banner(void);
7993
7994 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
7995 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
7996-#define PARA_INDIRECT(addr) *%cs:addr
7997+#define PARA_INDIRECT(addr) *%ss:addr
7998 #endif
7999
8000 #define INTERRUPT_RETURN \
8001@@ -1041,6 +1068,21 @@ extern void default_banner(void);
8002 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
8003 CLBR_NONE, \
8004 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
8005+
8006+#define GET_CR0_INTO_RDI \
8007+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
8008+ mov %rax,%rdi
8009+
8010+#define SET_RDI_INTO_CR0 \
8011+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
8012+
8013+#define GET_CR3_INTO_RDI \
8014+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
8015+ mov %rax,%rdi
8016+
8017+#define SET_RDI_INTO_CR3 \
8018+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
8019+
8020 #endif /* CONFIG_X86_32 */
8021
8022 #endif /* __ASSEMBLY__ */
8023diff -urNp linux-3.1.1/arch/x86/include/asm/paravirt_types.h linux-3.1.1/arch/x86/include/asm/paravirt_types.h
8024--- linux-3.1.1/arch/x86/include/asm/paravirt_types.h 2011-11-11 15:19:27.000000000 -0500
8025+++ linux-3.1.1/arch/x86/include/asm/paravirt_types.h 2011-11-16 18:39:07.000000000 -0500
8026@@ -84,20 +84,20 @@ struct pv_init_ops {
8027 */
8028 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
8029 unsigned long addr, unsigned len);
8030-};
8031+} __no_const;
8032
8033
8034 struct pv_lazy_ops {
8035 /* Set deferred update mode, used for batching operations. */
8036 void (*enter)(void);
8037 void (*leave)(void);
8038-};
8039+} __no_const;
8040
8041 struct pv_time_ops {
8042 unsigned long long (*sched_clock)(void);
8043 unsigned long long (*steal_clock)(int cpu);
8044 unsigned long (*get_tsc_khz)(void);
8045-};
8046+} __no_const;
8047
8048 struct pv_cpu_ops {
8049 /* hooks for various privileged instructions */
8050@@ -193,7 +193,7 @@ struct pv_cpu_ops {
8051
8052 void (*start_context_switch)(struct task_struct *prev);
8053 void (*end_context_switch)(struct task_struct *next);
8054-};
8055+} __no_const;
8056
8057 struct pv_irq_ops {
8058 /*
8059@@ -224,7 +224,7 @@ struct pv_apic_ops {
8060 unsigned long start_eip,
8061 unsigned long start_esp);
8062 #endif
8063-};
8064+} __no_const;
8065
8066 struct pv_mmu_ops {
8067 unsigned long (*read_cr2)(void);
8068@@ -313,6 +313,7 @@ struct pv_mmu_ops {
8069 struct paravirt_callee_save make_pud;
8070
8071 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
8072+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
8073 #endif /* PAGETABLE_LEVELS == 4 */
8074 #endif /* PAGETABLE_LEVELS >= 3 */
8075
8076@@ -324,6 +325,12 @@ struct pv_mmu_ops {
8077 an mfn. We can tell which is which from the index. */
8078 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
8079 phys_addr_t phys, pgprot_t flags);
8080+
8081+#ifdef CONFIG_PAX_KERNEXEC
8082+ unsigned long (*pax_open_kernel)(void);
8083+ unsigned long (*pax_close_kernel)(void);
8084+#endif
8085+
8086 };
8087
8088 struct arch_spinlock;
8089@@ -334,7 +341,7 @@ struct pv_lock_ops {
8090 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
8091 int (*spin_trylock)(struct arch_spinlock *lock);
8092 void (*spin_unlock)(struct arch_spinlock *lock);
8093-};
8094+} __no_const;
8095
8096 /* This contains all the paravirt structures: we get a convenient
8097 * number for each function using the offset which we use to indicate
8098diff -urNp linux-3.1.1/arch/x86/include/asm/pgalloc.h linux-3.1.1/arch/x86/include/asm/pgalloc.h
8099--- linux-3.1.1/arch/x86/include/asm/pgalloc.h 2011-11-11 15:19:27.000000000 -0500
8100+++ linux-3.1.1/arch/x86/include/asm/pgalloc.h 2011-11-16 18:39:07.000000000 -0500
8101@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
8102 pmd_t *pmd, pte_t *pte)
8103 {
8104 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8105+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
8106+}
8107+
8108+static inline void pmd_populate_user(struct mm_struct *mm,
8109+ pmd_t *pmd, pte_t *pte)
8110+{
8111+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8112 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
8113 }
8114
8115diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable-2level.h linux-3.1.1/arch/x86/include/asm/pgtable-2level.h
8116--- linux-3.1.1/arch/x86/include/asm/pgtable-2level.h 2011-11-11 15:19:27.000000000 -0500
8117+++ linux-3.1.1/arch/x86/include/asm/pgtable-2level.h 2011-11-16 18:39:07.000000000 -0500
8118@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
8119
8120 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8121 {
8122+ pax_open_kernel();
8123 *pmdp = pmd;
8124+ pax_close_kernel();
8125 }
8126
8127 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
8128diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable_32.h linux-3.1.1/arch/x86/include/asm/pgtable_32.h
8129--- linux-3.1.1/arch/x86/include/asm/pgtable_32.h 2011-11-11 15:19:27.000000000 -0500
8130+++ linux-3.1.1/arch/x86/include/asm/pgtable_32.h 2011-11-16 18:39:07.000000000 -0500
8131@@ -25,9 +25,6 @@
8132 struct mm_struct;
8133 struct vm_area_struct;
8134
8135-extern pgd_t swapper_pg_dir[1024];
8136-extern pgd_t initial_page_table[1024];
8137-
8138 static inline void pgtable_cache_init(void) { }
8139 static inline void check_pgt_cache(void) { }
8140 void paging_init(void);
8141@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
8142 # include <asm/pgtable-2level.h>
8143 #endif
8144
8145+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
8146+extern pgd_t initial_page_table[PTRS_PER_PGD];
8147+#ifdef CONFIG_X86_PAE
8148+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
8149+#endif
8150+
8151 #if defined(CONFIG_HIGHPTE)
8152 #define pte_offset_map(dir, address) \
8153 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
8154@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
8155 /* Clear a kernel PTE and flush it from the TLB */
8156 #define kpte_clear_flush(ptep, vaddr) \
8157 do { \
8158+ pax_open_kernel(); \
8159 pte_clear(&init_mm, (vaddr), (ptep)); \
8160+ pax_close_kernel(); \
8161 __flush_tlb_one((vaddr)); \
8162 } while (0)
8163
8164@@ -74,6 +79,9 @@ do { \
8165
8166 #endif /* !__ASSEMBLY__ */
8167
8168+#define HAVE_ARCH_UNMAPPED_AREA
8169+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
8170+
8171 /*
8172 * kern_addr_valid() is (1) for FLATMEM and (0) for
8173 * SPARSEMEM and DISCONTIGMEM
8174diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable_32_types.h linux-3.1.1/arch/x86/include/asm/pgtable_32_types.h
8175--- linux-3.1.1/arch/x86/include/asm/pgtable_32_types.h 2011-11-11 15:19:27.000000000 -0500
8176+++ linux-3.1.1/arch/x86/include/asm/pgtable_32_types.h 2011-11-16 18:39:07.000000000 -0500
8177@@ -8,7 +8,7 @@
8178 */
8179 #ifdef CONFIG_X86_PAE
8180 # include <asm/pgtable-3level_types.h>
8181-# define PMD_SIZE (1UL << PMD_SHIFT)
8182+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
8183 # define PMD_MASK (~(PMD_SIZE - 1))
8184 #else
8185 # include <asm/pgtable-2level_types.h>
8186@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
8187 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
8188 #endif
8189
8190+#ifdef CONFIG_PAX_KERNEXEC
8191+#ifndef __ASSEMBLY__
8192+extern unsigned char MODULES_EXEC_VADDR[];
8193+extern unsigned char MODULES_EXEC_END[];
8194+#endif
8195+#include <asm/boot.h>
8196+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
8197+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
8198+#else
8199+#define ktla_ktva(addr) (addr)
8200+#define ktva_ktla(addr) (addr)
8201+#endif
8202+
8203 #define MODULES_VADDR VMALLOC_START
8204 #define MODULES_END VMALLOC_END
8205 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
8206diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable-3level.h linux-3.1.1/arch/x86/include/asm/pgtable-3level.h
8207--- linux-3.1.1/arch/x86/include/asm/pgtable-3level.h 2011-11-11 15:19:27.000000000 -0500
8208+++ linux-3.1.1/arch/x86/include/asm/pgtable-3level.h 2011-11-16 18:39:07.000000000 -0500
8209@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
8210
8211 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8212 {
8213+ pax_open_kernel();
8214 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8215+ pax_close_kernel();
8216 }
8217
8218 static inline void native_set_pud(pud_t *pudp, pud_t pud)
8219 {
8220+ pax_open_kernel();
8221 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8222+ pax_close_kernel();
8223 }
8224
8225 /*
8226diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable_64.h linux-3.1.1/arch/x86/include/asm/pgtable_64.h
8227--- linux-3.1.1/arch/x86/include/asm/pgtable_64.h 2011-11-11 15:19:27.000000000 -0500
8228+++ linux-3.1.1/arch/x86/include/asm/pgtable_64.h 2011-11-16 18:39:07.000000000 -0500
8229@@ -16,10 +16,13 @@
8230
8231 extern pud_t level3_kernel_pgt[512];
8232 extern pud_t level3_ident_pgt[512];
8233+extern pud_t level3_vmalloc_pgt[512];
8234+extern pud_t level3_vmemmap_pgt[512];
8235+extern pud_t level2_vmemmap_pgt[512];
8236 extern pmd_t level2_kernel_pgt[512];
8237 extern pmd_t level2_fixmap_pgt[512];
8238-extern pmd_t level2_ident_pgt[512];
8239-extern pgd_t init_level4_pgt[];
8240+extern pmd_t level2_ident_pgt[512*2];
8241+extern pgd_t init_level4_pgt[512];
8242
8243 #define swapper_pg_dir init_level4_pgt
8244
8245@@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
8246
8247 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8248 {
8249+ pax_open_kernel();
8250 *pmdp = pmd;
8251+ pax_close_kernel();
8252 }
8253
8254 static inline void native_pmd_clear(pmd_t *pmd)
8255@@ -107,6 +112,13 @@ static inline void native_pud_clear(pud_
8256
8257 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8258 {
8259+ pax_open_kernel();
8260+ *pgdp = pgd;
8261+ pax_close_kernel();
8262+}
8263+
8264+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
8265+{
8266 *pgdp = pgd;
8267 }
8268
8269diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable_64_types.h linux-3.1.1/arch/x86/include/asm/pgtable_64_types.h
8270--- linux-3.1.1/arch/x86/include/asm/pgtable_64_types.h 2011-11-11 15:19:27.000000000 -0500
8271+++ linux-3.1.1/arch/x86/include/asm/pgtable_64_types.h 2011-11-16 18:39:07.000000000 -0500
8272@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8273 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8274 #define MODULES_END _AC(0xffffffffff000000, UL)
8275 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8276+#define MODULES_EXEC_VADDR MODULES_VADDR
8277+#define MODULES_EXEC_END MODULES_END
8278+
8279+#define ktla_ktva(addr) (addr)
8280+#define ktva_ktla(addr) (addr)
8281
8282 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8283diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable.h linux-3.1.1/arch/x86/include/asm/pgtable.h
8284--- linux-3.1.1/arch/x86/include/asm/pgtable.h 2011-11-11 15:19:27.000000000 -0500
8285+++ linux-3.1.1/arch/x86/include/asm/pgtable.h 2011-11-16 18:39:07.000000000 -0500
8286@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm
8287
8288 #ifndef __PAGETABLE_PUD_FOLDED
8289 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
8290+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
8291 #define pgd_clear(pgd) native_pgd_clear(pgd)
8292 #endif
8293
8294@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm
8295
8296 #define arch_end_context_switch(prev) do {} while(0)
8297
8298+#define pax_open_kernel() native_pax_open_kernel()
8299+#define pax_close_kernel() native_pax_close_kernel()
8300 #endif /* CONFIG_PARAVIRT */
8301
8302+#define __HAVE_ARCH_PAX_OPEN_KERNEL
8303+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8304+
8305+#ifdef CONFIG_PAX_KERNEXEC
8306+static inline unsigned long native_pax_open_kernel(void)
8307+{
8308+ unsigned long cr0;
8309+
8310+ preempt_disable();
8311+ barrier();
8312+ cr0 = read_cr0() ^ X86_CR0_WP;
8313+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
8314+ write_cr0(cr0);
8315+ return cr0 ^ X86_CR0_WP;
8316+}
8317+
8318+static inline unsigned long native_pax_close_kernel(void)
8319+{
8320+ unsigned long cr0;
8321+
8322+ cr0 = read_cr0() ^ X86_CR0_WP;
8323+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8324+ write_cr0(cr0);
8325+ barrier();
8326+ preempt_enable_no_resched();
8327+ return cr0 ^ X86_CR0_WP;
8328+}
8329+#else
8330+static inline unsigned long native_pax_open_kernel(void) { return 0; }
8331+static inline unsigned long native_pax_close_kernel(void) { return 0; }
8332+#endif
8333+
8334 /*
8335 * The following only work if pte_present() is true.
8336 * Undefined behaviour if not..
8337 */
8338+static inline int pte_user(pte_t pte)
8339+{
8340+ return pte_val(pte) & _PAGE_USER;
8341+}
8342+
8343 static inline int pte_dirty(pte_t pte)
8344 {
8345 return pte_flags(pte) & _PAGE_DIRTY;
8346@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t
8347 return pte_clear_flags(pte, _PAGE_RW);
8348 }
8349
8350+static inline pte_t pte_mkread(pte_t pte)
8351+{
8352+ return __pte(pte_val(pte) | _PAGE_USER);
8353+}
8354+
8355 static inline pte_t pte_mkexec(pte_t pte)
8356 {
8357- return pte_clear_flags(pte, _PAGE_NX);
8358+#ifdef CONFIG_X86_PAE
8359+ if (__supported_pte_mask & _PAGE_NX)
8360+ return pte_clear_flags(pte, _PAGE_NX);
8361+ else
8362+#endif
8363+ return pte_set_flags(pte, _PAGE_USER);
8364+}
8365+
8366+static inline pte_t pte_exprotect(pte_t pte)
8367+{
8368+#ifdef CONFIG_X86_PAE
8369+ if (__supported_pte_mask & _PAGE_NX)
8370+ return pte_set_flags(pte, _PAGE_NX);
8371+ else
8372+#endif
8373+ return pte_clear_flags(pte, _PAGE_USER);
8374 }
8375
8376 static inline pte_t pte_mkdirty(pte_t pte)
8377@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long
8378 #endif
8379
8380 #ifndef __ASSEMBLY__
8381+
8382+#ifdef CONFIG_PAX_PER_CPU_PGD
8383+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8384+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8385+{
8386+ return cpu_pgd[cpu];
8387+}
8388+#endif
8389+
8390 #include <linux/mm_types.h>
8391
8392 static inline int pte_none(pte_t pte)
8393@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *p
8394
8395 static inline int pgd_bad(pgd_t pgd)
8396 {
8397- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8398+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8399 }
8400
8401 static inline int pgd_none(pgd_t pgd)
8402@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
8403 * pgd_offset() returns a (pgd_t *)
8404 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8405 */
8406-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8407+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8408+
8409+#ifdef CONFIG_PAX_PER_CPU_PGD
8410+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8411+#endif
8412+
8413 /*
8414 * a shortcut which implies the use of the kernel's pgd, instead
8415 * of a process's
8416@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
8417 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8418 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8419
8420+#ifdef CONFIG_X86_32
8421+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8422+#else
8423+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8424+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8425+
8426+#ifdef CONFIG_PAX_MEMORY_UDEREF
8427+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8428+#else
8429+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8430+#endif
8431+
8432+#endif
8433+
8434 #ifndef __ASSEMBLY__
8435
8436 extern int direct_gbpages;
8437@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(st
8438 * dst and src can be on the same page, but the range must not overlap,
8439 * and must not cross a page boundary.
8440 */
8441-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8442+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8443 {
8444- memcpy(dst, src, count * sizeof(pgd_t));
8445+ pax_open_kernel();
8446+ while (count--)
8447+ *dst++ = *src++;
8448+ pax_close_kernel();
8449 }
8450
8451+#ifdef CONFIG_PAX_PER_CPU_PGD
8452+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8453+#endif
8454+
8455+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8456+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8457+#else
8458+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8459+#endif
8460
8461 #include <asm-generic/pgtable.h>
8462 #endif /* __ASSEMBLY__ */
8463diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable_types.h linux-3.1.1/arch/x86/include/asm/pgtable_types.h
8464--- linux-3.1.1/arch/x86/include/asm/pgtable_types.h 2011-11-11 15:19:27.000000000 -0500
8465+++ linux-3.1.1/arch/x86/include/asm/pgtable_types.h 2011-11-16 18:39:07.000000000 -0500
8466@@ -16,13 +16,12 @@
8467 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8468 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8469 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8470-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8471+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8472 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8473 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8474 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8475-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8476-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8477-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
8478+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8479+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
8480 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8481
8482 /* If _PAGE_BIT_PRESENT is clear, we use these: */
8483@@ -40,7 +39,6 @@
8484 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8485 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8486 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8487-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8488 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8489 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8490 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8491@@ -57,8 +55,10 @@
8492
8493 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8494 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8495-#else
8496+#elif defined(CONFIG_KMEMCHECK)
8497 #define _PAGE_NX (_AT(pteval_t, 0))
8498+#else
8499+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8500 #endif
8501
8502 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8503@@ -96,6 +96,9 @@
8504 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8505 _PAGE_ACCESSED)
8506
8507+#define PAGE_READONLY_NOEXEC PAGE_READONLY
8508+#define PAGE_SHARED_NOEXEC PAGE_SHARED
8509+
8510 #define __PAGE_KERNEL_EXEC \
8511 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8512 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8513@@ -106,7 +109,7 @@
8514 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8515 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8516 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8517-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8518+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8519 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
8520 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
8521 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8522@@ -168,8 +171,8 @@
8523 * bits are combined, this will alow user to access the high address mapped
8524 * VDSO in the presence of CONFIG_COMPAT_VDSO
8525 */
8526-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8527-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8528+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8529+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8530 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8531 #endif
8532
8533@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t p
8534 {
8535 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
8536 }
8537+#endif
8538
8539+#if PAGETABLE_LEVELS == 3
8540+#include <asm-generic/pgtable-nopud.h>
8541+#endif
8542+
8543+#if PAGETABLE_LEVELS == 2
8544+#include <asm-generic/pgtable-nopmd.h>
8545+#endif
8546+
8547+#ifndef __ASSEMBLY__
8548 #if PAGETABLE_LEVELS > 3
8549 typedef struct { pudval_t pud; } pud_t;
8550
8551@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pu
8552 return pud.pud;
8553 }
8554 #else
8555-#include <asm-generic/pgtable-nopud.h>
8556-
8557 static inline pudval_t native_pud_val(pud_t pud)
8558 {
8559 return native_pgd_val(pud.pgd);
8560@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pm
8561 return pmd.pmd;
8562 }
8563 #else
8564-#include <asm-generic/pgtable-nopmd.h>
8565-
8566 static inline pmdval_t native_pmd_val(pmd_t pmd)
8567 {
8568 return native_pgd_val(pmd.pud.pgd);
8569@@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
8570
8571 extern pteval_t __supported_pte_mask;
8572 extern void set_nx(void);
8573-extern int nx_enabled;
8574
8575 #define pgprot_writecombine pgprot_writecombine
8576 extern pgprot_t pgprot_writecombine(pgprot_t prot);
8577diff -urNp linux-3.1.1/arch/x86/include/asm/processor.h linux-3.1.1/arch/x86/include/asm/processor.h
8578--- linux-3.1.1/arch/x86/include/asm/processor.h 2011-11-11 15:19:27.000000000 -0500
8579+++ linux-3.1.1/arch/x86/include/asm/processor.h 2011-11-16 18:39:07.000000000 -0500
8580@@ -266,7 +266,7 @@ struct tss_struct {
8581
8582 } ____cacheline_aligned;
8583
8584-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8585+extern struct tss_struct init_tss[NR_CPUS];
8586
8587 /*
8588 * Save the original ist values for checking stack pointers during debugging
8589@@ -858,11 +858,18 @@ static inline void spin_lock_prefetch(co
8590 */
8591 #define TASK_SIZE PAGE_OFFSET
8592 #define TASK_SIZE_MAX TASK_SIZE
8593+
8594+#ifdef CONFIG_PAX_SEGMEXEC
8595+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
8596+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8597+#else
8598 #define STACK_TOP TASK_SIZE
8599-#define STACK_TOP_MAX STACK_TOP
8600+#endif
8601+
8602+#define STACK_TOP_MAX TASK_SIZE
8603
8604 #define INIT_THREAD { \
8605- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8606+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8607 .vm86_info = NULL, \
8608 .sysenter_cs = __KERNEL_CS, \
8609 .io_bitmap_ptr = NULL, \
8610@@ -876,7 +883,7 @@ static inline void spin_lock_prefetch(co
8611 */
8612 #define INIT_TSS { \
8613 .x86_tss = { \
8614- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8615+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8616 .ss0 = __KERNEL_DS, \
8617 .ss1 = __KERNEL_CS, \
8618 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
8619@@ -887,11 +894,7 @@ static inline void spin_lock_prefetch(co
8620 extern unsigned long thread_saved_pc(struct task_struct *tsk);
8621
8622 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
8623-#define KSTK_TOP(info) \
8624-({ \
8625- unsigned long *__ptr = (unsigned long *)(info); \
8626- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
8627-})
8628+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
8629
8630 /*
8631 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8632@@ -906,7 +909,7 @@ extern unsigned long thread_saved_pc(str
8633 #define task_pt_regs(task) \
8634 ({ \
8635 struct pt_regs *__regs__; \
8636- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8637+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
8638 __regs__ - 1; \
8639 })
8640
8641@@ -916,13 +919,13 @@ extern unsigned long thread_saved_pc(str
8642 /*
8643 * User space process size. 47bits minus one guard page.
8644 */
8645-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
8646+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8647
8648 /* This decides where the kernel will search for a free chunk of vm
8649 * space during mmap's.
8650 */
8651 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
8652- 0xc0000000 : 0xFFFFe000)
8653+ 0xc0000000 : 0xFFFFf000)
8654
8655 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
8656 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8657@@ -933,11 +936,11 @@ extern unsigned long thread_saved_pc(str
8658 #define STACK_TOP_MAX TASK_SIZE_MAX
8659
8660 #define INIT_THREAD { \
8661- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8662+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8663 }
8664
8665 #define INIT_TSS { \
8666- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8667+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8668 }
8669
8670 /*
8671@@ -959,6 +962,10 @@ extern void start_thread(struct pt_regs
8672 */
8673 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
8674
8675+#ifdef CONFIG_PAX_SEGMEXEC
8676+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
8677+#endif
8678+
8679 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
8680
8681 /* Get/set a process' ability to use the timestamp counter instruction */
8682diff -urNp linux-3.1.1/arch/x86/include/asm/ptrace.h linux-3.1.1/arch/x86/include/asm/ptrace.h
8683--- linux-3.1.1/arch/x86/include/asm/ptrace.h 2011-11-11 15:19:27.000000000 -0500
8684+++ linux-3.1.1/arch/x86/include/asm/ptrace.h 2011-11-16 18:39:07.000000000 -0500
8685@@ -156,28 +156,29 @@ static inline unsigned long regs_return_
8686 }
8687
8688 /*
8689- * user_mode_vm(regs) determines whether a register set came from user mode.
8690+ * user_mode(regs) determines whether a register set came from user mode.
8691 * This is true if V8086 mode was enabled OR if the register set was from
8692 * protected mode with RPL-3 CS value. This tricky test checks that with
8693 * one comparison. Many places in the kernel can bypass this full check
8694- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
8695+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
8696+ * be used.
8697 */
8698-static inline int user_mode(struct pt_regs *regs)
8699+static inline int user_mode_novm(struct pt_regs *regs)
8700 {
8701 #ifdef CONFIG_X86_32
8702 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
8703 #else
8704- return !!(regs->cs & 3);
8705+ return !!(regs->cs & SEGMENT_RPL_MASK);
8706 #endif
8707 }
8708
8709-static inline int user_mode_vm(struct pt_regs *regs)
8710+static inline int user_mode(struct pt_regs *regs)
8711 {
8712 #ifdef CONFIG_X86_32
8713 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
8714 USER_RPL;
8715 #else
8716- return user_mode(regs);
8717+ return user_mode_novm(regs);
8718 #endif
8719 }
8720
8721@@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_r
8722 #ifdef CONFIG_X86_64
8723 static inline bool user_64bit_mode(struct pt_regs *regs)
8724 {
8725+ unsigned long cs = regs->cs & 0xffff;
8726 #ifndef CONFIG_PARAVIRT
8727 /*
8728 * On non-paravirt systems, this is the only long mode CPL 3
8729 * selector. We do not allow long mode selectors in the LDT.
8730 */
8731- return regs->cs == __USER_CS;
8732+ return cs == __USER_CS;
8733 #else
8734 /* Headers are too twisted for this to go in paravirt.h. */
8735- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
8736+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
8737 #endif
8738 }
8739 #endif
8740diff -urNp linux-3.1.1/arch/x86/include/asm/reboot.h linux-3.1.1/arch/x86/include/asm/reboot.h
8741--- linux-3.1.1/arch/x86/include/asm/reboot.h 2011-11-11 15:19:27.000000000 -0500
8742+++ linux-3.1.1/arch/x86/include/asm/reboot.h 2011-11-16 18:39:07.000000000 -0500
8743@@ -6,19 +6,19 @@
8744 struct pt_regs;
8745
8746 struct machine_ops {
8747- void (*restart)(char *cmd);
8748- void (*halt)(void);
8749- void (*power_off)(void);
8750+ void (* __noreturn restart)(char *cmd);
8751+ void (* __noreturn halt)(void);
8752+ void (* __noreturn power_off)(void);
8753 void (*shutdown)(void);
8754 void (*crash_shutdown)(struct pt_regs *);
8755- void (*emergency_restart)(void);
8756-};
8757+ void (* __noreturn emergency_restart)(void);
8758+} __no_const;
8759
8760 extern struct machine_ops machine_ops;
8761
8762 void native_machine_crash_shutdown(struct pt_regs *regs);
8763 void native_machine_shutdown(void);
8764-void machine_real_restart(unsigned int type);
8765+void machine_real_restart(unsigned int type) __noreturn;
8766 /* These must match dispatch_table in reboot_32.S */
8767 #define MRR_BIOS 0
8768 #define MRR_APM 1
8769diff -urNp linux-3.1.1/arch/x86/include/asm/rwsem.h linux-3.1.1/arch/x86/include/asm/rwsem.h
8770--- linux-3.1.1/arch/x86/include/asm/rwsem.h 2011-11-11 15:19:27.000000000 -0500
8771+++ linux-3.1.1/arch/x86/include/asm/rwsem.h 2011-11-16 18:39:07.000000000 -0500
8772@@ -64,6 +64,14 @@ static inline void __down_read(struct rw
8773 {
8774 asm volatile("# beginning down_read\n\t"
8775 LOCK_PREFIX _ASM_INC "(%1)\n\t"
8776+
8777+#ifdef CONFIG_PAX_REFCOUNT
8778+ "jno 0f\n"
8779+ LOCK_PREFIX _ASM_DEC "(%1)\n"
8780+ "int $4\n0:\n"
8781+ _ASM_EXTABLE(0b, 0b)
8782+#endif
8783+
8784 /* adds 0x00000001 */
8785 " jns 1f\n"
8786 " call call_rwsem_down_read_failed\n"
8787@@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
8788 "1:\n\t"
8789 " mov %1,%2\n\t"
8790 " add %3,%2\n\t"
8791+
8792+#ifdef CONFIG_PAX_REFCOUNT
8793+ "jno 0f\n"
8794+ "sub %3,%2\n"
8795+ "int $4\n0:\n"
8796+ _ASM_EXTABLE(0b, 0b)
8797+#endif
8798+
8799 " jle 2f\n\t"
8800 LOCK_PREFIX " cmpxchg %2,%0\n\t"
8801 " jnz 1b\n\t"
8802@@ -104,6 +120,14 @@ static inline void __down_write_nested(s
8803 long tmp;
8804 asm volatile("# beginning down_write\n\t"
8805 LOCK_PREFIX " xadd %1,(%2)\n\t"
8806+
8807+#ifdef CONFIG_PAX_REFCOUNT
8808+ "jno 0f\n"
8809+ "mov %1,(%2)\n"
8810+ "int $4\n0:\n"
8811+ _ASM_EXTABLE(0b, 0b)
8812+#endif
8813+
8814 /* adds 0xffff0001, returns the old value */
8815 " test %1,%1\n\t"
8816 /* was the count 0 before? */
8817@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
8818 long tmp;
8819 asm volatile("# beginning __up_read\n\t"
8820 LOCK_PREFIX " xadd %1,(%2)\n\t"
8821+
8822+#ifdef CONFIG_PAX_REFCOUNT
8823+ "jno 0f\n"
8824+ "mov %1,(%2)\n"
8825+ "int $4\n0:\n"
8826+ _ASM_EXTABLE(0b, 0b)
8827+#endif
8828+
8829 /* subtracts 1, returns the old value */
8830 " jns 1f\n\t"
8831 " call call_rwsem_wake\n" /* expects old value in %edx */
8832@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
8833 long tmp;
8834 asm volatile("# beginning __up_write\n\t"
8835 LOCK_PREFIX " xadd %1,(%2)\n\t"
8836+
8837+#ifdef CONFIG_PAX_REFCOUNT
8838+ "jno 0f\n"
8839+ "mov %1,(%2)\n"
8840+ "int $4\n0:\n"
8841+ _ASM_EXTABLE(0b, 0b)
8842+#endif
8843+
8844 /* subtracts 0xffff0001, returns the old value */
8845 " jns 1f\n\t"
8846 " call call_rwsem_wake\n" /* expects old value in %edx */
8847@@ -176,6 +216,14 @@ static inline void __downgrade_write(str
8848 {
8849 asm volatile("# beginning __downgrade_write\n\t"
8850 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
8851+
8852+#ifdef CONFIG_PAX_REFCOUNT
8853+ "jno 0f\n"
8854+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
8855+ "int $4\n0:\n"
8856+ _ASM_EXTABLE(0b, 0b)
8857+#endif
8858+
8859 /*
8860 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
8861 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
8862@@ -194,7 +242,15 @@ static inline void __downgrade_write(str
8863 */
8864 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
8865 {
8866- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
8867+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
8868+
8869+#ifdef CONFIG_PAX_REFCOUNT
8870+ "jno 0f\n"
8871+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
8872+ "int $4\n0:\n"
8873+ _ASM_EXTABLE(0b, 0b)
8874+#endif
8875+
8876 : "+m" (sem->count)
8877 : "er" (delta));
8878 }
8879@@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
8880 {
8881 long tmp = delta;
8882
8883- asm volatile(LOCK_PREFIX "xadd %0,%1"
8884+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
8885+
8886+#ifdef CONFIG_PAX_REFCOUNT
8887+ "jno 0f\n"
8888+ "mov %0,%1\n"
8889+ "int $4\n0:\n"
8890+ _ASM_EXTABLE(0b, 0b)
8891+#endif
8892+
8893 : "+r" (tmp), "+m" (sem->count)
8894 : : "memory");
8895
8896diff -urNp linux-3.1.1/arch/x86/include/asm/segment.h linux-3.1.1/arch/x86/include/asm/segment.h
8897--- linux-3.1.1/arch/x86/include/asm/segment.h 2011-11-11 15:19:27.000000000 -0500
8898+++ linux-3.1.1/arch/x86/include/asm/segment.h 2011-11-16 18:39:07.000000000 -0500
8899@@ -64,10 +64,15 @@
8900 * 26 - ESPFIX small SS
8901 * 27 - per-cpu [ offset to per-cpu data area ]
8902 * 28 - stack_canary-20 [ for stack protector ]
8903- * 29 - unused
8904- * 30 - unused
8905+ * 29 - PCI BIOS CS
8906+ * 30 - PCI BIOS DS
8907 * 31 - TSS for double fault handler
8908 */
8909+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
8910+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
8911+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
8912+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
8913+
8914 #define GDT_ENTRY_TLS_MIN 6
8915 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
8916
8917@@ -79,6 +84,8 @@
8918
8919 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
8920
8921+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
8922+
8923 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
8924
8925 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
8926@@ -104,6 +111,12 @@
8927 #define __KERNEL_STACK_CANARY 0
8928 #endif
8929
8930+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
8931+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
8932+
8933+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
8934+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
8935+
8936 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
8937
8938 /*
8939@@ -141,7 +154,7 @@
8940 */
8941
8942 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
8943-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
8944+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
8945
8946
8947 #else
8948@@ -165,6 +178,8 @@
8949 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
8950 #define __USER32_DS __USER_DS
8951
8952+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
8953+
8954 #define GDT_ENTRY_TSS 8 /* needs two entries */
8955 #define GDT_ENTRY_LDT 10 /* needs two entries */
8956 #define GDT_ENTRY_TLS_MIN 12
8957@@ -185,6 +200,7 @@
8958 #endif
8959
8960 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
8961+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
8962 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
8963 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
8964 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
8965diff -urNp linux-3.1.1/arch/x86/include/asm/smp.h linux-3.1.1/arch/x86/include/asm/smp.h
8966--- linux-3.1.1/arch/x86/include/asm/smp.h 2011-11-11 15:19:27.000000000 -0500
8967+++ linux-3.1.1/arch/x86/include/asm/smp.h 2011-11-16 18:39:07.000000000 -0500
8968@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
8969 /* cpus sharing the last level cache: */
8970 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
8971 DECLARE_PER_CPU(u16, cpu_llc_id);
8972-DECLARE_PER_CPU(int, cpu_number);
8973+DECLARE_PER_CPU(unsigned int, cpu_number);
8974
8975 static inline struct cpumask *cpu_sibling_mask(int cpu)
8976 {
8977@@ -77,7 +77,7 @@ struct smp_ops {
8978
8979 void (*send_call_func_ipi)(const struct cpumask *mask);
8980 void (*send_call_func_single_ipi)(int cpu);
8981-};
8982+} __no_const;
8983
8984 /* Globals due to paravirt */
8985 extern void set_cpu_sibling_map(int cpu);
8986@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
8987 extern int safe_smp_processor_id(void);
8988
8989 #elif defined(CONFIG_X86_64_SMP)
8990-#define raw_smp_processor_id() (percpu_read(cpu_number))
8991-
8992-#define stack_smp_processor_id() \
8993-({ \
8994- struct thread_info *ti; \
8995- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
8996- ti->cpu; \
8997-})
8998+#define raw_smp_processor_id() (percpu_read(cpu_number))
8999+#define stack_smp_processor_id() raw_smp_processor_id()
9000 #define safe_smp_processor_id() smp_processor_id()
9001
9002 #endif
9003diff -urNp linux-3.1.1/arch/x86/include/asm/spinlock.h linux-3.1.1/arch/x86/include/asm/spinlock.h
9004--- linux-3.1.1/arch/x86/include/asm/spinlock.h 2011-11-11 15:19:27.000000000 -0500
9005+++ linux-3.1.1/arch/x86/include/asm/spinlock.h 2011-11-16 18:39:07.000000000 -0500
9006@@ -248,6 +248,14 @@ static inline int arch_write_can_lock(ar
9007 static inline void arch_read_lock(arch_rwlock_t *rw)
9008 {
9009 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
9010+
9011+#ifdef CONFIG_PAX_REFCOUNT
9012+ "jno 0f\n"
9013+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
9014+ "int $4\n0:\n"
9015+ _ASM_EXTABLE(0b, 0b)
9016+#endif
9017+
9018 "jns 1f\n"
9019 "call __read_lock_failed\n\t"
9020 "1:\n"
9021@@ -257,6 +265,14 @@ static inline void arch_read_lock(arch_r
9022 static inline void arch_write_lock(arch_rwlock_t *rw)
9023 {
9024 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
9025+
9026+#ifdef CONFIG_PAX_REFCOUNT
9027+ "jno 0f\n"
9028+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
9029+ "int $4\n0:\n"
9030+ _ASM_EXTABLE(0b, 0b)
9031+#endif
9032+
9033 "jz 1f\n"
9034 "call __write_lock_failed\n\t"
9035 "1:\n"
9036@@ -286,13 +302,29 @@ static inline int arch_write_trylock(arc
9037
9038 static inline void arch_read_unlock(arch_rwlock_t *rw)
9039 {
9040- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
9041+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
9042+
9043+#ifdef CONFIG_PAX_REFCOUNT
9044+ "jno 0f\n"
9045+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
9046+ "int $4\n0:\n"
9047+ _ASM_EXTABLE(0b, 0b)
9048+#endif
9049+
9050 :"+m" (rw->lock) : : "memory");
9051 }
9052
9053 static inline void arch_write_unlock(arch_rwlock_t *rw)
9054 {
9055- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
9056+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
9057+
9058+#ifdef CONFIG_PAX_REFCOUNT
9059+ "jno 0f\n"
9060+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
9061+ "int $4\n0:\n"
9062+ _ASM_EXTABLE(0b, 0b)
9063+#endif
9064+
9065 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
9066 }
9067
9068diff -urNp linux-3.1.1/arch/x86/include/asm/stackprotector.h linux-3.1.1/arch/x86/include/asm/stackprotector.h
9069--- linux-3.1.1/arch/x86/include/asm/stackprotector.h 2011-11-11 15:19:27.000000000 -0500
9070+++ linux-3.1.1/arch/x86/include/asm/stackprotector.h 2011-11-16 18:39:07.000000000 -0500
9071@@ -48,7 +48,7 @@
9072 * head_32 for boot CPU and setup_per_cpu_areas() for others.
9073 */
9074 #define GDT_STACK_CANARY_INIT \
9075- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
9076+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
9077
9078 /*
9079 * Initialize the stackprotector canary value.
9080@@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
9081
9082 static inline void load_stack_canary_segment(void)
9083 {
9084-#ifdef CONFIG_X86_32
9085+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
9086 asm volatile ("mov %0, %%gs" : : "r" (0));
9087 #endif
9088 }
9089diff -urNp linux-3.1.1/arch/x86/include/asm/stacktrace.h linux-3.1.1/arch/x86/include/asm/stacktrace.h
9090--- linux-3.1.1/arch/x86/include/asm/stacktrace.h 2011-11-11 15:19:27.000000000 -0500
9091+++ linux-3.1.1/arch/x86/include/asm/stacktrace.h 2011-11-16 18:39:07.000000000 -0500
9092@@ -11,28 +11,20 @@
9093
9094 extern int kstack_depth_to_print;
9095
9096-struct thread_info;
9097+struct task_struct;
9098 struct stacktrace_ops;
9099
9100-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
9101- unsigned long *stack,
9102- unsigned long bp,
9103- const struct stacktrace_ops *ops,
9104- void *data,
9105- unsigned long *end,
9106- int *graph);
9107-
9108-extern unsigned long
9109-print_context_stack(struct thread_info *tinfo,
9110- unsigned long *stack, unsigned long bp,
9111- const struct stacktrace_ops *ops, void *data,
9112- unsigned long *end, int *graph);
9113-
9114-extern unsigned long
9115-print_context_stack_bp(struct thread_info *tinfo,
9116- unsigned long *stack, unsigned long bp,
9117- const struct stacktrace_ops *ops, void *data,
9118- unsigned long *end, int *graph);
9119+typedef unsigned long walk_stack_t(struct task_struct *task,
9120+ void *stack_start,
9121+ unsigned long *stack,
9122+ unsigned long bp,
9123+ const struct stacktrace_ops *ops,
9124+ void *data,
9125+ unsigned long *end,
9126+ int *graph);
9127+
9128+extern walk_stack_t print_context_stack;
9129+extern walk_stack_t print_context_stack_bp;
9130
9131 /* Generic stack tracer with callbacks */
9132
9133@@ -40,7 +32,7 @@ struct stacktrace_ops {
9134 void (*address)(void *data, unsigned long address, int reliable);
9135 /* On negative return stop dumping */
9136 int (*stack)(void *data, char *name);
9137- walk_stack_t walk_stack;
9138+ walk_stack_t *walk_stack;
9139 };
9140
9141 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
9142diff -urNp linux-3.1.1/arch/x86/include/asm/sys_ia32.h linux-3.1.1/arch/x86/include/asm/sys_ia32.h
9143--- linux-3.1.1/arch/x86/include/asm/sys_ia32.h 2011-11-11 15:19:27.000000000 -0500
9144+++ linux-3.1.1/arch/x86/include/asm/sys_ia32.h 2011-11-16 18:39:07.000000000 -0500
9145@@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int
9146 compat_sigset_t __user *, unsigned int);
9147 asmlinkage long sys32_alarm(unsigned int);
9148
9149-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
9150+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
9151 asmlinkage long sys32_sysfs(int, u32, u32);
9152
9153 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
9154diff -urNp linux-3.1.1/arch/x86/include/asm/system.h linux-3.1.1/arch/x86/include/asm/system.h
9155--- linux-3.1.1/arch/x86/include/asm/system.h 2011-11-11 15:19:27.000000000 -0500
9156+++ linux-3.1.1/arch/x86/include/asm/system.h 2011-11-16 18:39:07.000000000 -0500
9157@@ -129,7 +129,7 @@ do { \
9158 "call __switch_to\n\t" \
9159 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
9160 __switch_canary \
9161- "movq %P[thread_info](%%rsi),%%r8\n\t" \
9162+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
9163 "movq %%rax,%%rdi\n\t" \
9164 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
9165 "jnz ret_from_fork\n\t" \
9166@@ -140,7 +140,7 @@ do { \
9167 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
9168 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
9169 [_tif_fork] "i" (_TIF_FORK), \
9170- [thread_info] "i" (offsetof(struct task_struct, stack)), \
9171+ [thread_info] "m" (current_tinfo), \
9172 [current_task] "m" (current_task) \
9173 __switch_canary_iparam \
9174 : "memory", "cc" __EXTRA_CLOBBER)
9175@@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
9176 {
9177 unsigned long __limit;
9178 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
9179- return __limit + 1;
9180+ return __limit;
9181 }
9182
9183 static inline void native_clts(void)
9184@@ -397,12 +397,12 @@ void enable_hlt(void);
9185
9186 void cpu_idle_wait(void);
9187
9188-extern unsigned long arch_align_stack(unsigned long sp);
9189+#define arch_align_stack(x) ((x) & ~0xfUL)
9190 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
9191
9192 void default_idle(void);
9193
9194-void stop_this_cpu(void *dummy);
9195+void stop_this_cpu(void *dummy) __noreturn;
9196
9197 /*
9198 * Force strict CPU ordering.
9199diff -urNp linux-3.1.1/arch/x86/include/asm/thread_info.h linux-3.1.1/arch/x86/include/asm/thread_info.h
9200--- linux-3.1.1/arch/x86/include/asm/thread_info.h 2011-11-11 15:19:27.000000000 -0500
9201+++ linux-3.1.1/arch/x86/include/asm/thread_info.h 2011-11-16 18:39:07.000000000 -0500
9202@@ -10,6 +10,7 @@
9203 #include <linux/compiler.h>
9204 #include <asm/page.h>
9205 #include <asm/types.h>
9206+#include <asm/percpu.h>
9207
9208 /*
9209 * low level task data that entry.S needs immediate access to
9210@@ -24,7 +25,6 @@ struct exec_domain;
9211 #include <linux/atomic.h>
9212
9213 struct thread_info {
9214- struct task_struct *task; /* main task structure */
9215 struct exec_domain *exec_domain; /* execution domain */
9216 __u32 flags; /* low level flags */
9217 __u32 status; /* thread synchronous flags */
9218@@ -34,18 +34,12 @@ struct thread_info {
9219 mm_segment_t addr_limit;
9220 struct restart_block restart_block;
9221 void __user *sysenter_return;
9222-#ifdef CONFIG_X86_32
9223- unsigned long previous_esp; /* ESP of the previous stack in
9224- case of nested (IRQ) stacks
9225- */
9226- __u8 supervisor_stack[0];
9227-#endif
9228+ unsigned long lowest_stack;
9229 int uaccess_err;
9230 };
9231
9232-#define INIT_THREAD_INFO(tsk) \
9233+#define INIT_THREAD_INFO \
9234 { \
9235- .task = &tsk, \
9236 .exec_domain = &default_exec_domain, \
9237 .flags = 0, \
9238 .cpu = 0, \
9239@@ -56,7 +50,7 @@ struct thread_info {
9240 }, \
9241 }
9242
9243-#define init_thread_info (init_thread_union.thread_info)
9244+#define init_thread_info (init_thread_union.stack)
9245 #define init_stack (init_thread_union.stack)
9246
9247 #else /* !__ASSEMBLY__ */
9248@@ -170,6 +164,23 @@ struct thread_info {
9249 ret; \
9250 })
9251
9252+#ifdef __ASSEMBLY__
9253+/* how to get the thread information struct from ASM */
9254+#define GET_THREAD_INFO(reg) \
9255+ mov PER_CPU_VAR(current_tinfo), reg
9256+
9257+/* use this one if reg already contains %esp */
9258+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9259+#else
9260+/* how to get the thread information struct from C */
9261+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
9262+
9263+static __always_inline struct thread_info *current_thread_info(void)
9264+{
9265+ return percpu_read_stable(current_tinfo);
9266+}
9267+#endif
9268+
9269 #ifdef CONFIG_X86_32
9270
9271 #define STACK_WARN (THREAD_SIZE/8)
9272@@ -180,35 +191,13 @@ struct thread_info {
9273 */
9274 #ifndef __ASSEMBLY__
9275
9276-
9277 /* how to get the current stack pointer from C */
9278 register unsigned long current_stack_pointer asm("esp") __used;
9279
9280-/* how to get the thread information struct from C */
9281-static inline struct thread_info *current_thread_info(void)
9282-{
9283- return (struct thread_info *)
9284- (current_stack_pointer & ~(THREAD_SIZE - 1));
9285-}
9286-
9287-#else /* !__ASSEMBLY__ */
9288-
9289-/* how to get the thread information struct from ASM */
9290-#define GET_THREAD_INFO(reg) \
9291- movl $-THREAD_SIZE, reg; \
9292- andl %esp, reg
9293-
9294-/* use this one if reg already contains %esp */
9295-#define GET_THREAD_INFO_WITH_ESP(reg) \
9296- andl $-THREAD_SIZE, reg
9297-
9298 #endif
9299
9300 #else /* X86_32 */
9301
9302-#include <asm/percpu.h>
9303-#define KERNEL_STACK_OFFSET (5*8)
9304-
9305 /*
9306 * macros/functions for gaining access to the thread information structure
9307 * preempt_count needs to be 1 initially, until the scheduler is functional.
9308@@ -216,21 +205,8 @@ static inline struct thread_info *curren
9309 #ifndef __ASSEMBLY__
9310 DECLARE_PER_CPU(unsigned long, kernel_stack);
9311
9312-static inline struct thread_info *current_thread_info(void)
9313-{
9314- struct thread_info *ti;
9315- ti = (void *)(percpu_read_stable(kernel_stack) +
9316- KERNEL_STACK_OFFSET - THREAD_SIZE);
9317- return ti;
9318-}
9319-
9320-#else /* !__ASSEMBLY__ */
9321-
9322-/* how to get the thread information struct from ASM */
9323-#define GET_THREAD_INFO(reg) \
9324- movq PER_CPU_VAR(kernel_stack),reg ; \
9325- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
9326-
9327+/* how to get the current stack pointer from C */
9328+register unsigned long current_stack_pointer asm("rsp") __used;
9329 #endif
9330
9331 #endif /* !X86_32 */
9332@@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
9333 extern void free_thread_info(struct thread_info *ti);
9334 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
9335 #define arch_task_cache_init arch_task_cache_init
9336+
9337+#define __HAVE_THREAD_FUNCTIONS
9338+#define task_thread_info(task) (&(task)->tinfo)
9339+#define task_stack_page(task) ((task)->stack)
9340+#define setup_thread_stack(p, org) do {} while (0)
9341+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
9342+
9343+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
9344+extern struct task_struct *alloc_task_struct_node(int node);
9345+extern void free_task_struct(struct task_struct *);
9346+
9347 #endif
9348 #endif /* _ASM_X86_THREAD_INFO_H */
9349diff -urNp linux-3.1.1/arch/x86/include/asm/uaccess_32.h linux-3.1.1/arch/x86/include/asm/uaccess_32.h
9350--- linux-3.1.1/arch/x86/include/asm/uaccess_32.h 2011-11-11 15:19:27.000000000 -0500
9351+++ linux-3.1.1/arch/x86/include/asm/uaccess_32.h 2011-11-16 18:40:08.000000000 -0500
9352@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_u
9353 static __always_inline unsigned long __must_check
9354 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9355 {
9356+ pax_track_stack();
9357+
9358+ if ((long)n < 0)
9359+ return n;
9360+
9361 if (__builtin_constant_p(n)) {
9362 unsigned long ret;
9363
9364@@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to,
9365 return ret;
9366 }
9367 }
9368+ if (!__builtin_constant_p(n))
9369+ check_object_size(from, n, true);
9370 return __copy_to_user_ll(to, from, n);
9371 }
9372
9373@@ -82,12 +89,16 @@ static __always_inline unsigned long __m
9374 __copy_to_user(void __user *to, const void *from, unsigned long n)
9375 {
9376 might_fault();
9377+
9378 return __copy_to_user_inatomic(to, from, n);
9379 }
9380
9381 static __always_inline unsigned long
9382 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9383 {
9384+ if ((long)n < 0)
9385+ return n;
9386+
9387 /* Avoid zeroing the tail if the copy fails..
9388 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9389 * but as the zeroing behaviour is only significant when n is not
9390@@ -137,6 +148,12 @@ static __always_inline unsigned long
9391 __copy_from_user(void *to, const void __user *from, unsigned long n)
9392 {
9393 might_fault();
9394+
9395+ pax_track_stack();
9396+
9397+ if ((long)n < 0)
9398+ return n;
9399+
9400 if (__builtin_constant_p(n)) {
9401 unsigned long ret;
9402
9403@@ -152,6 +169,8 @@ __copy_from_user(void *to, const void __
9404 return ret;
9405 }
9406 }
9407+ if (!__builtin_constant_p(n))
9408+ check_object_size(to, n, false);
9409 return __copy_from_user_ll(to, from, n);
9410 }
9411
9412@@ -159,6 +178,10 @@ static __always_inline unsigned long __c
9413 const void __user *from, unsigned long n)
9414 {
9415 might_fault();
9416+
9417+ if ((long)n < 0)
9418+ return n;
9419+
9420 if (__builtin_constant_p(n)) {
9421 unsigned long ret;
9422
9423@@ -181,15 +204,19 @@ static __always_inline unsigned long
9424 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9425 unsigned long n)
9426 {
9427- return __copy_from_user_ll_nocache_nozero(to, from, n);
9428-}
9429+ if ((long)n < 0)
9430+ return n;
9431
9432-unsigned long __must_check copy_to_user(void __user *to,
9433- const void *from, unsigned long n);
9434-unsigned long __must_check _copy_from_user(void *to,
9435- const void __user *from,
9436- unsigned long n);
9437+ return __copy_from_user_ll_nocache_nozero(to, from, n);
9438+}
9439
9440+extern void copy_to_user_overflow(void)
9441+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9442+ __compiletime_error("copy_to_user() buffer size is not provably correct")
9443+#else
9444+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
9445+#endif
9446+;
9447
9448 extern void copy_from_user_overflow(void)
9449 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9450@@ -199,17 +226,61 @@ extern void copy_from_user_overflow(void
9451 #endif
9452 ;
9453
9454-static inline unsigned long __must_check copy_from_user(void *to,
9455- const void __user *from,
9456- unsigned long n)
9457+/**
9458+ * copy_to_user: - Copy a block of data into user space.
9459+ * @to: Destination address, in user space.
9460+ * @from: Source address, in kernel space.
9461+ * @n: Number of bytes to copy.
9462+ *
9463+ * Context: User context only. This function may sleep.
9464+ *
9465+ * Copy data from kernel space to user space.
9466+ *
9467+ * Returns number of bytes that could not be copied.
9468+ * On success, this will be zero.
9469+ */
9470+static inline unsigned long __must_check
9471+copy_to_user(void __user *to, const void *from, unsigned long n)
9472+{
9473+ int sz = __compiletime_object_size(from);
9474+
9475+ if (unlikely(sz != -1 && sz < n))
9476+ copy_to_user_overflow();
9477+ else if (access_ok(VERIFY_WRITE, to, n))
9478+ n = __copy_to_user(to, from, n);
9479+ return n;
9480+}
9481+
9482+/**
9483+ * copy_from_user: - Copy a block of data from user space.
9484+ * @to: Destination address, in kernel space.
9485+ * @from: Source address, in user space.
9486+ * @n: Number of bytes to copy.
9487+ *
9488+ * Context: User context only. This function may sleep.
9489+ *
9490+ * Copy data from user space to kernel space.
9491+ *
9492+ * Returns number of bytes that could not be copied.
9493+ * On success, this will be zero.
9494+ *
9495+ * If some data could not be copied, this function will pad the copied
9496+ * data to the requested size using zero bytes.
9497+ */
9498+static inline unsigned long __must_check
9499+copy_from_user(void *to, const void __user *from, unsigned long n)
9500 {
9501 int sz = __compiletime_object_size(to);
9502
9503- if (likely(sz == -1 || sz >= n))
9504- n = _copy_from_user(to, from, n);
9505- else
9506+ if (unlikely(sz != -1 && sz < n))
9507 copy_from_user_overflow();
9508-
9509+ else if (access_ok(VERIFY_READ, from, n))
9510+ n = __copy_from_user(to, from, n);
9511+ else if ((long)n > 0) {
9512+ if (!__builtin_constant_p(n))
9513+ check_object_size(to, n, false);
9514+ memset(to, 0, n);
9515+ }
9516 return n;
9517 }
9518
9519diff -urNp linux-3.1.1/arch/x86/include/asm/uaccess_64.h linux-3.1.1/arch/x86/include/asm/uaccess_64.h
9520--- linux-3.1.1/arch/x86/include/asm/uaccess_64.h 2011-11-11 15:19:27.000000000 -0500
9521+++ linux-3.1.1/arch/x86/include/asm/uaccess_64.h 2011-11-16 18:40:08.000000000 -0500
9522@@ -10,6 +10,9 @@
9523 #include <asm/alternative.h>
9524 #include <asm/cpufeature.h>
9525 #include <asm/page.h>
9526+#include <asm/pgtable.h>
9527+
9528+#define set_fs(x) (current_thread_info()->addr_limit = (x))
9529
9530 /*
9531 * Copy To/From Userspace
9532@@ -36,26 +39,26 @@ copy_user_generic(void *to, const void *
9533 return ret;
9534 }
9535
9536-__must_check unsigned long
9537-_copy_to_user(void __user *to, const void *from, unsigned len);
9538-__must_check unsigned long
9539-_copy_from_user(void *to, const void __user *from, unsigned len);
9540+static __always_inline __must_check unsigned long
9541+__copy_to_user(void __user *to, const void *from, unsigned len);
9542+static __always_inline __must_check unsigned long
9543+__copy_from_user(void *to, const void __user *from, unsigned len);
9544 __must_check unsigned long
9545 copy_in_user(void __user *to, const void __user *from, unsigned len);
9546
9547 static inline unsigned long __must_check copy_from_user(void *to,
9548 const void __user *from,
9549- unsigned long n)
9550+ unsigned n)
9551 {
9552- int sz = __compiletime_object_size(to);
9553-
9554 might_fault();
9555- if (likely(sz == -1 || sz >= n))
9556- n = _copy_from_user(to, from, n);
9557-#ifdef CONFIG_DEBUG_VM
9558- else
9559- WARN(1, "Buffer overflow detected!\n");
9560-#endif
9561+
9562+ if (access_ok(VERIFY_READ, from, n))
9563+ n = __copy_from_user(to, from, n);
9564+ else if ((int)n > 0) {
9565+ if (!__builtin_constant_p(n))
9566+ check_object_size(to, n, false);
9567+ memset(to, 0, n);
9568+ }
9569 return n;
9570 }
9571
9572@@ -64,110 +67,198 @@ int copy_to_user(void __user *dst, const
9573 {
9574 might_fault();
9575
9576- return _copy_to_user(dst, src, size);
9577+ if (access_ok(VERIFY_WRITE, dst, size))
9578+ size = __copy_to_user(dst, src, size);
9579+ return size;
9580 }
9581
9582 static __always_inline __must_check
9583-int __copy_from_user(void *dst, const void __user *src, unsigned size)
9584+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9585 {
9586- int ret = 0;
9587+ int sz = __compiletime_object_size(dst);
9588+ unsigned ret = 0;
9589
9590 might_fault();
9591- if (!__builtin_constant_p(size))
9592- return copy_user_generic(dst, (__force void *)src, size);
9593+
9594+ pax_track_stack();
9595+
9596+ if ((int)size < 0)
9597+ return size;
9598+
9599+#ifdef CONFIG_PAX_MEMORY_UDEREF
9600+ if (!__access_ok(VERIFY_READ, src, size))
9601+ return size;
9602+#endif
9603+
9604+ if (unlikely(sz != -1 && sz < size)) {
9605+#ifdef CONFIG_DEBUG_VM
9606+ WARN(1, "Buffer overflow detected!\n");
9607+#endif
9608+ return size;
9609+ }
9610+
9611+ if (!__builtin_constant_p(size)) {
9612+ check_object_size(dst, size, false);
9613+
9614+#ifdef CONFIG_PAX_MEMORY_UDEREF
9615+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9616+ src += PAX_USER_SHADOW_BASE;
9617+#endif
9618+
9619+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
9620+ }
9621 switch (size) {
9622- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9623+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
9624 ret, "b", "b", "=q", 1);
9625 return ret;
9626- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
9627+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
9628 ret, "w", "w", "=r", 2);
9629 return ret;
9630- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
9631+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
9632 ret, "l", "k", "=r", 4);
9633 return ret;
9634- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
9635+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9636 ret, "q", "", "=r", 8);
9637 return ret;
9638 case 10:
9639- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9640+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9641 ret, "q", "", "=r", 10);
9642 if (unlikely(ret))
9643 return ret;
9644 __get_user_asm(*(u16 *)(8 + (char *)dst),
9645- (u16 __user *)(8 + (char __user *)src),
9646+ (const u16 __user *)(8 + (const char __user *)src),
9647 ret, "w", "w", "=r", 2);
9648 return ret;
9649 case 16:
9650- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9651+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9652 ret, "q", "", "=r", 16);
9653 if (unlikely(ret))
9654 return ret;
9655 __get_user_asm(*(u64 *)(8 + (char *)dst),
9656- (u64 __user *)(8 + (char __user *)src),
9657+ (const u64 __user *)(8 + (const char __user *)src),
9658 ret, "q", "", "=r", 8);
9659 return ret;
9660 default:
9661- return copy_user_generic(dst, (__force void *)src, size);
9662+
9663+#ifdef CONFIG_PAX_MEMORY_UDEREF
9664+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9665+ src += PAX_USER_SHADOW_BASE;
9666+#endif
9667+
9668+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
9669 }
9670 }
9671
9672 static __always_inline __must_check
9673-int __copy_to_user(void __user *dst, const void *src, unsigned size)
9674+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
9675 {
9676- int ret = 0;
9677+ int sz = __compiletime_object_size(src);
9678+ unsigned ret = 0;
9679
9680 might_fault();
9681- if (!__builtin_constant_p(size))
9682- return copy_user_generic((__force void *)dst, src, size);
9683+
9684+ pax_track_stack();
9685+
9686+ if ((int)size < 0)
9687+ return size;
9688+
9689+#ifdef CONFIG_PAX_MEMORY_UDEREF
9690+ if (!__access_ok(VERIFY_WRITE, dst, size))
9691+ return size;
9692+#endif
9693+
9694+ if (unlikely(sz != -1 && sz < size)) {
9695+#ifdef CONFIG_DEBUG_VM
9696+ WARN(1, "Buffer overflow detected!\n");
9697+#endif
9698+ return size;
9699+ }
9700+
9701+ if (!__builtin_constant_p(size)) {
9702+ check_object_size(src, size, true);
9703+
9704+#ifdef CONFIG_PAX_MEMORY_UDEREF
9705+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9706+ dst += PAX_USER_SHADOW_BASE;
9707+#endif
9708+
9709+ return copy_user_generic((__force_kernel void *)dst, src, size);
9710+ }
9711 switch (size) {
9712- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
9713+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
9714 ret, "b", "b", "iq", 1);
9715 return ret;
9716- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
9717+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
9718 ret, "w", "w", "ir", 2);
9719 return ret;
9720- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
9721+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
9722 ret, "l", "k", "ir", 4);
9723 return ret;
9724- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
9725+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9726 ret, "q", "", "er", 8);
9727 return ret;
9728 case 10:
9729- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9730+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9731 ret, "q", "", "er", 10);
9732 if (unlikely(ret))
9733 return ret;
9734 asm("":::"memory");
9735- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
9736+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
9737 ret, "w", "w", "ir", 2);
9738 return ret;
9739 case 16:
9740- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9741+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9742 ret, "q", "", "er", 16);
9743 if (unlikely(ret))
9744 return ret;
9745 asm("":::"memory");
9746- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
9747+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
9748 ret, "q", "", "er", 8);
9749 return ret;
9750 default:
9751- return copy_user_generic((__force void *)dst, src, size);
9752+
9753+#ifdef CONFIG_PAX_MEMORY_UDEREF
9754+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9755+ dst += PAX_USER_SHADOW_BASE;
9756+#endif
9757+
9758+ return copy_user_generic((__force_kernel void *)dst, src, size);
9759 }
9760 }
9761
9762 static __always_inline __must_check
9763-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9764+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9765 {
9766- int ret = 0;
9767+ unsigned ret = 0;
9768
9769 might_fault();
9770- if (!__builtin_constant_p(size))
9771- return copy_user_generic((__force void *)dst,
9772- (__force void *)src, size);
9773+
9774+ if ((int)size < 0)
9775+ return size;
9776+
9777+#ifdef CONFIG_PAX_MEMORY_UDEREF
9778+ if (!__access_ok(VERIFY_READ, src, size))
9779+ return size;
9780+ if (!__access_ok(VERIFY_WRITE, dst, size))
9781+ return size;
9782+#endif
9783+
9784+ if (!__builtin_constant_p(size)) {
9785+
9786+#ifdef CONFIG_PAX_MEMORY_UDEREF
9787+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9788+ src += PAX_USER_SHADOW_BASE;
9789+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9790+ dst += PAX_USER_SHADOW_BASE;
9791+#endif
9792+
9793+ return copy_user_generic((__force_kernel void *)dst,
9794+ (__force_kernel const void *)src, size);
9795+ }
9796 switch (size) {
9797 case 1: {
9798 u8 tmp;
9799- __get_user_asm(tmp, (u8 __user *)src,
9800+ __get_user_asm(tmp, (const u8 __user *)src,
9801 ret, "b", "b", "=q", 1);
9802 if (likely(!ret))
9803 __put_user_asm(tmp, (u8 __user *)dst,
9804@@ -176,7 +267,7 @@ int __copy_in_user(void __user *dst, con
9805 }
9806 case 2: {
9807 u16 tmp;
9808- __get_user_asm(tmp, (u16 __user *)src,
9809+ __get_user_asm(tmp, (const u16 __user *)src,
9810 ret, "w", "w", "=r", 2);
9811 if (likely(!ret))
9812 __put_user_asm(tmp, (u16 __user *)dst,
9813@@ -186,7 +277,7 @@ int __copy_in_user(void __user *dst, con
9814
9815 case 4: {
9816 u32 tmp;
9817- __get_user_asm(tmp, (u32 __user *)src,
9818+ __get_user_asm(tmp, (const u32 __user *)src,
9819 ret, "l", "k", "=r", 4);
9820 if (likely(!ret))
9821 __put_user_asm(tmp, (u32 __user *)dst,
9822@@ -195,7 +286,7 @@ int __copy_in_user(void __user *dst, con
9823 }
9824 case 8: {
9825 u64 tmp;
9826- __get_user_asm(tmp, (u64 __user *)src,
9827+ __get_user_asm(tmp, (const u64 __user *)src,
9828 ret, "q", "", "=r", 8);
9829 if (likely(!ret))
9830 __put_user_asm(tmp, (u64 __user *)dst,
9831@@ -203,8 +294,16 @@ int __copy_in_user(void __user *dst, con
9832 return ret;
9833 }
9834 default:
9835- return copy_user_generic((__force void *)dst,
9836- (__force void *)src, size);
9837+
9838+#ifdef CONFIG_PAX_MEMORY_UDEREF
9839+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9840+ src += PAX_USER_SHADOW_BASE;
9841+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9842+ dst += PAX_USER_SHADOW_BASE;
9843+#endif
9844+
9845+ return copy_user_generic((__force_kernel void *)dst,
9846+ (__force_kernel const void *)src, size);
9847 }
9848 }
9849
9850@@ -221,33 +320,72 @@ __must_check unsigned long __clear_user(
9851 static __must_check __always_inline int
9852 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
9853 {
9854- return copy_user_generic(dst, (__force const void *)src, size);
9855+ pax_track_stack();
9856+
9857+ if ((int)size < 0)
9858+ return size;
9859+
9860+#ifdef CONFIG_PAX_MEMORY_UDEREF
9861+ if (!__access_ok(VERIFY_READ, src, size))
9862+ return size;
9863+
9864+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9865+ src += PAX_USER_SHADOW_BASE;
9866+#endif
9867+
9868+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
9869 }
9870
9871-static __must_check __always_inline int
9872+static __must_check __always_inline unsigned long
9873 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
9874 {
9875- return copy_user_generic((__force void *)dst, src, size);
9876+ if ((int)size < 0)
9877+ return size;
9878+
9879+#ifdef CONFIG_PAX_MEMORY_UDEREF
9880+ if (!__access_ok(VERIFY_WRITE, dst, size))
9881+ return size;
9882+
9883+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9884+ dst += PAX_USER_SHADOW_BASE;
9885+#endif
9886+
9887+ return copy_user_generic((__force_kernel void *)dst, src, size);
9888 }
9889
9890-extern long __copy_user_nocache(void *dst, const void __user *src,
9891+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
9892 unsigned size, int zerorest);
9893
9894-static inline int
9895-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9896+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9897 {
9898 might_sleep();
9899+
9900+ if ((int)size < 0)
9901+ return size;
9902+
9903+#ifdef CONFIG_PAX_MEMORY_UDEREF
9904+ if (!__access_ok(VERIFY_READ, src, size))
9905+ return size;
9906+#endif
9907+
9908 return __copy_user_nocache(dst, src, size, 1);
9909 }
9910
9911-static inline int
9912-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9913+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9914 unsigned size)
9915 {
9916+ if ((int)size < 0)
9917+ return size;
9918+
9919+#ifdef CONFIG_PAX_MEMORY_UDEREF
9920+ if (!__access_ok(VERIFY_READ, src, size))
9921+ return size;
9922+#endif
9923+
9924 return __copy_user_nocache(dst, src, size, 0);
9925 }
9926
9927-unsigned long
9928-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
9929+extern unsigned long
9930+copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest);
9931
9932 #endif /* _ASM_X86_UACCESS_64_H */
9933diff -urNp linux-3.1.1/arch/x86/include/asm/uaccess.h linux-3.1.1/arch/x86/include/asm/uaccess.h
9934--- linux-3.1.1/arch/x86/include/asm/uaccess.h 2011-11-11 15:19:27.000000000 -0500
9935+++ linux-3.1.1/arch/x86/include/asm/uaccess.h 2011-11-16 18:39:07.000000000 -0500
9936@@ -7,12 +7,15 @@
9937 #include <linux/compiler.h>
9938 #include <linux/thread_info.h>
9939 #include <linux/string.h>
9940+#include <linux/sched.h>
9941 #include <asm/asm.h>
9942 #include <asm/page.h>
9943
9944 #define VERIFY_READ 0
9945 #define VERIFY_WRITE 1
9946
9947+extern void check_object_size(const void *ptr, unsigned long n, bool to);
9948+
9949 /*
9950 * The fs value determines whether argument validity checking should be
9951 * performed or not. If get_fs() == USER_DS, checking is performed, with
9952@@ -28,7 +31,12 @@
9953
9954 #define get_ds() (KERNEL_DS)
9955 #define get_fs() (current_thread_info()->addr_limit)
9956+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9957+void __set_fs(mm_segment_t x);
9958+void set_fs(mm_segment_t x);
9959+#else
9960 #define set_fs(x) (current_thread_info()->addr_limit = (x))
9961+#endif
9962
9963 #define segment_eq(a, b) ((a).seg == (b).seg)
9964
9965@@ -76,7 +84,33 @@
9966 * checks that the pointer is in the user space range - after calling
9967 * this function, memory access functions may still return -EFAULT.
9968 */
9969-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9970+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9971+#define access_ok(type, addr, size) \
9972+({ \
9973+ long __size = size; \
9974+ unsigned long __addr = (unsigned long)addr; \
9975+ unsigned long __addr_ao = __addr & PAGE_MASK; \
9976+ unsigned long __end_ao = __addr + __size - 1; \
9977+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
9978+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
9979+ while(__addr_ao <= __end_ao) { \
9980+ char __c_ao; \
9981+ __addr_ao += PAGE_SIZE; \
9982+ if (__size > PAGE_SIZE) \
9983+ cond_resched(); \
9984+ if (__get_user(__c_ao, (char __user *)__addr)) \
9985+ break; \
9986+ if (type != VERIFY_WRITE) { \
9987+ __addr = __addr_ao; \
9988+ continue; \
9989+ } \
9990+ if (__put_user(__c_ao, (char __user *)__addr)) \
9991+ break; \
9992+ __addr = __addr_ao; \
9993+ } \
9994+ } \
9995+ __ret_ao; \
9996+})
9997
9998 /*
9999 * The exception table consists of pairs of addresses: the first is the
10000@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
10001 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
10002 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
10003
10004-
10005+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10006+#define __copyuser_seg "gs;"
10007+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
10008+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
10009+#else
10010+#define __copyuser_seg
10011+#define __COPYUSER_SET_ES
10012+#define __COPYUSER_RESTORE_ES
10013+#endif
10014
10015 #ifdef CONFIG_X86_32
10016 #define __put_user_asm_u64(x, addr, err, errret) \
10017- asm volatile("1: movl %%eax,0(%2)\n" \
10018- "2: movl %%edx,4(%2)\n" \
10019+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
10020+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
10021 "3:\n" \
10022 ".section .fixup,\"ax\"\n" \
10023 "4: movl %3,%0\n" \
10024@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
10025 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10026
10027 #define __put_user_asm_ex_u64(x, addr) \
10028- asm volatile("1: movl %%eax,0(%1)\n" \
10029- "2: movl %%edx,4(%1)\n" \
10030+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10031+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10032 "3:\n" \
10033 _ASM_EXTABLE(1b, 2b - 1b) \
10034 _ASM_EXTABLE(2b, 3b - 2b) \
10035@@ -252,7 +294,7 @@ extern void __put_user_8(void);
10036 __typeof__(*(ptr)) __pu_val; \
10037 __chk_user_ptr(ptr); \
10038 might_fault(); \
10039- __pu_val = x; \
10040+ __pu_val = (x); \
10041 switch (sizeof(*(ptr))) { \
10042 case 1: \
10043 __put_user_x(1, __pu_val, ptr, __ret_pu); \
10044@@ -373,7 +415,7 @@ do { \
10045 } while (0)
10046
10047 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10048- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10049+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10050 "2:\n" \
10051 ".section .fixup,\"ax\"\n" \
10052 "3: mov %3,%0\n" \
10053@@ -381,7 +423,7 @@ do { \
10054 " jmp 2b\n" \
10055 ".previous\n" \
10056 _ASM_EXTABLE(1b, 3b) \
10057- : "=r" (err), ltype(x) \
10058+ : "=r" (err), ltype (x) \
10059 : "m" (__m(addr)), "i" (errret), "0" (err))
10060
10061 #define __get_user_size_ex(x, ptr, size) \
10062@@ -406,7 +448,7 @@ do { \
10063 } while (0)
10064
10065 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10066- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10067+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10068 "2:\n" \
10069 _ASM_EXTABLE(1b, 2b - 1b) \
10070 : ltype(x) : "m" (__m(addr)))
10071@@ -423,13 +465,24 @@ do { \
10072 int __gu_err; \
10073 unsigned long __gu_val; \
10074 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10075- (x) = (__force __typeof__(*(ptr)))__gu_val; \
10076+ (x) = (__typeof__(*(ptr)))__gu_val; \
10077 __gu_err; \
10078 })
10079
10080 /* FIXME: this hack is definitely wrong -AK */
10081 struct __large_struct { unsigned long buf[100]; };
10082-#define __m(x) (*(struct __large_struct __user *)(x))
10083+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10084+#define ____m(x) \
10085+({ \
10086+ unsigned long ____x = (unsigned long)(x); \
10087+ if (____x < PAX_USER_SHADOW_BASE) \
10088+ ____x += PAX_USER_SHADOW_BASE; \
10089+ (void __user *)____x; \
10090+})
10091+#else
10092+#define ____m(x) (x)
10093+#endif
10094+#define __m(x) (*(struct __large_struct __user *)____m(x))
10095
10096 /*
10097 * Tell gcc we read from memory instead of writing: this is because
10098@@ -437,7 +490,7 @@ struct __large_struct { unsigned long bu
10099 * aliasing issues.
10100 */
10101 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10102- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10103+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10104 "2:\n" \
10105 ".section .fixup,\"ax\"\n" \
10106 "3: mov %3,%0\n" \
10107@@ -445,10 +498,10 @@ struct __large_struct { unsigned long bu
10108 ".previous\n" \
10109 _ASM_EXTABLE(1b, 3b) \
10110 : "=r"(err) \
10111- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10112+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10113
10114 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10115- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10116+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10117 "2:\n" \
10118 _ASM_EXTABLE(1b, 2b - 1b) \
10119 : : ltype(x), "m" (__m(addr)))
10120@@ -487,8 +540,12 @@ struct __large_struct { unsigned long bu
10121 * On error, the variable @x is set to zero.
10122 */
10123
10124+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10125+#define __get_user(x, ptr) get_user((x), (ptr))
10126+#else
10127 #define __get_user(x, ptr) \
10128 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10129+#endif
10130
10131 /**
10132 * __put_user: - Write a simple value into user space, with less checking.
10133@@ -510,8 +567,12 @@ struct __large_struct { unsigned long bu
10134 * Returns zero on success, or -EFAULT on error.
10135 */
10136
10137+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10138+#define __put_user(x, ptr) put_user((x), (ptr))
10139+#else
10140 #define __put_user(x, ptr) \
10141 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10142+#endif
10143
10144 #define __get_user_unaligned __get_user
10145 #define __put_user_unaligned __put_user
10146@@ -529,7 +590,7 @@ struct __large_struct { unsigned long bu
10147 #define get_user_ex(x, ptr) do { \
10148 unsigned long __gue_val; \
10149 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10150- (x) = (__force __typeof__(*(ptr)))__gue_val; \
10151+ (x) = (__typeof__(*(ptr)))__gue_val; \
10152 } while (0)
10153
10154 #ifdef CONFIG_X86_WP_WORKS_OK
10155diff -urNp linux-3.1.1/arch/x86/include/asm/vdso.h linux-3.1.1/arch/x86/include/asm/vdso.h
10156--- linux-3.1.1/arch/x86/include/asm/vdso.h 2011-11-11 15:19:27.000000000 -0500
10157+++ linux-3.1.1/arch/x86/include/asm/vdso.h 2011-11-16 18:39:07.000000000 -0500
10158@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
10159 #define VDSO32_SYMBOL(base, name) \
10160 ({ \
10161 extern const char VDSO32_##name[]; \
10162- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
10163+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
10164 })
10165 #endif
10166
10167diff -urNp linux-3.1.1/arch/x86/include/asm/x86_init.h linux-3.1.1/arch/x86/include/asm/x86_init.h
10168--- linux-3.1.1/arch/x86/include/asm/x86_init.h 2011-11-11 15:19:27.000000000 -0500
10169+++ linux-3.1.1/arch/x86/include/asm/x86_init.h 2011-11-16 18:39:07.000000000 -0500
10170@@ -28,7 +28,7 @@ struct x86_init_mpparse {
10171 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
10172 void (*find_smp_config)(void);
10173 void (*get_smp_config)(unsigned int early);
10174-};
10175+} __no_const;
10176
10177 /**
10178 * struct x86_init_resources - platform specific resource related ops
10179@@ -42,7 +42,7 @@ struct x86_init_resources {
10180 void (*probe_roms)(void);
10181 void (*reserve_resources)(void);
10182 char *(*memory_setup)(void);
10183-};
10184+} __no_const;
10185
10186 /**
10187 * struct x86_init_irqs - platform specific interrupt setup
10188@@ -55,7 +55,7 @@ struct x86_init_irqs {
10189 void (*pre_vector_init)(void);
10190 void (*intr_init)(void);
10191 void (*trap_init)(void);
10192-};
10193+} __no_const;
10194
10195 /**
10196 * struct x86_init_oem - oem platform specific customizing functions
10197@@ -65,7 +65,7 @@ struct x86_init_irqs {
10198 struct x86_init_oem {
10199 void (*arch_setup)(void);
10200 void (*banner)(void);
10201-};
10202+} __no_const;
10203
10204 /**
10205 * struct x86_init_mapping - platform specific initial kernel pagetable setup
10206@@ -76,7 +76,7 @@ struct x86_init_oem {
10207 */
10208 struct x86_init_mapping {
10209 void (*pagetable_reserve)(u64 start, u64 end);
10210-};
10211+} __no_const;
10212
10213 /**
10214 * struct x86_init_paging - platform specific paging functions
10215@@ -86,7 +86,7 @@ struct x86_init_mapping {
10216 struct x86_init_paging {
10217 void (*pagetable_setup_start)(pgd_t *base);
10218 void (*pagetable_setup_done)(pgd_t *base);
10219-};
10220+} __no_const;
10221
10222 /**
10223 * struct x86_init_timers - platform specific timer setup
10224@@ -101,7 +101,7 @@ struct x86_init_timers {
10225 void (*tsc_pre_init)(void);
10226 void (*timer_init)(void);
10227 void (*wallclock_init)(void);
10228-};
10229+} __no_const;
10230
10231 /**
10232 * struct x86_init_iommu - platform specific iommu setup
10233@@ -109,7 +109,7 @@ struct x86_init_timers {
10234 */
10235 struct x86_init_iommu {
10236 int (*iommu_init)(void);
10237-};
10238+} __no_const;
10239
10240 /**
10241 * struct x86_init_pci - platform specific pci init functions
10242@@ -123,7 +123,7 @@ struct x86_init_pci {
10243 int (*init)(void);
10244 void (*init_irq)(void);
10245 void (*fixup_irqs)(void);
10246-};
10247+} __no_const;
10248
10249 /**
10250 * struct x86_init_ops - functions for platform specific setup
10251@@ -139,7 +139,7 @@ struct x86_init_ops {
10252 struct x86_init_timers timers;
10253 struct x86_init_iommu iommu;
10254 struct x86_init_pci pci;
10255-};
10256+} __no_const;
10257
10258 /**
10259 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
10260@@ -147,7 +147,7 @@ struct x86_init_ops {
10261 */
10262 struct x86_cpuinit_ops {
10263 void (*setup_percpu_clockev)(void);
10264-};
10265+} __no_const;
10266
10267 /**
10268 * struct x86_platform_ops - platform specific runtime functions
10269@@ -166,7 +166,7 @@ struct x86_platform_ops {
10270 bool (*is_untracked_pat_range)(u64 start, u64 end);
10271 void (*nmi_init)(void);
10272 int (*i8042_detect)(void);
10273-};
10274+} __no_const;
10275
10276 struct pci_dev;
10277
10278@@ -174,7 +174,7 @@ struct x86_msi_ops {
10279 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
10280 void (*teardown_msi_irq)(unsigned int irq);
10281 void (*teardown_msi_irqs)(struct pci_dev *dev);
10282-};
10283+} __no_const;
10284
10285 extern struct x86_init_ops x86_init;
10286 extern struct x86_cpuinit_ops x86_cpuinit;
10287diff -urNp linux-3.1.1/arch/x86/include/asm/xsave.h linux-3.1.1/arch/x86/include/asm/xsave.h
10288--- linux-3.1.1/arch/x86/include/asm/xsave.h 2011-11-11 15:19:27.000000000 -0500
10289+++ linux-3.1.1/arch/x86/include/asm/xsave.h 2011-11-16 18:39:07.000000000 -0500
10290@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
10291 {
10292 int err;
10293
10294+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10295+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10296+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10297+#endif
10298+
10299 /*
10300 * Clear the xsave header first, so that reserved fields are
10301 * initialized to zero.
10302@@ -96,10 +101,15 @@ static inline int xsave_user(struct xsav
10303 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
10304 {
10305 int err;
10306- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
10307+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
10308 u32 lmask = mask;
10309 u32 hmask = mask >> 32;
10310
10311+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10312+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10313+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10314+#endif
10315+
10316 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10317 "2:\n"
10318 ".section .fixup,\"ax\"\n"
10319diff -urNp linux-3.1.1/arch/x86/Kconfig linux-3.1.1/arch/x86/Kconfig
10320--- linux-3.1.1/arch/x86/Kconfig 2011-11-11 15:19:27.000000000 -0500
10321+++ linux-3.1.1/arch/x86/Kconfig 2011-11-16 18:40:08.000000000 -0500
10322@@ -236,7 +236,7 @@ config X86_HT
10323
10324 config X86_32_LAZY_GS
10325 def_bool y
10326- depends on X86_32 && !CC_STACKPROTECTOR
10327+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10328
10329 config ARCH_HWEIGHT_CFLAGS
10330 string
10331@@ -1019,7 +1019,7 @@ choice
10332
10333 config NOHIGHMEM
10334 bool "off"
10335- depends on !X86_NUMAQ
10336+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10337 ---help---
10338 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10339 However, the address space of 32-bit x86 processors is only 4
10340@@ -1056,7 +1056,7 @@ config NOHIGHMEM
10341
10342 config HIGHMEM4G
10343 bool "4GB"
10344- depends on !X86_NUMAQ
10345+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10346 ---help---
10347 Select this if you have a 32-bit processor and between 1 and 4
10348 gigabytes of physical RAM.
10349@@ -1110,7 +1110,7 @@ config PAGE_OFFSET
10350 hex
10351 default 0xB0000000 if VMSPLIT_3G_OPT
10352 default 0x80000000 if VMSPLIT_2G
10353- default 0x78000000 if VMSPLIT_2G_OPT
10354+ default 0x70000000 if VMSPLIT_2G_OPT
10355 default 0x40000000 if VMSPLIT_1G
10356 default 0xC0000000
10357 depends on X86_32
10358@@ -1484,6 +1484,7 @@ config SECCOMP
10359
10360 config CC_STACKPROTECTOR
10361 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10362+ depends on X86_64 || !PAX_MEMORY_UDEREF
10363 ---help---
10364 This option turns on the -fstack-protector GCC feature. This
10365 feature puts, at the beginning of functions, a canary value on
10366@@ -1541,6 +1542,7 @@ config KEXEC_JUMP
10367 config PHYSICAL_START
10368 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10369 default "0x1000000"
10370+ range 0x400000 0x40000000
10371 ---help---
10372 This gives the physical address where the kernel is loaded.
10373
10374@@ -1604,6 +1606,7 @@ config X86_NEED_RELOCS
10375 config PHYSICAL_ALIGN
10376 hex "Alignment value to which kernel should be aligned" if X86_32
10377 default "0x1000000"
10378+ range 0x400000 0x1000000 if PAX_KERNEXEC
10379 range 0x2000 0x1000000
10380 ---help---
10381 This value puts the alignment restrictions on physical address
10382@@ -1635,9 +1638,10 @@ config HOTPLUG_CPU
10383 Say N if you want to disable CPU hotplug.
10384
10385 config COMPAT_VDSO
10386- def_bool y
10387+ def_bool n
10388 prompt "Compat VDSO support"
10389 depends on X86_32 || IA32_EMULATION
10390+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
10391 ---help---
10392 Map the 32-bit VDSO to the predictable old-style address too.
10393
10394diff -urNp linux-3.1.1/arch/x86/Kconfig.cpu linux-3.1.1/arch/x86/Kconfig.cpu
10395--- linux-3.1.1/arch/x86/Kconfig.cpu 2011-11-11 15:19:27.000000000 -0500
10396+++ linux-3.1.1/arch/x86/Kconfig.cpu 2011-11-16 18:39:07.000000000 -0500
10397@@ -341,7 +341,7 @@ config X86_PPRO_FENCE
10398
10399 config X86_F00F_BUG
10400 def_bool y
10401- depends on M586MMX || M586TSC || M586 || M486 || M386
10402+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
10403
10404 config X86_INVD_BUG
10405 def_bool y
10406@@ -365,7 +365,7 @@ config X86_POPAD_OK
10407
10408 config X86_ALIGNMENT_16
10409 def_bool y
10410- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10411+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10412
10413 config X86_INTEL_USERCOPY
10414 def_bool y
10415@@ -411,7 +411,7 @@ config X86_CMPXCHG64
10416 # generates cmov.
10417 config X86_CMOV
10418 def_bool y
10419- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10420+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10421
10422 config X86_MINIMUM_CPU_FAMILY
10423 int
10424diff -urNp linux-3.1.1/arch/x86/Kconfig.debug linux-3.1.1/arch/x86/Kconfig.debug
10425--- linux-3.1.1/arch/x86/Kconfig.debug 2011-11-11 15:19:27.000000000 -0500
10426+++ linux-3.1.1/arch/x86/Kconfig.debug 2011-11-16 18:39:07.000000000 -0500
10427@@ -81,7 +81,7 @@ config X86_PTDUMP
10428 config DEBUG_RODATA
10429 bool "Write protect kernel read-only data structures"
10430 default y
10431- depends on DEBUG_KERNEL
10432+ depends on DEBUG_KERNEL && BROKEN
10433 ---help---
10434 Mark the kernel read-only data as write-protected in the pagetables,
10435 in order to catch accidental (and incorrect) writes to such const
10436@@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
10437
10438 config DEBUG_SET_MODULE_RONX
10439 bool "Set loadable kernel module data as NX and text as RO"
10440- depends on MODULES
10441+ depends on MODULES && BROKEN
10442 ---help---
10443 This option helps catch unintended modifications to loadable
10444 kernel module's text and read-only data. It also prevents execution
10445diff -urNp linux-3.1.1/arch/x86/kernel/acpi/realmode/Makefile linux-3.1.1/arch/x86/kernel/acpi/realmode/Makefile
10446--- linux-3.1.1/arch/x86/kernel/acpi/realmode/Makefile 2011-11-11 15:19:27.000000000 -0500
10447+++ linux-3.1.1/arch/x86/kernel/acpi/realmode/Makefile 2011-11-16 18:39:07.000000000 -0500
10448@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
10449 $(call cc-option, -fno-stack-protector) \
10450 $(call cc-option, -mpreferred-stack-boundary=2)
10451 KBUILD_CFLAGS += $(call cc-option, -m32)
10452+ifdef CONSTIFY_PLUGIN
10453+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
10454+endif
10455 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10456 GCOV_PROFILE := n
10457
10458diff -urNp linux-3.1.1/arch/x86/kernel/acpi/realmode/wakeup.S linux-3.1.1/arch/x86/kernel/acpi/realmode/wakeup.S
10459--- linux-3.1.1/arch/x86/kernel/acpi/realmode/wakeup.S 2011-11-11 15:19:27.000000000 -0500
10460+++ linux-3.1.1/arch/x86/kernel/acpi/realmode/wakeup.S 2011-11-16 18:40:08.000000000 -0500
10461@@ -108,6 +108,9 @@ wakeup_code:
10462 /* Do any other stuff... */
10463
10464 #ifndef CONFIG_64BIT
10465+ /* Recheck NX bit overrides (64bit path does this in trampoline */
10466+ call verify_cpu
10467+
10468 /* This could also be done in C code... */
10469 movl pmode_cr3, %eax
10470 movl %eax, %cr3
10471@@ -131,6 +134,7 @@ wakeup_code:
10472 movl pmode_cr0, %eax
10473 movl %eax, %cr0
10474 jmp pmode_return
10475+# include "../../verify_cpu.S"
10476 #else
10477 pushw $0
10478 pushw trampoline_segment
10479diff -urNp linux-3.1.1/arch/x86/kernel/acpi/sleep.c linux-3.1.1/arch/x86/kernel/acpi/sleep.c
10480--- linux-3.1.1/arch/x86/kernel/acpi/sleep.c 2011-11-11 15:19:27.000000000 -0500
10481+++ linux-3.1.1/arch/x86/kernel/acpi/sleep.c 2011-11-16 18:39:07.000000000 -0500
10482@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
10483 header->trampoline_segment = trampoline_address() >> 4;
10484 #ifdef CONFIG_SMP
10485 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
10486+
10487+ pax_open_kernel();
10488 early_gdt_descr.address =
10489 (unsigned long)get_cpu_gdt_table(smp_processor_id());
10490+ pax_close_kernel();
10491+
10492 initial_gs = per_cpu_offset(smp_processor_id());
10493 #endif
10494 initial_code = (unsigned long)wakeup_long64;
10495diff -urNp linux-3.1.1/arch/x86/kernel/acpi/wakeup_32.S linux-3.1.1/arch/x86/kernel/acpi/wakeup_32.S
10496--- linux-3.1.1/arch/x86/kernel/acpi/wakeup_32.S 2011-11-11 15:19:27.000000000 -0500
10497+++ linux-3.1.1/arch/x86/kernel/acpi/wakeup_32.S 2011-11-16 18:39:07.000000000 -0500
10498@@ -30,13 +30,11 @@ wakeup_pmode_return:
10499 # and restore the stack ... but you need gdt for this to work
10500 movl saved_context_esp, %esp
10501
10502- movl %cs:saved_magic, %eax
10503- cmpl $0x12345678, %eax
10504+ cmpl $0x12345678, saved_magic
10505 jne bogus_magic
10506
10507 # jump to place where we left off
10508- movl saved_eip, %eax
10509- jmp *%eax
10510+ jmp *(saved_eip)
10511
10512 bogus_magic:
10513 jmp bogus_magic
10514diff -urNp linux-3.1.1/arch/x86/kernel/alternative.c linux-3.1.1/arch/x86/kernel/alternative.c
10515--- linux-3.1.1/arch/x86/kernel/alternative.c 2011-11-11 15:19:27.000000000 -0500
10516+++ linux-3.1.1/arch/x86/kernel/alternative.c 2011-11-16 18:39:07.000000000 -0500
10517@@ -276,6 +276,13 @@ void __init_or_module apply_alternatives
10518 */
10519 for (a = start; a < end; a++) {
10520 instr = (u8 *)&a->instr_offset + a->instr_offset;
10521+
10522+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
10523+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
10524+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
10525+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
10526+#endif
10527+
10528 replacement = (u8 *)&a->repl_offset + a->repl_offset;
10529 BUG_ON(a->replacementlen > a->instrlen);
10530 BUG_ON(a->instrlen > sizeof(insnbuf));
10531@@ -307,10 +314,16 @@ static void alternatives_smp_lock(const
10532 for (poff = start; poff < end; poff++) {
10533 u8 *ptr = (u8 *)poff + *poff;
10534
10535+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
10536+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
10537+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
10538+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
10539+#endif
10540+
10541 if (!*poff || ptr < text || ptr >= text_end)
10542 continue;
10543 /* turn DS segment override prefix into lock prefix */
10544- if (*ptr == 0x3e)
10545+ if (*ktla_ktva(ptr) == 0x3e)
10546 text_poke(ptr, ((unsigned char []){0xf0}), 1);
10547 };
10548 mutex_unlock(&text_mutex);
10549@@ -328,10 +341,16 @@ static void alternatives_smp_unlock(cons
10550 for (poff = start; poff < end; poff++) {
10551 u8 *ptr = (u8 *)poff + *poff;
10552
10553+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
10554+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
10555+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
10556+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
10557+#endif
10558+
10559 if (!*poff || ptr < text || ptr >= text_end)
10560 continue;
10561 /* turn lock prefix into DS segment override prefix */
10562- if (*ptr == 0xf0)
10563+ if (*ktla_ktva(ptr) == 0xf0)
10564 text_poke(ptr, ((unsigned char []){0x3E}), 1);
10565 };
10566 mutex_unlock(&text_mutex);
10567@@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(str
10568
10569 BUG_ON(p->len > MAX_PATCH_LEN);
10570 /* prep the buffer with the original instructions */
10571- memcpy(insnbuf, p->instr, p->len);
10572+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10573 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10574 (unsigned long)p->instr, p->len);
10575
10576@@ -568,7 +587,7 @@ void __init alternative_instructions(voi
10577 if (smp_alt_once)
10578 free_init_pages("SMP alternatives",
10579 (unsigned long)__smp_locks,
10580- (unsigned long)__smp_locks_end);
10581+ PAGE_ALIGN((unsigned long)__smp_locks_end));
10582
10583 restart_nmi();
10584 }
10585@@ -585,13 +604,17 @@ void __init alternative_instructions(voi
10586 * instructions. And on the local CPU you need to be protected again NMI or MCE
10587 * handlers seeing an inconsistent instruction while you patch.
10588 */
10589-void *__init_or_module text_poke_early(void *addr, const void *opcode,
10590+void *__kprobes text_poke_early(void *addr, const void *opcode,
10591 size_t len)
10592 {
10593 unsigned long flags;
10594 local_irq_save(flags);
10595- memcpy(addr, opcode, len);
10596+
10597+ pax_open_kernel();
10598+ memcpy(ktla_ktva(addr), opcode, len);
10599 sync_core();
10600+ pax_close_kernel();
10601+
10602 local_irq_restore(flags);
10603 /* Could also do a CLFLUSH here to speed up CPU recovery; but
10604 that causes hangs on some VIA CPUs. */
10605@@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(v
10606 */
10607 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10608 {
10609- unsigned long flags;
10610- char *vaddr;
10611+ unsigned char *vaddr = ktla_ktva(addr);
10612 struct page *pages[2];
10613- int i;
10614+ size_t i;
10615
10616 if (!core_kernel_text((unsigned long)addr)) {
10617- pages[0] = vmalloc_to_page(addr);
10618- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10619+ pages[0] = vmalloc_to_page(vaddr);
10620+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10621 } else {
10622- pages[0] = virt_to_page(addr);
10623+ pages[0] = virt_to_page(vaddr);
10624 WARN_ON(!PageReserved(pages[0]));
10625- pages[1] = virt_to_page(addr + PAGE_SIZE);
10626+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10627 }
10628 BUG_ON(!pages[0]);
10629- local_irq_save(flags);
10630- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10631- if (pages[1])
10632- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10633- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10634- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10635- clear_fixmap(FIX_TEXT_POKE0);
10636- if (pages[1])
10637- clear_fixmap(FIX_TEXT_POKE1);
10638- local_flush_tlb();
10639- sync_core();
10640- /* Could also do a CLFLUSH here to speed up CPU recovery; but
10641- that causes hangs on some VIA CPUs. */
10642+ text_poke_early(addr, opcode, len);
10643 for (i = 0; i < len; i++)
10644- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10645- local_irq_restore(flags);
10646+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
10647 return addr;
10648 }
10649
10650diff -urNp linux-3.1.1/arch/x86/kernel/apic/apic.c linux-3.1.1/arch/x86/kernel/apic/apic.c
10651--- linux-3.1.1/arch/x86/kernel/apic/apic.c 2011-11-11 15:19:27.000000000 -0500
10652+++ linux-3.1.1/arch/x86/kernel/apic/apic.c 2011-11-16 18:40:08.000000000 -0500
10653@@ -174,7 +174,7 @@ int first_system_vector = 0xfe;
10654 /*
10655 * Debug level, exported for io_apic.c
10656 */
10657-unsigned int apic_verbosity;
10658+int apic_verbosity;
10659
10660 int pic_mode;
10661
10662@@ -1835,7 +1835,7 @@ void smp_error_interrupt(struct pt_regs
10663 apic_write(APIC_ESR, 0);
10664 v1 = apic_read(APIC_ESR);
10665 ack_APIC_irq();
10666- atomic_inc(&irq_err_count);
10667+ atomic_inc_unchecked(&irq_err_count);
10668
10669 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
10670 smp_processor_id(), v0 , v1);
10671@@ -2209,6 +2209,8 @@ static int __cpuinit apic_cluster_num(vo
10672 u16 *bios_cpu_apicid;
10673 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
10674
10675+ pax_track_stack();
10676+
10677 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
10678 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
10679
10680diff -urNp linux-3.1.1/arch/x86/kernel/apic/io_apic.c linux-3.1.1/arch/x86/kernel/apic/io_apic.c
10681--- linux-3.1.1/arch/x86/kernel/apic/io_apic.c 2011-11-11 15:19:27.000000000 -0500
10682+++ linux-3.1.1/arch/x86/kernel/apic/io_apic.c 2011-11-16 18:39:07.000000000 -0500
10683@@ -1028,7 +1028,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
10684 }
10685 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
10686
10687-void lock_vector_lock(void)
10688+void lock_vector_lock(void) __acquires(vector_lock)
10689 {
10690 /* Used to the online set of cpus does not change
10691 * during assign_irq_vector.
10692@@ -1036,7 +1036,7 @@ void lock_vector_lock(void)
10693 raw_spin_lock(&vector_lock);
10694 }
10695
10696-void unlock_vector_lock(void)
10697+void unlock_vector_lock(void) __releases(vector_lock)
10698 {
10699 raw_spin_unlock(&vector_lock);
10700 }
10701@@ -2405,7 +2405,7 @@ static void ack_apic_edge(struct irq_dat
10702 ack_APIC_irq();
10703 }
10704
10705-atomic_t irq_mis_count;
10706+atomic_unchecked_t irq_mis_count;
10707
10708 /*
10709 * IO-APIC versions below 0x20 don't support EOI register.
10710@@ -2513,7 +2513,7 @@ static void ack_apic_level(struct irq_da
10711 * at the cpu.
10712 */
10713 if (!(v & (1 << (i & 0x1f)))) {
10714- atomic_inc(&irq_mis_count);
10715+ atomic_inc_unchecked(&irq_mis_count);
10716
10717 eoi_ioapic_irq(irq, cfg);
10718 }
10719diff -urNp linux-3.1.1/arch/x86/kernel/apm_32.c linux-3.1.1/arch/x86/kernel/apm_32.c
10720--- linux-3.1.1/arch/x86/kernel/apm_32.c 2011-11-11 15:19:27.000000000 -0500
10721+++ linux-3.1.1/arch/x86/kernel/apm_32.c 2011-11-16 18:39:07.000000000 -0500
10722@@ -413,7 +413,7 @@ static DEFINE_MUTEX(apm_mutex);
10723 * This is for buggy BIOS's that refer to (real mode) segment 0x40
10724 * even though they are called in protected mode.
10725 */
10726-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
10727+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
10728 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
10729
10730 static const char driver_version[] = "1.16ac"; /* no spaces */
10731@@ -591,7 +591,10 @@ static long __apm_bios_call(void *_call)
10732 BUG_ON(cpu != 0);
10733 gdt = get_cpu_gdt_table(cpu);
10734 save_desc_40 = gdt[0x40 / 8];
10735+
10736+ pax_open_kernel();
10737 gdt[0x40 / 8] = bad_bios_desc;
10738+ pax_close_kernel();
10739
10740 apm_irq_save(flags);
10741 APM_DO_SAVE_SEGS;
10742@@ -600,7 +603,11 @@ static long __apm_bios_call(void *_call)
10743 &call->esi);
10744 APM_DO_RESTORE_SEGS;
10745 apm_irq_restore(flags);
10746+
10747+ pax_open_kernel();
10748 gdt[0x40 / 8] = save_desc_40;
10749+ pax_close_kernel();
10750+
10751 put_cpu();
10752
10753 return call->eax & 0xff;
10754@@ -667,7 +674,10 @@ static long __apm_bios_call_simple(void
10755 BUG_ON(cpu != 0);
10756 gdt = get_cpu_gdt_table(cpu);
10757 save_desc_40 = gdt[0x40 / 8];
10758+
10759+ pax_open_kernel();
10760 gdt[0x40 / 8] = bad_bios_desc;
10761+ pax_close_kernel();
10762
10763 apm_irq_save(flags);
10764 APM_DO_SAVE_SEGS;
10765@@ -675,7 +685,11 @@ static long __apm_bios_call_simple(void
10766 &call->eax);
10767 APM_DO_RESTORE_SEGS;
10768 apm_irq_restore(flags);
10769+
10770+ pax_open_kernel();
10771 gdt[0x40 / 8] = save_desc_40;
10772+ pax_close_kernel();
10773+
10774 put_cpu();
10775 return error;
10776 }
10777@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
10778 * code to that CPU.
10779 */
10780 gdt = get_cpu_gdt_table(0);
10781+
10782+ pax_open_kernel();
10783 set_desc_base(&gdt[APM_CS >> 3],
10784 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
10785 set_desc_base(&gdt[APM_CS_16 >> 3],
10786 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
10787 set_desc_base(&gdt[APM_DS >> 3],
10788 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
10789+ pax_close_kernel();
10790
10791 proc_create("apm", 0, NULL, &apm_file_ops);
10792
10793diff -urNp linux-3.1.1/arch/x86/kernel/asm-offsets_64.c linux-3.1.1/arch/x86/kernel/asm-offsets_64.c
10794--- linux-3.1.1/arch/x86/kernel/asm-offsets_64.c 2011-11-11 15:19:27.000000000 -0500
10795+++ linux-3.1.1/arch/x86/kernel/asm-offsets_64.c 2011-11-16 18:39:07.000000000 -0500
10796@@ -69,6 +69,7 @@ int main(void)
10797 BLANK();
10798 #undef ENTRY
10799
10800+ DEFINE(TSS_size, sizeof(struct tss_struct));
10801 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
10802 BLANK();
10803
10804diff -urNp linux-3.1.1/arch/x86/kernel/asm-offsets.c linux-3.1.1/arch/x86/kernel/asm-offsets.c
10805--- linux-3.1.1/arch/x86/kernel/asm-offsets.c 2011-11-11 15:19:27.000000000 -0500
10806+++ linux-3.1.1/arch/x86/kernel/asm-offsets.c 2011-11-16 18:39:07.000000000 -0500
10807@@ -33,6 +33,8 @@ void common(void) {
10808 OFFSET(TI_status, thread_info, status);
10809 OFFSET(TI_addr_limit, thread_info, addr_limit);
10810 OFFSET(TI_preempt_count, thread_info, preempt_count);
10811+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
10812+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
10813
10814 BLANK();
10815 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
10816@@ -53,8 +55,26 @@ void common(void) {
10817 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
10818 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
10819 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
10820+
10821+#ifdef CONFIG_PAX_KERNEXEC
10822+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
10823+#endif
10824+
10825+#ifdef CONFIG_PAX_MEMORY_UDEREF
10826+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
10827+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
10828+#ifdef CONFIG_X86_64
10829+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
10830+#endif
10831 #endif
10832
10833+#endif
10834+
10835+ BLANK();
10836+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
10837+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
10838+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
10839+
10840 #ifdef CONFIG_XEN
10841 BLANK();
10842 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
10843diff -urNp linux-3.1.1/arch/x86/kernel/cpu/amd.c linux-3.1.1/arch/x86/kernel/cpu/amd.c
10844--- linux-3.1.1/arch/x86/kernel/cpu/amd.c 2011-11-11 15:19:27.000000000 -0500
10845+++ linux-3.1.1/arch/x86/kernel/cpu/amd.c 2011-11-16 18:39:07.000000000 -0500
10846@@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
10847 unsigned int size)
10848 {
10849 /* AMD errata T13 (order #21922) */
10850- if ((c->x86 == 6)) {
10851+ if (c->x86 == 6) {
10852 /* Duron Rev A0 */
10853 if (c->x86_model == 3 && c->x86_mask == 0)
10854 size = 64;
10855diff -urNp linux-3.1.1/arch/x86/kernel/cpu/common.c linux-3.1.1/arch/x86/kernel/cpu/common.c
10856--- linux-3.1.1/arch/x86/kernel/cpu/common.c 2011-11-11 15:19:27.000000000 -0500
10857+++ linux-3.1.1/arch/x86/kernel/cpu/common.c 2011-11-16 18:39:07.000000000 -0500
10858@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
10859
10860 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
10861
10862-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
10863-#ifdef CONFIG_X86_64
10864- /*
10865- * We need valid kernel segments for data and code in long mode too
10866- * IRET will check the segment types kkeil 2000/10/28
10867- * Also sysret mandates a special GDT layout
10868- *
10869- * TLS descriptors are currently at a different place compared to i386.
10870- * Hopefully nobody expects them at a fixed place (Wine?)
10871- */
10872- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
10873- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
10874- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
10875- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
10876- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
10877- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
10878-#else
10879- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
10880- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10881- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
10882- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
10883- /*
10884- * Segments used for calling PnP BIOS have byte granularity.
10885- * They code segments and data segments have fixed 64k limits,
10886- * the transfer segment sizes are set at run time.
10887- */
10888- /* 32-bit code */
10889- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10890- /* 16-bit code */
10891- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10892- /* 16-bit data */
10893- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
10894- /* 16-bit data */
10895- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
10896- /* 16-bit data */
10897- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
10898- /*
10899- * The APM segments have byte granularity and their bases
10900- * are set at run time. All have 64k limits.
10901- */
10902- /* 32-bit code */
10903- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10904- /* 16-bit code */
10905- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10906- /* data */
10907- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
10908-
10909- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10910- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10911- GDT_STACK_CANARY_INIT
10912-#endif
10913-} };
10914-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
10915-
10916 static int __init x86_xsave_setup(char *s)
10917 {
10918 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
10919@@ -371,7 +317,7 @@ void switch_to_new_gdt(int cpu)
10920 {
10921 struct desc_ptr gdt_descr;
10922
10923- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
10924+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
10925 gdt_descr.size = GDT_SIZE - 1;
10926 load_gdt(&gdt_descr);
10927 /* Reload the per-cpu base */
10928@@ -840,6 +786,10 @@ static void __cpuinit identify_cpu(struc
10929 /* Filter out anything that depends on CPUID levels we don't have */
10930 filter_cpuid_features(c, true);
10931
10932+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
10933+ setup_clear_cpu_cap(X86_FEATURE_SEP);
10934+#endif
10935+
10936 /* If the model name is still unset, do table lookup. */
10937 if (!c->x86_model_id[0]) {
10938 const char *p;
10939@@ -1019,6 +969,9 @@ static __init int setup_disablecpuid(cha
10940 }
10941 __setup("clearcpuid=", setup_disablecpuid);
10942
10943+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
10944+EXPORT_PER_CPU_SYMBOL(current_tinfo);
10945+
10946 #ifdef CONFIG_X86_64
10947 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
10948
10949@@ -1034,7 +987,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
10950 EXPORT_PER_CPU_SYMBOL(current_task);
10951
10952 DEFINE_PER_CPU(unsigned long, kernel_stack) =
10953- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
10954+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
10955 EXPORT_PER_CPU_SYMBOL(kernel_stack);
10956
10957 DEFINE_PER_CPU(char *, irq_stack_ptr) =
10958@@ -1099,7 +1052,7 @@ struct pt_regs * __cpuinit idle_regs(str
10959 {
10960 memset(regs, 0, sizeof(struct pt_regs));
10961 regs->fs = __KERNEL_PERCPU;
10962- regs->gs = __KERNEL_STACK_CANARY;
10963+ savesegment(gs, regs->gs);
10964
10965 return regs;
10966 }
10967@@ -1154,7 +1107,7 @@ void __cpuinit cpu_init(void)
10968 int i;
10969
10970 cpu = stack_smp_processor_id();
10971- t = &per_cpu(init_tss, cpu);
10972+ t = init_tss + cpu;
10973 oist = &per_cpu(orig_ist, cpu);
10974
10975 #ifdef CONFIG_NUMA
10976@@ -1180,7 +1133,7 @@ void __cpuinit cpu_init(void)
10977 switch_to_new_gdt(cpu);
10978 loadsegment(fs, 0);
10979
10980- load_idt((const struct desc_ptr *)&idt_descr);
10981+ load_idt(&idt_descr);
10982
10983 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
10984 syscall_init();
10985@@ -1189,7 +1142,6 @@ void __cpuinit cpu_init(void)
10986 wrmsrl(MSR_KERNEL_GS_BASE, 0);
10987 barrier();
10988
10989- x86_configure_nx();
10990 if (cpu != 0)
10991 enable_x2apic();
10992
10993@@ -1243,7 +1195,7 @@ void __cpuinit cpu_init(void)
10994 {
10995 int cpu = smp_processor_id();
10996 struct task_struct *curr = current;
10997- struct tss_struct *t = &per_cpu(init_tss, cpu);
10998+ struct tss_struct *t = init_tss + cpu;
10999 struct thread_struct *thread = &curr->thread;
11000
11001 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
11002diff -urNp linux-3.1.1/arch/x86/kernel/cpu/intel.c linux-3.1.1/arch/x86/kernel/cpu/intel.c
11003--- linux-3.1.1/arch/x86/kernel/cpu/intel.c 2011-11-11 15:19:27.000000000 -0500
11004+++ linux-3.1.1/arch/x86/kernel/cpu/intel.c 2011-11-16 18:39:07.000000000 -0500
11005@@ -172,7 +172,7 @@ static void __cpuinit trap_init_f00f_bug
11006 * Update the IDT descriptor and reload the IDT so that
11007 * it uses the read-only mapped virtual address.
11008 */
11009- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
11010+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
11011 load_idt(&idt_descr);
11012 }
11013 #endif
11014diff -urNp linux-3.1.1/arch/x86/kernel/cpu/Makefile linux-3.1.1/arch/x86/kernel/cpu/Makefile
11015--- linux-3.1.1/arch/x86/kernel/cpu/Makefile 2011-11-11 15:19:27.000000000 -0500
11016+++ linux-3.1.1/arch/x86/kernel/cpu/Makefile 2011-11-16 18:39:07.000000000 -0500
11017@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
11018 CFLAGS_REMOVE_perf_event.o = -pg
11019 endif
11020
11021-# Make sure load_percpu_segment has no stackprotector
11022-nostackp := $(call cc-option, -fno-stack-protector)
11023-CFLAGS_common.o := $(nostackp)
11024-
11025 obj-y := intel_cacheinfo.o scattered.o topology.o
11026 obj-y += proc.o capflags.o powerflags.o common.o
11027 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
11028diff -urNp linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce.c linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce.c
11029--- linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce.c 2011-11-11 15:19:27.000000000 -0500
11030+++ linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce.c 2011-11-16 18:39:07.000000000 -0500
11031@@ -42,6 +42,7 @@
11032 #include <asm/processor.h>
11033 #include <asm/mce.h>
11034 #include <asm/msr.h>
11035+#include <asm/local.h>
11036
11037 #include "mce-internal.h"
11038
11039@@ -205,7 +206,7 @@ static void print_mce(struct mce *m)
11040 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
11041 m->cs, m->ip);
11042
11043- if (m->cs == __KERNEL_CS)
11044+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
11045 print_symbol("{%s}", m->ip);
11046 pr_cont("\n");
11047 }
11048@@ -233,10 +234,10 @@ static void print_mce(struct mce *m)
11049
11050 #define PANIC_TIMEOUT 5 /* 5 seconds */
11051
11052-static atomic_t mce_paniced;
11053+static atomic_unchecked_t mce_paniced;
11054
11055 static int fake_panic;
11056-static atomic_t mce_fake_paniced;
11057+static atomic_unchecked_t mce_fake_paniced;
11058
11059 /* Panic in progress. Enable interrupts and wait for final IPI */
11060 static void wait_for_panic(void)
11061@@ -260,7 +261,7 @@ static void mce_panic(char *msg, struct
11062 /*
11063 * Make sure only one CPU runs in machine check panic
11064 */
11065- if (atomic_inc_return(&mce_paniced) > 1)
11066+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
11067 wait_for_panic();
11068 barrier();
11069
11070@@ -268,7 +269,7 @@ static void mce_panic(char *msg, struct
11071 console_verbose();
11072 } else {
11073 /* Don't log too much for fake panic */
11074- if (atomic_inc_return(&mce_fake_paniced) > 1)
11075+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
11076 return;
11077 }
11078 /* First print corrected ones that are still unlogged */
11079@@ -610,7 +611,7 @@ static int mce_timed_out(u64 *t)
11080 * might have been modified by someone else.
11081 */
11082 rmb();
11083- if (atomic_read(&mce_paniced))
11084+ if (atomic_read_unchecked(&mce_paniced))
11085 wait_for_panic();
11086 if (!monarch_timeout)
11087 goto out;
11088@@ -1429,7 +1430,7 @@ void __cpuinit mcheck_cpu_init(struct cp
11089 */
11090
11091 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
11092-static int mce_chrdev_open_count; /* #times opened */
11093+static local_t mce_chrdev_open_count; /* #times opened */
11094 static int mce_chrdev_open_exclu; /* already open exclusive? */
11095
11096 static int mce_chrdev_open(struct inode *inode, struct file *file)
11097@@ -1437,7 +1438,7 @@ static int mce_chrdev_open(struct inode
11098 spin_lock(&mce_chrdev_state_lock);
11099
11100 if (mce_chrdev_open_exclu ||
11101- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
11102+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
11103 spin_unlock(&mce_chrdev_state_lock);
11104
11105 return -EBUSY;
11106@@ -1445,7 +1446,7 @@ static int mce_chrdev_open(struct inode
11107
11108 if (file->f_flags & O_EXCL)
11109 mce_chrdev_open_exclu = 1;
11110- mce_chrdev_open_count++;
11111+ local_inc(&mce_chrdev_open_count);
11112
11113 spin_unlock(&mce_chrdev_state_lock);
11114
11115@@ -1456,7 +1457,7 @@ static int mce_chrdev_release(struct ino
11116 {
11117 spin_lock(&mce_chrdev_state_lock);
11118
11119- mce_chrdev_open_count--;
11120+ local_dec(&mce_chrdev_open_count);
11121 mce_chrdev_open_exclu = 0;
11122
11123 spin_unlock(&mce_chrdev_state_lock);
11124@@ -2147,7 +2148,7 @@ struct dentry *mce_get_debugfs_dir(void)
11125 static void mce_reset(void)
11126 {
11127 cpu_missing = 0;
11128- atomic_set(&mce_fake_paniced, 0);
11129+ atomic_set_unchecked(&mce_fake_paniced, 0);
11130 atomic_set(&mce_executing, 0);
11131 atomic_set(&mce_callin, 0);
11132 atomic_set(&global_nwo, 0);
11133diff -urNp linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce-inject.c
11134--- linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-11-11 15:19:27.000000000 -0500
11135+++ linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-11-16 18:39:07.000000000 -0500
11136@@ -215,7 +215,9 @@ static int inject_init(void)
11137 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
11138 return -ENOMEM;
11139 printk(KERN_INFO "Machine check injector initialized\n");
11140- mce_chrdev_ops.write = mce_write;
11141+ pax_open_kernel();
11142+ *(void **)&mce_chrdev_ops.write = mce_write;
11143+ pax_close_kernel();
11144 register_die_notifier(&mce_raise_nb);
11145 return 0;
11146 }
11147diff -urNp linux-3.1.1/arch/x86/kernel/cpu/mtrr/main.c linux-3.1.1/arch/x86/kernel/cpu/mtrr/main.c
11148--- linux-3.1.1/arch/x86/kernel/cpu/mtrr/main.c 2011-11-11 15:19:27.000000000 -0500
11149+++ linux-3.1.1/arch/x86/kernel/cpu/mtrr/main.c 2011-11-16 18:39:07.000000000 -0500
11150@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
11151 u64 size_or_mask, size_and_mask;
11152 static bool mtrr_aps_delayed_init;
11153
11154-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
11155+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
11156
11157 const struct mtrr_ops *mtrr_if;
11158
11159diff -urNp linux-3.1.1/arch/x86/kernel/cpu/mtrr/mtrr.h linux-3.1.1/arch/x86/kernel/cpu/mtrr/mtrr.h
11160--- linux-3.1.1/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-11-11 15:19:27.000000000 -0500
11161+++ linux-3.1.1/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-11-16 18:39:07.000000000 -0500
11162@@ -25,7 +25,7 @@ struct mtrr_ops {
11163 int (*validate_add_page)(unsigned long base, unsigned long size,
11164 unsigned int type);
11165 int (*have_wrcomb)(void);
11166-};
11167+} __do_const;
11168
11169 extern int generic_get_free_region(unsigned long base, unsigned long size,
11170 int replace_reg);
11171diff -urNp linux-3.1.1/arch/x86/kernel/cpu/perf_event.c linux-3.1.1/arch/x86/kernel/cpu/perf_event.c
11172--- linux-3.1.1/arch/x86/kernel/cpu/perf_event.c 2011-11-11 15:19:27.000000000 -0500
11173+++ linux-3.1.1/arch/x86/kernel/cpu/perf_event.c 2011-11-16 18:40:08.000000000 -0500
11174@@ -795,6 +795,8 @@ static int x86_schedule_events(struct cp
11175 int i, j, w, wmax, num = 0;
11176 struct hw_perf_event *hwc;
11177
11178+ pax_track_stack();
11179+
11180 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
11181
11182 for (i = 0; i < n; i++) {
11183@@ -1919,7 +1921,7 @@ perf_callchain_user(struct perf_callchai
11184 break;
11185
11186 perf_callchain_store(entry, frame.return_address);
11187- fp = frame.next_frame;
11188+ fp = (const void __force_user *)frame.next_frame;
11189 }
11190 }
11191
11192diff -urNp linux-3.1.1/arch/x86/kernel/crash.c linux-3.1.1/arch/x86/kernel/crash.c
11193--- linux-3.1.1/arch/x86/kernel/crash.c 2011-11-11 15:19:27.000000000 -0500
11194+++ linux-3.1.1/arch/x86/kernel/crash.c 2011-11-16 18:39:07.000000000 -0500
11195@@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
11196 regs = args->regs;
11197
11198 #ifdef CONFIG_X86_32
11199- if (!user_mode_vm(regs)) {
11200+ if (!user_mode(regs)) {
11201 crash_fixup_ss_esp(&fixed_regs, regs);
11202 regs = &fixed_regs;
11203 }
11204diff -urNp linux-3.1.1/arch/x86/kernel/doublefault_32.c linux-3.1.1/arch/x86/kernel/doublefault_32.c
11205--- linux-3.1.1/arch/x86/kernel/doublefault_32.c 2011-11-11 15:19:27.000000000 -0500
11206+++ linux-3.1.1/arch/x86/kernel/doublefault_32.c 2011-11-16 18:39:07.000000000 -0500
11207@@ -11,7 +11,7 @@
11208
11209 #define DOUBLEFAULT_STACKSIZE (1024)
11210 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
11211-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
11212+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
11213
11214 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
11215
11216@@ -21,7 +21,7 @@ static void doublefault_fn(void)
11217 unsigned long gdt, tss;
11218
11219 store_gdt(&gdt_desc);
11220- gdt = gdt_desc.address;
11221+ gdt = (unsigned long)gdt_desc.address;
11222
11223 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
11224
11225@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
11226 /* 0x2 bit is always set */
11227 .flags = X86_EFLAGS_SF | 0x2,
11228 .sp = STACK_START,
11229- .es = __USER_DS,
11230+ .es = __KERNEL_DS,
11231 .cs = __KERNEL_CS,
11232 .ss = __KERNEL_DS,
11233- .ds = __USER_DS,
11234+ .ds = __KERNEL_DS,
11235 .fs = __KERNEL_PERCPU,
11236
11237 .__cr3 = __pa_nodebug(swapper_pg_dir),
11238diff -urNp linux-3.1.1/arch/x86/kernel/dumpstack_32.c linux-3.1.1/arch/x86/kernel/dumpstack_32.c
11239--- linux-3.1.1/arch/x86/kernel/dumpstack_32.c 2011-11-11 15:19:27.000000000 -0500
11240+++ linux-3.1.1/arch/x86/kernel/dumpstack_32.c 2011-11-16 18:39:07.000000000 -0500
11241@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
11242 bp = stack_frame(task, regs);
11243
11244 for (;;) {
11245- struct thread_info *context;
11246+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11247
11248- context = (struct thread_info *)
11249- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
11250- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
11251+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11252
11253- stack = (unsigned long *)context->previous_esp;
11254- if (!stack)
11255+ if (stack_start == task_stack_page(task))
11256 break;
11257+ stack = *(unsigned long **)stack_start;
11258 if (ops->stack(data, "IRQ") < 0)
11259 break;
11260 touch_nmi_watchdog();
11261@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
11262 * When in-kernel, we also print out the stack and code at the
11263 * time of the fault..
11264 */
11265- if (!user_mode_vm(regs)) {
11266+ if (!user_mode(regs)) {
11267 unsigned int code_prologue = code_bytes * 43 / 64;
11268 unsigned int code_len = code_bytes;
11269 unsigned char c;
11270 u8 *ip;
11271+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
11272
11273 printk(KERN_EMERG "Stack:\n");
11274 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
11275
11276 printk(KERN_EMERG "Code: ");
11277
11278- ip = (u8 *)regs->ip - code_prologue;
11279+ ip = (u8 *)regs->ip - code_prologue + cs_base;
11280 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
11281 /* try starting at IP */
11282- ip = (u8 *)regs->ip;
11283+ ip = (u8 *)regs->ip + cs_base;
11284 code_len = code_len - code_prologue + 1;
11285 }
11286 for (i = 0; i < code_len; i++, ip++) {
11287@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
11288 printk(" Bad EIP value.");
11289 break;
11290 }
11291- if (ip == (u8 *)regs->ip)
11292+ if (ip == (u8 *)regs->ip + cs_base)
11293 printk("<%02x> ", c);
11294 else
11295 printk("%02x ", c);
11296@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
11297 {
11298 unsigned short ud2;
11299
11300+ ip = ktla_ktva(ip);
11301 if (ip < PAGE_OFFSET)
11302 return 0;
11303 if (probe_kernel_address((unsigned short *)ip, ud2))
11304@@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
11305
11306 return ud2 == 0x0b0f;
11307 }
11308+
11309+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11310+void pax_check_alloca(unsigned long size)
11311+{
11312+ unsigned long sp = (unsigned long)&sp, stack_left;
11313+
11314+ /* all kernel stacks are of the same size */
11315+ stack_left = sp & (THREAD_SIZE - 1);
11316+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
11317+}
11318+EXPORT_SYMBOL(pax_check_alloca);
11319+#endif
11320diff -urNp linux-3.1.1/arch/x86/kernel/dumpstack_64.c linux-3.1.1/arch/x86/kernel/dumpstack_64.c
11321--- linux-3.1.1/arch/x86/kernel/dumpstack_64.c 2011-11-11 15:19:27.000000000 -0500
11322+++ linux-3.1.1/arch/x86/kernel/dumpstack_64.c 2011-11-16 18:39:07.000000000 -0500
11323@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task
11324 unsigned long *irq_stack_end =
11325 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
11326 unsigned used = 0;
11327- struct thread_info *tinfo;
11328 int graph = 0;
11329 unsigned long dummy;
11330+ void *stack_start;
11331
11332 if (!task)
11333 task = current;
11334@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task
11335 * current stack address. If the stacks consist of nested
11336 * exceptions
11337 */
11338- tinfo = task_thread_info(task);
11339 for (;;) {
11340 char *id;
11341 unsigned long *estack_end;
11342+
11343 estack_end = in_exception_stack(cpu, (unsigned long)stack,
11344 &used, &id);
11345
11346@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task
11347 if (ops->stack(data, id) < 0)
11348 break;
11349
11350- bp = ops->walk_stack(tinfo, stack, bp, ops,
11351+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
11352 data, estack_end, &graph);
11353 ops->stack(data, "<EOE>");
11354 /*
11355@@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task
11356 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
11357 if (ops->stack(data, "IRQ") < 0)
11358 break;
11359- bp = ops->walk_stack(tinfo, stack, bp,
11360+ bp = ops->walk_stack(task, irq_stack, stack, bp,
11361 ops, data, irq_stack_end, &graph);
11362 /*
11363 * We link to the next stack (which would be
11364@@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task
11365 /*
11366 * This handles the process stack:
11367 */
11368- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
11369+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11370+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11371 put_cpu();
11372 }
11373 EXPORT_SYMBOL(dump_trace);
11374@@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
11375
11376 return ud2 == 0x0b0f;
11377 }
11378+
11379+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11380+void pax_check_alloca(unsigned long size)
11381+{
11382+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
11383+ unsigned cpu, used;
11384+ char *id;
11385+
11386+ /* check the process stack first */
11387+ stack_start = (unsigned long)task_stack_page(current);
11388+ stack_end = stack_start + THREAD_SIZE;
11389+ if (likely(stack_start <= sp && sp < stack_end)) {
11390+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
11391+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
11392+ return;
11393+ }
11394+
11395+ cpu = get_cpu();
11396+
11397+ /* check the irq stacks */
11398+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
11399+ stack_start = stack_end - IRQ_STACK_SIZE;
11400+ if (stack_start <= sp && sp < stack_end) {
11401+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
11402+ put_cpu();
11403+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
11404+ return;
11405+ }
11406+
11407+ /* check the exception stacks */
11408+ used = 0;
11409+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
11410+ stack_start = stack_end - EXCEPTION_STKSZ;
11411+ if (stack_end && stack_start <= sp && sp < stack_end) {
11412+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
11413+ put_cpu();
11414+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
11415+ return;
11416+ }
11417+
11418+ put_cpu();
11419+
11420+ /* unknown stack */
11421+ BUG();
11422+}
11423+EXPORT_SYMBOL(pax_check_alloca);
11424+#endif
11425diff -urNp linux-3.1.1/arch/x86/kernel/dumpstack.c linux-3.1.1/arch/x86/kernel/dumpstack.c
11426--- linux-3.1.1/arch/x86/kernel/dumpstack.c 2011-11-11 15:19:27.000000000 -0500
11427+++ linux-3.1.1/arch/x86/kernel/dumpstack.c 2011-11-16 18:40:08.000000000 -0500
11428@@ -2,6 +2,9 @@
11429 * Copyright (C) 1991, 1992 Linus Torvalds
11430 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
11431 */
11432+#ifdef CONFIG_GRKERNSEC_HIDESYM
11433+#define __INCLUDED_BY_HIDESYM 1
11434+#endif
11435 #include <linux/kallsyms.h>
11436 #include <linux/kprobes.h>
11437 #include <linux/uaccess.h>
11438@@ -35,9 +38,8 @@ void printk_address(unsigned long addres
11439 static void
11440 print_ftrace_graph_addr(unsigned long addr, void *data,
11441 const struct stacktrace_ops *ops,
11442- struct thread_info *tinfo, int *graph)
11443+ struct task_struct *task, int *graph)
11444 {
11445- struct task_struct *task = tinfo->task;
11446 unsigned long ret_addr;
11447 int index = task->curr_ret_stack;
11448
11449@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
11450 static inline void
11451 print_ftrace_graph_addr(unsigned long addr, void *data,
11452 const struct stacktrace_ops *ops,
11453- struct thread_info *tinfo, int *graph)
11454+ struct task_struct *task, int *graph)
11455 { }
11456 #endif
11457
11458@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
11459 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
11460 */
11461
11462-static inline int valid_stack_ptr(struct thread_info *tinfo,
11463- void *p, unsigned int size, void *end)
11464+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
11465 {
11466- void *t = tinfo;
11467 if (end) {
11468 if (p < end && p >= (end-THREAD_SIZE))
11469 return 1;
11470@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
11471 }
11472
11473 unsigned long
11474-print_context_stack(struct thread_info *tinfo,
11475+print_context_stack(struct task_struct *task, void *stack_start,
11476 unsigned long *stack, unsigned long bp,
11477 const struct stacktrace_ops *ops, void *data,
11478 unsigned long *end, int *graph)
11479 {
11480 struct stack_frame *frame = (struct stack_frame *)bp;
11481
11482- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
11483+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
11484 unsigned long addr;
11485
11486 addr = *stack;
11487@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
11488 } else {
11489 ops->address(data, addr, 0);
11490 }
11491- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11492+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11493 }
11494 stack++;
11495 }
11496@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
11497 EXPORT_SYMBOL_GPL(print_context_stack);
11498
11499 unsigned long
11500-print_context_stack_bp(struct thread_info *tinfo,
11501+print_context_stack_bp(struct task_struct *task, void *stack_start,
11502 unsigned long *stack, unsigned long bp,
11503 const struct stacktrace_ops *ops, void *data,
11504 unsigned long *end, int *graph)
11505@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
11506 struct stack_frame *frame = (struct stack_frame *)bp;
11507 unsigned long *ret_addr = &frame->return_address;
11508
11509- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
11510+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
11511 unsigned long addr = *ret_addr;
11512
11513 if (!__kernel_text_address(addr))
11514@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
11515 ops->address(data, addr, 1);
11516 frame = frame->next_frame;
11517 ret_addr = &frame->return_address;
11518- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11519+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11520 }
11521
11522 return (unsigned long)frame;
11523@@ -186,7 +186,7 @@ void dump_stack(void)
11524
11525 bp = stack_frame(current, NULL);
11526 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
11527- current->pid, current->comm, print_tainted(),
11528+ task_pid_nr(current), current->comm, print_tainted(),
11529 init_utsname()->release,
11530 (int)strcspn(init_utsname()->version, " "),
11531 init_utsname()->version);
11532@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
11533 }
11534 EXPORT_SYMBOL_GPL(oops_begin);
11535
11536+extern void gr_handle_kernel_exploit(void);
11537+
11538 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
11539 {
11540 if (regs && kexec_should_crash(current))
11541@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long fl
11542 panic("Fatal exception in interrupt");
11543 if (panic_on_oops)
11544 panic("Fatal exception");
11545- do_exit(signr);
11546+
11547+ gr_handle_kernel_exploit();
11548+
11549+ do_group_exit(signr);
11550 }
11551
11552 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
11553@@ -269,7 +274,7 @@ int __kprobes __die(const char *str, str
11554
11555 show_registers(regs);
11556 #ifdef CONFIG_X86_32
11557- if (user_mode_vm(regs)) {
11558+ if (user_mode(regs)) {
11559 sp = regs->sp;
11560 ss = regs->ss & 0xffff;
11561 } else {
11562@@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs
11563 unsigned long flags = oops_begin();
11564 int sig = SIGSEGV;
11565
11566- if (!user_mode_vm(regs))
11567+ if (!user_mode(regs))
11568 report_bug(regs->ip, regs);
11569
11570 if (__die(str, regs, err))
11571diff -urNp linux-3.1.1/arch/x86/kernel/early_printk.c linux-3.1.1/arch/x86/kernel/early_printk.c
11572--- linux-3.1.1/arch/x86/kernel/early_printk.c 2011-11-11 15:19:27.000000000 -0500
11573+++ linux-3.1.1/arch/x86/kernel/early_printk.c 2011-11-16 18:40:08.000000000 -0500
11574@@ -7,6 +7,7 @@
11575 #include <linux/pci_regs.h>
11576 #include <linux/pci_ids.h>
11577 #include <linux/errno.h>
11578+#include <linux/sched.h>
11579 #include <asm/io.h>
11580 #include <asm/processor.h>
11581 #include <asm/fcntl.h>
11582@@ -179,6 +180,8 @@ asmlinkage void early_printk(const char
11583 int n;
11584 va_list ap;
11585
11586+ pax_track_stack();
11587+
11588 va_start(ap, fmt);
11589 n = vscnprintf(buf, sizeof(buf), fmt, ap);
11590 early_console->write(early_console, buf, n);
11591diff -urNp linux-3.1.1/arch/x86/kernel/entry_32.S linux-3.1.1/arch/x86/kernel/entry_32.S
11592--- linux-3.1.1/arch/x86/kernel/entry_32.S 2011-11-11 15:19:27.000000000 -0500
11593+++ linux-3.1.1/arch/x86/kernel/entry_32.S 2011-11-16 18:40:08.000000000 -0500
11594@@ -186,13 +186,146 @@
11595 /*CFI_REL_OFFSET gs, PT_GS*/
11596 .endm
11597 .macro SET_KERNEL_GS reg
11598+
11599+#ifdef CONFIG_CC_STACKPROTECTOR
11600 movl $(__KERNEL_STACK_CANARY), \reg
11601+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
11602+ movl $(__USER_DS), \reg
11603+#else
11604+ xorl \reg, \reg
11605+#endif
11606+
11607 movl \reg, %gs
11608 .endm
11609
11610 #endif /* CONFIG_X86_32_LAZY_GS */
11611
11612-.macro SAVE_ALL
11613+.macro pax_enter_kernel
11614+#ifdef CONFIG_PAX_KERNEXEC
11615+ call pax_enter_kernel
11616+#endif
11617+.endm
11618+
11619+.macro pax_exit_kernel
11620+#ifdef CONFIG_PAX_KERNEXEC
11621+ call pax_exit_kernel
11622+#endif
11623+.endm
11624+
11625+#ifdef CONFIG_PAX_KERNEXEC
11626+ENTRY(pax_enter_kernel)
11627+#ifdef CONFIG_PARAVIRT
11628+ pushl %eax
11629+ pushl %ecx
11630+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
11631+ mov %eax, %esi
11632+#else
11633+ mov %cr0, %esi
11634+#endif
11635+ bts $16, %esi
11636+ jnc 1f
11637+ mov %cs, %esi
11638+ cmp $__KERNEL_CS, %esi
11639+ jz 3f
11640+ ljmp $__KERNEL_CS, $3f
11641+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
11642+2:
11643+#ifdef CONFIG_PARAVIRT
11644+ mov %esi, %eax
11645+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11646+#else
11647+ mov %esi, %cr0
11648+#endif
11649+3:
11650+#ifdef CONFIG_PARAVIRT
11651+ popl %ecx
11652+ popl %eax
11653+#endif
11654+ ret
11655+ENDPROC(pax_enter_kernel)
11656+
11657+ENTRY(pax_exit_kernel)
11658+#ifdef CONFIG_PARAVIRT
11659+ pushl %eax
11660+ pushl %ecx
11661+#endif
11662+ mov %cs, %esi
11663+ cmp $__KERNEXEC_KERNEL_CS, %esi
11664+ jnz 2f
11665+#ifdef CONFIG_PARAVIRT
11666+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
11667+ mov %eax, %esi
11668+#else
11669+ mov %cr0, %esi
11670+#endif
11671+ btr $16, %esi
11672+ ljmp $__KERNEL_CS, $1f
11673+1:
11674+#ifdef CONFIG_PARAVIRT
11675+ mov %esi, %eax
11676+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
11677+#else
11678+ mov %esi, %cr0
11679+#endif
11680+2:
11681+#ifdef CONFIG_PARAVIRT
11682+ popl %ecx
11683+ popl %eax
11684+#endif
11685+ ret
11686+ENDPROC(pax_exit_kernel)
11687+#endif
11688+
11689+.macro pax_erase_kstack
11690+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11691+ call pax_erase_kstack
11692+#endif
11693+.endm
11694+
11695+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11696+/*
11697+ * ebp: thread_info
11698+ * ecx, edx: can be clobbered
11699+ */
11700+ENTRY(pax_erase_kstack)
11701+ pushl %edi
11702+ pushl %eax
11703+
11704+ mov TI_lowest_stack(%ebp), %edi
11705+ mov $-0xBEEF, %eax
11706+ std
11707+
11708+1: mov %edi, %ecx
11709+ and $THREAD_SIZE_asm - 1, %ecx
11710+ shr $2, %ecx
11711+ repne scasl
11712+ jecxz 2f
11713+
11714+ cmp $2*16, %ecx
11715+ jc 2f
11716+
11717+ mov $2*16, %ecx
11718+ repe scasl
11719+ jecxz 2f
11720+ jne 1b
11721+
11722+2: cld
11723+ mov %esp, %ecx
11724+ sub %edi, %ecx
11725+ shr $2, %ecx
11726+ rep stosl
11727+
11728+ mov TI_task_thread_sp0(%ebp), %edi
11729+ sub $128, %edi
11730+ mov %edi, TI_lowest_stack(%ebp)
11731+
11732+ popl %eax
11733+ popl %edi
11734+ ret
11735+ENDPROC(pax_erase_kstack)
11736+#endif
11737+
11738+.macro __SAVE_ALL _DS
11739 cld
11740 PUSH_GS
11741 pushl_cfi %fs
11742@@ -215,7 +348,7 @@
11743 CFI_REL_OFFSET ecx, 0
11744 pushl_cfi %ebx
11745 CFI_REL_OFFSET ebx, 0
11746- movl $(__USER_DS), %edx
11747+ movl $\_DS, %edx
11748 movl %edx, %ds
11749 movl %edx, %es
11750 movl $(__KERNEL_PERCPU), %edx
11751@@ -223,6 +356,15 @@
11752 SET_KERNEL_GS %edx
11753 .endm
11754
11755+.macro SAVE_ALL
11756+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
11757+ __SAVE_ALL __KERNEL_DS
11758+ pax_enter_kernel
11759+#else
11760+ __SAVE_ALL __USER_DS
11761+#endif
11762+.endm
11763+
11764 .macro RESTORE_INT_REGS
11765 popl_cfi %ebx
11766 CFI_RESTORE ebx
11767@@ -308,7 +450,7 @@ ENTRY(ret_from_fork)
11768 popfl_cfi
11769 jmp syscall_exit
11770 CFI_ENDPROC
11771-END(ret_from_fork)
11772+ENDPROC(ret_from_fork)
11773
11774 /*
11775 * Interrupt exit functions should be protected against kprobes
11776@@ -333,7 +475,15 @@ check_userspace:
11777 movb PT_CS(%esp), %al
11778 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
11779 cmpl $USER_RPL, %eax
11780+
11781+#ifdef CONFIG_PAX_KERNEXEC
11782+ jae resume_userspace
11783+
11784+ PAX_EXIT_KERNEL
11785+ jmp resume_kernel
11786+#else
11787 jb resume_kernel # not returning to v8086 or userspace
11788+#endif
11789
11790 ENTRY(resume_userspace)
11791 LOCKDEP_SYS_EXIT
11792@@ -345,8 +495,8 @@ ENTRY(resume_userspace)
11793 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
11794 # int/exception return?
11795 jne work_pending
11796- jmp restore_all
11797-END(ret_from_exception)
11798+ jmp restore_all_pax
11799+ENDPROC(ret_from_exception)
11800
11801 #ifdef CONFIG_PREEMPT
11802 ENTRY(resume_kernel)
11803@@ -361,7 +511,7 @@ need_resched:
11804 jz restore_all
11805 call preempt_schedule_irq
11806 jmp need_resched
11807-END(resume_kernel)
11808+ENDPROC(resume_kernel)
11809 #endif
11810 CFI_ENDPROC
11811 /*
11812@@ -395,23 +545,34 @@ sysenter_past_esp:
11813 /*CFI_REL_OFFSET cs, 0*/
11814 /*
11815 * Push current_thread_info()->sysenter_return to the stack.
11816- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
11817- * pushed above; +8 corresponds to copy_thread's esp0 setting.
11818 */
11819- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
11820+ pushl_cfi $0
11821 CFI_REL_OFFSET eip, 0
11822
11823 pushl_cfi %eax
11824 SAVE_ALL
11825+ GET_THREAD_INFO(%ebp)
11826+ movl TI_sysenter_return(%ebp),%ebp
11827+ movl %ebp,PT_EIP(%esp)
11828 ENABLE_INTERRUPTS(CLBR_NONE)
11829
11830 /*
11831 * Load the potential sixth argument from user stack.
11832 * Careful about security.
11833 */
11834+ movl PT_OLDESP(%esp),%ebp
11835+
11836+#ifdef CONFIG_PAX_MEMORY_UDEREF
11837+ mov PT_OLDSS(%esp),%ds
11838+1: movl %ds:(%ebp),%ebp
11839+ push %ss
11840+ pop %ds
11841+#else
11842 cmpl $__PAGE_OFFSET-3,%ebp
11843 jae syscall_fault
11844 1: movl (%ebp),%ebp
11845+#endif
11846+
11847 movl %ebp,PT_EBP(%esp)
11848 .section __ex_table,"a"
11849 .align 4
11850@@ -434,12 +595,24 @@ sysenter_do_call:
11851 testl $_TIF_ALLWORK_MASK, %ecx
11852 jne sysexit_audit
11853 sysenter_exit:
11854+
11855+#ifdef CONFIG_PAX_RANDKSTACK
11856+ pushl_cfi %eax
11857+ movl %esp, %eax
11858+ call pax_randomize_kstack
11859+ popl_cfi %eax
11860+#endif
11861+
11862+ pax_erase_kstack
11863+
11864 /* if something modifies registers it must also disable sysexit */
11865 movl PT_EIP(%esp), %edx
11866 movl PT_OLDESP(%esp), %ecx
11867 xorl %ebp,%ebp
11868 TRACE_IRQS_ON
11869 1: mov PT_FS(%esp), %fs
11870+2: mov PT_DS(%esp), %ds
11871+3: mov PT_ES(%esp), %es
11872 PTGS_TO_GS
11873 ENABLE_INTERRUPTS_SYSEXIT
11874
11875@@ -456,6 +629,9 @@ sysenter_audit:
11876 movl %eax,%edx /* 2nd arg: syscall number */
11877 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
11878 call audit_syscall_entry
11879+
11880+ pax_erase_kstack
11881+
11882 pushl_cfi %ebx
11883 movl PT_EAX(%esp),%eax /* reload syscall number */
11884 jmp sysenter_do_call
11885@@ -482,11 +658,17 @@ sysexit_audit:
11886
11887 CFI_ENDPROC
11888 .pushsection .fixup,"ax"
11889-2: movl $0,PT_FS(%esp)
11890+4: movl $0,PT_FS(%esp)
11891+ jmp 1b
11892+5: movl $0,PT_DS(%esp)
11893+ jmp 1b
11894+6: movl $0,PT_ES(%esp)
11895 jmp 1b
11896 .section __ex_table,"a"
11897 .align 4
11898- .long 1b,2b
11899+ .long 1b,4b
11900+ .long 2b,5b
11901+ .long 3b,6b
11902 .popsection
11903 PTGS_TO_GS_EX
11904 ENDPROC(ia32_sysenter_target)
11905@@ -519,6 +701,15 @@ syscall_exit:
11906 testl $_TIF_ALLWORK_MASK, %ecx # current->work
11907 jne syscall_exit_work
11908
11909+restore_all_pax:
11910+
11911+#ifdef CONFIG_PAX_RANDKSTACK
11912+ movl %esp, %eax
11913+ call pax_randomize_kstack
11914+#endif
11915+
11916+ pax_erase_kstack
11917+
11918 restore_all:
11919 TRACE_IRQS_IRET
11920 restore_all_notrace:
11921@@ -578,14 +769,34 @@ ldt_ss:
11922 * compensating for the offset by changing to the ESPFIX segment with
11923 * a base address that matches for the difference.
11924 */
11925-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
11926+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
11927 mov %esp, %edx /* load kernel esp */
11928 mov PT_OLDESP(%esp), %eax /* load userspace esp */
11929 mov %dx, %ax /* eax: new kernel esp */
11930 sub %eax, %edx /* offset (low word is 0) */
11931+#ifdef CONFIG_SMP
11932+ movl PER_CPU_VAR(cpu_number), %ebx
11933+ shll $PAGE_SHIFT_asm, %ebx
11934+ addl $cpu_gdt_table, %ebx
11935+#else
11936+ movl $cpu_gdt_table, %ebx
11937+#endif
11938 shr $16, %edx
11939- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
11940- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
11941+
11942+#ifdef CONFIG_PAX_KERNEXEC
11943+ mov %cr0, %esi
11944+ btr $16, %esi
11945+ mov %esi, %cr0
11946+#endif
11947+
11948+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
11949+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
11950+
11951+#ifdef CONFIG_PAX_KERNEXEC
11952+ bts $16, %esi
11953+ mov %esi, %cr0
11954+#endif
11955+
11956 pushl_cfi $__ESPFIX_SS
11957 pushl_cfi %eax /* new kernel esp */
11958 /* Disable interrupts, but do not irqtrace this section: we
11959@@ -614,34 +825,28 @@ work_resched:
11960 movl TI_flags(%ebp), %ecx
11961 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
11962 # than syscall tracing?
11963- jz restore_all
11964+ jz restore_all_pax
11965 testb $_TIF_NEED_RESCHED, %cl
11966 jnz work_resched
11967
11968 work_notifysig: # deal with pending signals and
11969 # notify-resume requests
11970+ movl %esp, %eax
11971 #ifdef CONFIG_VM86
11972 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
11973- movl %esp, %eax
11974- jne work_notifysig_v86 # returning to kernel-space or
11975+ jz 1f # returning to kernel-space or
11976 # vm86-space
11977- xorl %edx, %edx
11978- call do_notify_resume
11979- jmp resume_userspace_sig
11980
11981- ALIGN
11982-work_notifysig_v86:
11983 pushl_cfi %ecx # save ti_flags for do_notify_resume
11984 call save_v86_state # %eax contains pt_regs pointer
11985 popl_cfi %ecx
11986 movl %eax, %esp
11987-#else
11988- movl %esp, %eax
11989+1:
11990 #endif
11991 xorl %edx, %edx
11992 call do_notify_resume
11993 jmp resume_userspace_sig
11994-END(work_pending)
11995+ENDPROC(work_pending)
11996
11997 # perform syscall exit tracing
11998 ALIGN
11999@@ -649,11 +854,14 @@ syscall_trace_entry:
12000 movl $-ENOSYS,PT_EAX(%esp)
12001 movl %esp, %eax
12002 call syscall_trace_enter
12003+
12004+ pax_erase_kstack
12005+
12006 /* What it returned is what we'll actually use. */
12007 cmpl $(nr_syscalls), %eax
12008 jnae syscall_call
12009 jmp syscall_exit
12010-END(syscall_trace_entry)
12011+ENDPROC(syscall_trace_entry)
12012
12013 # perform syscall exit tracing
12014 ALIGN
12015@@ -666,20 +874,24 @@ syscall_exit_work:
12016 movl %esp, %eax
12017 call syscall_trace_leave
12018 jmp resume_userspace
12019-END(syscall_exit_work)
12020+ENDPROC(syscall_exit_work)
12021 CFI_ENDPROC
12022
12023 RING0_INT_FRAME # can't unwind into user space anyway
12024 syscall_fault:
12025+#ifdef CONFIG_PAX_MEMORY_UDEREF
12026+ push %ss
12027+ pop %ds
12028+#endif
12029 GET_THREAD_INFO(%ebp)
12030 movl $-EFAULT,PT_EAX(%esp)
12031 jmp resume_userspace
12032-END(syscall_fault)
12033+ENDPROC(syscall_fault)
12034
12035 syscall_badsys:
12036 movl $-ENOSYS,PT_EAX(%esp)
12037 jmp resume_userspace
12038-END(syscall_badsys)
12039+ENDPROC(syscall_badsys)
12040 CFI_ENDPROC
12041 /*
12042 * End of kprobes section
12043@@ -753,6 +965,36 @@ ptregs_clone:
12044 CFI_ENDPROC
12045 ENDPROC(ptregs_clone)
12046
12047+ ALIGN;
12048+ENTRY(kernel_execve)
12049+ CFI_STARTPROC
12050+ pushl_cfi %ebp
12051+ sub $PT_OLDSS+4,%esp
12052+ pushl_cfi %edi
12053+ pushl_cfi %ecx
12054+ pushl_cfi %eax
12055+ lea 3*4(%esp),%edi
12056+ mov $PT_OLDSS/4+1,%ecx
12057+ xorl %eax,%eax
12058+ rep stosl
12059+ popl_cfi %eax
12060+ popl_cfi %ecx
12061+ popl_cfi %edi
12062+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
12063+ pushl_cfi %esp
12064+ call sys_execve
12065+ add $4,%esp
12066+ CFI_ADJUST_CFA_OFFSET -4
12067+ GET_THREAD_INFO(%ebp)
12068+ test %eax,%eax
12069+ jz syscall_exit
12070+ add $PT_OLDSS+4,%esp
12071+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
12072+ popl_cfi %ebp
12073+ ret
12074+ CFI_ENDPROC
12075+ENDPROC(kernel_execve)
12076+
12077 .macro FIXUP_ESPFIX_STACK
12078 /*
12079 * Switch back for ESPFIX stack to the normal zerobased stack
12080@@ -762,8 +1004,15 @@ ENDPROC(ptregs_clone)
12081 * normal stack and adjusts ESP with the matching offset.
12082 */
12083 /* fixup the stack */
12084- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
12085- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
12086+#ifdef CONFIG_SMP
12087+ movl PER_CPU_VAR(cpu_number), %ebx
12088+ shll $PAGE_SHIFT_asm, %ebx
12089+ addl $cpu_gdt_table, %ebx
12090+#else
12091+ movl $cpu_gdt_table, %ebx
12092+#endif
12093+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
12094+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
12095 shl $16, %eax
12096 addl %esp, %eax /* the adjusted stack pointer */
12097 pushl_cfi $__KERNEL_DS
12098@@ -816,7 +1065,7 @@ vector=vector+1
12099 .endr
12100 2: jmp common_interrupt
12101 .endr
12102-END(irq_entries_start)
12103+ENDPROC(irq_entries_start)
12104
12105 .previous
12106 END(interrupt)
12107@@ -864,7 +1113,7 @@ ENTRY(coprocessor_error)
12108 pushl_cfi $do_coprocessor_error
12109 jmp error_code
12110 CFI_ENDPROC
12111-END(coprocessor_error)
12112+ENDPROC(coprocessor_error)
12113
12114 ENTRY(simd_coprocessor_error)
12115 RING0_INT_FRAME
12116@@ -885,7 +1134,7 @@ ENTRY(simd_coprocessor_error)
12117 #endif
12118 jmp error_code
12119 CFI_ENDPROC
12120-END(simd_coprocessor_error)
12121+ENDPROC(simd_coprocessor_error)
12122
12123 ENTRY(device_not_available)
12124 RING0_INT_FRAME
12125@@ -893,7 +1142,7 @@ ENTRY(device_not_available)
12126 pushl_cfi $do_device_not_available
12127 jmp error_code
12128 CFI_ENDPROC
12129-END(device_not_available)
12130+ENDPROC(device_not_available)
12131
12132 #ifdef CONFIG_PARAVIRT
12133 ENTRY(native_iret)
12134@@ -902,12 +1151,12 @@ ENTRY(native_iret)
12135 .align 4
12136 .long native_iret, iret_exc
12137 .previous
12138-END(native_iret)
12139+ENDPROC(native_iret)
12140
12141 ENTRY(native_irq_enable_sysexit)
12142 sti
12143 sysexit
12144-END(native_irq_enable_sysexit)
12145+ENDPROC(native_irq_enable_sysexit)
12146 #endif
12147
12148 ENTRY(overflow)
12149@@ -916,7 +1165,7 @@ ENTRY(overflow)
12150 pushl_cfi $do_overflow
12151 jmp error_code
12152 CFI_ENDPROC
12153-END(overflow)
12154+ENDPROC(overflow)
12155
12156 ENTRY(bounds)
12157 RING0_INT_FRAME
12158@@ -924,7 +1173,7 @@ ENTRY(bounds)
12159 pushl_cfi $do_bounds
12160 jmp error_code
12161 CFI_ENDPROC
12162-END(bounds)
12163+ENDPROC(bounds)
12164
12165 ENTRY(invalid_op)
12166 RING0_INT_FRAME
12167@@ -932,7 +1181,7 @@ ENTRY(invalid_op)
12168 pushl_cfi $do_invalid_op
12169 jmp error_code
12170 CFI_ENDPROC
12171-END(invalid_op)
12172+ENDPROC(invalid_op)
12173
12174 ENTRY(coprocessor_segment_overrun)
12175 RING0_INT_FRAME
12176@@ -940,35 +1189,35 @@ ENTRY(coprocessor_segment_overrun)
12177 pushl_cfi $do_coprocessor_segment_overrun
12178 jmp error_code
12179 CFI_ENDPROC
12180-END(coprocessor_segment_overrun)
12181+ENDPROC(coprocessor_segment_overrun)
12182
12183 ENTRY(invalid_TSS)
12184 RING0_EC_FRAME
12185 pushl_cfi $do_invalid_TSS
12186 jmp error_code
12187 CFI_ENDPROC
12188-END(invalid_TSS)
12189+ENDPROC(invalid_TSS)
12190
12191 ENTRY(segment_not_present)
12192 RING0_EC_FRAME
12193 pushl_cfi $do_segment_not_present
12194 jmp error_code
12195 CFI_ENDPROC
12196-END(segment_not_present)
12197+ENDPROC(segment_not_present)
12198
12199 ENTRY(stack_segment)
12200 RING0_EC_FRAME
12201 pushl_cfi $do_stack_segment
12202 jmp error_code
12203 CFI_ENDPROC
12204-END(stack_segment)
12205+ENDPROC(stack_segment)
12206
12207 ENTRY(alignment_check)
12208 RING0_EC_FRAME
12209 pushl_cfi $do_alignment_check
12210 jmp error_code
12211 CFI_ENDPROC
12212-END(alignment_check)
12213+ENDPROC(alignment_check)
12214
12215 ENTRY(divide_error)
12216 RING0_INT_FRAME
12217@@ -976,7 +1225,7 @@ ENTRY(divide_error)
12218 pushl_cfi $do_divide_error
12219 jmp error_code
12220 CFI_ENDPROC
12221-END(divide_error)
12222+ENDPROC(divide_error)
12223
12224 #ifdef CONFIG_X86_MCE
12225 ENTRY(machine_check)
12226@@ -985,7 +1234,7 @@ ENTRY(machine_check)
12227 pushl_cfi machine_check_vector
12228 jmp error_code
12229 CFI_ENDPROC
12230-END(machine_check)
12231+ENDPROC(machine_check)
12232 #endif
12233
12234 ENTRY(spurious_interrupt_bug)
12235@@ -994,7 +1243,7 @@ ENTRY(spurious_interrupt_bug)
12236 pushl_cfi $do_spurious_interrupt_bug
12237 jmp error_code
12238 CFI_ENDPROC
12239-END(spurious_interrupt_bug)
12240+ENDPROC(spurious_interrupt_bug)
12241 /*
12242 * End of kprobes section
12243 */
12244@@ -1109,7 +1358,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector
12245
12246 ENTRY(mcount)
12247 ret
12248-END(mcount)
12249+ENDPROC(mcount)
12250
12251 ENTRY(ftrace_caller)
12252 cmpl $0, function_trace_stop
12253@@ -1138,7 +1387,7 @@ ftrace_graph_call:
12254 .globl ftrace_stub
12255 ftrace_stub:
12256 ret
12257-END(ftrace_caller)
12258+ENDPROC(ftrace_caller)
12259
12260 #else /* ! CONFIG_DYNAMIC_FTRACE */
12261
12262@@ -1174,7 +1423,7 @@ trace:
12263 popl %ecx
12264 popl %eax
12265 jmp ftrace_stub
12266-END(mcount)
12267+ENDPROC(mcount)
12268 #endif /* CONFIG_DYNAMIC_FTRACE */
12269 #endif /* CONFIG_FUNCTION_TRACER */
12270
12271@@ -1195,7 +1444,7 @@ ENTRY(ftrace_graph_caller)
12272 popl %ecx
12273 popl %eax
12274 ret
12275-END(ftrace_graph_caller)
12276+ENDPROC(ftrace_graph_caller)
12277
12278 .globl return_to_handler
12279 return_to_handler:
12280@@ -1209,7 +1458,6 @@ return_to_handler:
12281 jmp *%ecx
12282 #endif
12283
12284-.section .rodata,"a"
12285 #include "syscall_table_32.S"
12286
12287 syscall_table_size=(.-sys_call_table)
12288@@ -1255,15 +1503,18 @@ error_code:
12289 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
12290 REG_TO_PTGS %ecx
12291 SET_KERNEL_GS %ecx
12292- movl $(__USER_DS), %ecx
12293+ movl $(__KERNEL_DS), %ecx
12294 movl %ecx, %ds
12295 movl %ecx, %es
12296+
12297+ pax_enter_kernel
12298+
12299 TRACE_IRQS_OFF
12300 movl %esp,%eax # pt_regs pointer
12301 call *%edi
12302 jmp ret_from_exception
12303 CFI_ENDPROC
12304-END(page_fault)
12305+ENDPROC(page_fault)
12306
12307 /*
12308 * Debug traps and NMI can happen at the one SYSENTER instruction
12309@@ -1305,7 +1556,7 @@ debug_stack_correct:
12310 call do_debug
12311 jmp ret_from_exception
12312 CFI_ENDPROC
12313-END(debug)
12314+ENDPROC(debug)
12315
12316 /*
12317 * NMI is doubly nasty. It can happen _while_ we're handling
12318@@ -1342,6 +1593,9 @@ nmi_stack_correct:
12319 xorl %edx,%edx # zero error code
12320 movl %esp,%eax # pt_regs pointer
12321 call do_nmi
12322+
12323+ pax_exit_kernel
12324+
12325 jmp restore_all_notrace
12326 CFI_ENDPROC
12327
12328@@ -1378,12 +1632,15 @@ nmi_espfix_stack:
12329 FIXUP_ESPFIX_STACK # %eax == %esp
12330 xorl %edx,%edx # zero error code
12331 call do_nmi
12332+
12333+ pax_exit_kernel
12334+
12335 RESTORE_REGS
12336 lss 12+4(%esp), %esp # back to espfix stack
12337 CFI_ADJUST_CFA_OFFSET -24
12338 jmp irq_return
12339 CFI_ENDPROC
12340-END(nmi)
12341+ENDPROC(nmi)
12342
12343 ENTRY(int3)
12344 RING0_INT_FRAME
12345@@ -1395,14 +1652,14 @@ ENTRY(int3)
12346 call do_int3
12347 jmp ret_from_exception
12348 CFI_ENDPROC
12349-END(int3)
12350+ENDPROC(int3)
12351
12352 ENTRY(general_protection)
12353 RING0_EC_FRAME
12354 pushl_cfi $do_general_protection
12355 jmp error_code
12356 CFI_ENDPROC
12357-END(general_protection)
12358+ENDPROC(general_protection)
12359
12360 #ifdef CONFIG_KVM_GUEST
12361 ENTRY(async_page_fault)
12362@@ -1410,7 +1667,7 @@ ENTRY(async_page_fault)
12363 pushl_cfi $do_async_page_fault
12364 jmp error_code
12365 CFI_ENDPROC
12366-END(async_page_fault)
12367+ENDPROC(async_page_fault)
12368 #endif
12369
12370 /*
12371diff -urNp linux-3.1.1/arch/x86/kernel/entry_64.S linux-3.1.1/arch/x86/kernel/entry_64.S
12372--- linux-3.1.1/arch/x86/kernel/entry_64.S 2011-11-11 15:19:27.000000000 -0500
12373+++ linux-3.1.1/arch/x86/kernel/entry_64.S 2011-11-16 18:40:08.000000000 -0500
12374@@ -55,6 +55,8 @@
12375 #include <asm/paravirt.h>
12376 #include <asm/ftrace.h>
12377 #include <asm/percpu.h>
12378+#include <asm/pgtable.h>
12379+#include <asm/alternative-asm.h>
12380
12381 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
12382 #include <linux/elf-em.h>
12383@@ -68,8 +70,9 @@
12384 #ifdef CONFIG_FUNCTION_TRACER
12385 #ifdef CONFIG_DYNAMIC_FTRACE
12386 ENTRY(mcount)
12387+ pax_force_retaddr
12388 retq
12389-END(mcount)
12390+ENDPROC(mcount)
12391
12392 ENTRY(ftrace_caller)
12393 cmpl $0, function_trace_stop
12394@@ -92,8 +95,9 @@ GLOBAL(ftrace_graph_call)
12395 #endif
12396
12397 GLOBAL(ftrace_stub)
12398+ pax_force_retaddr
12399 retq
12400-END(ftrace_caller)
12401+ENDPROC(ftrace_caller)
12402
12403 #else /* ! CONFIG_DYNAMIC_FTRACE */
12404 ENTRY(mcount)
12405@@ -112,6 +116,7 @@ ENTRY(mcount)
12406 #endif
12407
12408 GLOBAL(ftrace_stub)
12409+ pax_force_retaddr
12410 retq
12411
12412 trace:
12413@@ -121,12 +126,13 @@ trace:
12414 movq 8(%rbp), %rsi
12415 subq $MCOUNT_INSN_SIZE, %rdi
12416
12417+ pax_force_fptr ftrace_trace_function
12418 call *ftrace_trace_function
12419
12420 MCOUNT_RESTORE_FRAME
12421
12422 jmp ftrace_stub
12423-END(mcount)
12424+ENDPROC(mcount)
12425 #endif /* CONFIG_DYNAMIC_FTRACE */
12426 #endif /* CONFIG_FUNCTION_TRACER */
12427
12428@@ -146,8 +152,9 @@ ENTRY(ftrace_graph_caller)
12429
12430 MCOUNT_RESTORE_FRAME
12431
12432+ pax_force_retaddr
12433 retq
12434-END(ftrace_graph_caller)
12435+ENDPROC(ftrace_graph_caller)
12436
12437 GLOBAL(return_to_handler)
12438 subq $24, %rsp
12439@@ -163,6 +170,7 @@ GLOBAL(return_to_handler)
12440 movq 8(%rsp), %rdx
12441 movq (%rsp), %rax
12442 addq $24, %rsp
12443+ pax_force_fptr %rdi
12444 jmp *%rdi
12445 #endif
12446
12447@@ -178,6 +186,269 @@ ENTRY(native_usergs_sysret64)
12448 ENDPROC(native_usergs_sysret64)
12449 #endif /* CONFIG_PARAVIRT */
12450
12451+ .macro ljmpq sel, off
12452+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
12453+ .byte 0x48; ljmp *1234f(%rip)
12454+ .pushsection .rodata
12455+ .align 16
12456+ 1234: .quad \off; .word \sel
12457+ .popsection
12458+#else
12459+ pushq $\sel
12460+ pushq $\off
12461+ lretq
12462+#endif
12463+ .endm
12464+
12465+ .macro pax_enter_kernel
12466+#ifdef CONFIG_PAX_KERNEXEC
12467+ call pax_enter_kernel
12468+#endif
12469+ .endm
12470+
12471+ .macro pax_exit_kernel
12472+#ifdef CONFIG_PAX_KERNEXEC
12473+ call pax_exit_kernel
12474+#endif
12475+ .endm
12476+
12477+#ifdef CONFIG_PAX_KERNEXEC
12478+ENTRY(pax_enter_kernel)
12479+ pushq %rdi
12480+
12481+#ifdef CONFIG_PARAVIRT
12482+ PV_SAVE_REGS(CLBR_RDI)
12483+#endif
12484+
12485+ GET_CR0_INTO_RDI
12486+ bts $16,%rdi
12487+ jnc 1f
12488+ mov %cs,%edi
12489+ cmp $__KERNEL_CS,%edi
12490+ jz 3f
12491+ ljmpq __KERNEL_CS,3f
12492+1: ljmpq __KERNEXEC_KERNEL_CS,2f
12493+2: SET_RDI_INTO_CR0
12494+3:
12495+
12496+#ifdef CONFIG_PARAVIRT
12497+ PV_RESTORE_REGS(CLBR_RDI)
12498+#endif
12499+
12500+ popq %rdi
12501+ pax_force_retaddr
12502+ retq
12503+ENDPROC(pax_enter_kernel)
12504+
12505+ENTRY(pax_exit_kernel)
12506+ pushq %rdi
12507+
12508+#ifdef CONFIG_PARAVIRT
12509+ PV_SAVE_REGS(CLBR_RDI)
12510+#endif
12511+
12512+ mov %cs,%rdi
12513+ cmp $__KERNEXEC_KERNEL_CS,%edi
12514+ jnz 2f
12515+ GET_CR0_INTO_RDI
12516+ btr $16,%rdi
12517+ ljmpq __KERNEL_CS,1f
12518+1: SET_RDI_INTO_CR0
12519+2:
12520+
12521+#ifdef CONFIG_PARAVIRT
12522+ PV_RESTORE_REGS(CLBR_RDI);
12523+#endif
12524+
12525+ popq %rdi
12526+ pax_force_retaddr
12527+ retq
12528+ENDPROC(pax_exit_kernel)
12529+#endif
12530+
12531+ .macro pax_enter_kernel_user
12532+#ifdef CONFIG_PAX_MEMORY_UDEREF
12533+ call pax_enter_kernel_user
12534+#endif
12535+ .endm
12536+
12537+ .macro pax_exit_kernel_user
12538+#ifdef CONFIG_PAX_MEMORY_UDEREF
12539+ call pax_exit_kernel_user
12540+#endif
12541+#ifdef CONFIG_PAX_RANDKSTACK
12542+ push %rax
12543+ call pax_randomize_kstack
12544+ pop %rax
12545+#endif
12546+ .endm
12547+
12548+#ifdef CONFIG_PAX_MEMORY_UDEREF
12549+ENTRY(pax_enter_kernel_user)
12550+ pushq %rdi
12551+ pushq %rbx
12552+
12553+#ifdef CONFIG_PARAVIRT
12554+ PV_SAVE_REGS(CLBR_RDI)
12555+#endif
12556+
12557+ GET_CR3_INTO_RDI
12558+ mov %rdi,%rbx
12559+ add $__START_KERNEL_map,%rbx
12560+ sub phys_base(%rip),%rbx
12561+
12562+#ifdef CONFIG_PARAVIRT
12563+ pushq %rdi
12564+ cmpl $0, pv_info+PARAVIRT_enabled
12565+ jz 1f
12566+ i = 0
12567+ .rept USER_PGD_PTRS
12568+ mov i*8(%rbx),%rsi
12569+ mov $0,%sil
12570+ lea i*8(%rbx),%rdi
12571+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
12572+ i = i + 1
12573+ .endr
12574+ jmp 2f
12575+1:
12576+#endif
12577+
12578+ i = 0
12579+ .rept USER_PGD_PTRS
12580+ movb $0,i*8(%rbx)
12581+ i = i + 1
12582+ .endr
12583+
12584+#ifdef CONFIG_PARAVIRT
12585+2: popq %rdi
12586+#endif
12587+ SET_RDI_INTO_CR3
12588+
12589+#ifdef CONFIG_PAX_KERNEXEC
12590+ GET_CR0_INTO_RDI
12591+ bts $16,%rdi
12592+ SET_RDI_INTO_CR0
12593+#endif
12594+
12595+#ifdef CONFIG_PARAVIRT
12596+ PV_RESTORE_REGS(CLBR_RDI)
12597+#endif
12598+
12599+ popq %rbx
12600+ popq %rdi
12601+ pax_force_retaddr
12602+ retq
12603+ENDPROC(pax_enter_kernel_user)
12604+
12605+ENTRY(pax_exit_kernel_user)
12606+ push %rdi
12607+
12608+#ifdef CONFIG_PARAVIRT
12609+ pushq %rbx
12610+ PV_SAVE_REGS(CLBR_RDI)
12611+#endif
12612+
12613+#ifdef CONFIG_PAX_KERNEXEC
12614+ GET_CR0_INTO_RDI
12615+ btr $16,%rdi
12616+ SET_RDI_INTO_CR0
12617+#endif
12618+
12619+ GET_CR3_INTO_RDI
12620+ add $__START_KERNEL_map,%rdi
12621+ sub phys_base(%rip),%rdi
12622+
12623+#ifdef CONFIG_PARAVIRT
12624+ cmpl $0, pv_info+PARAVIRT_enabled
12625+ jz 1f
12626+ mov %rdi,%rbx
12627+ i = 0
12628+ .rept USER_PGD_PTRS
12629+ mov i*8(%rbx),%rsi
12630+ mov $0x67,%sil
12631+ lea i*8(%rbx),%rdi
12632+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
12633+ i = i + 1
12634+ .endr
12635+ jmp 2f
12636+1:
12637+#endif
12638+
12639+ i = 0
12640+ .rept USER_PGD_PTRS
12641+ movb $0x67,i*8(%rdi)
12642+ i = i + 1
12643+ .endr
12644+
12645+#ifdef CONFIG_PARAVIRT
12646+2: PV_RESTORE_REGS(CLBR_RDI)
12647+ popq %rbx
12648+#endif
12649+
12650+ popq %rdi
12651+ pax_force_retaddr
12652+ retq
12653+ENDPROC(pax_exit_kernel_user)
12654+#endif
12655+
12656+ .macro pax_erase_kstack
12657+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12658+ call pax_erase_kstack
12659+#endif
12660+ .endm
12661+
12662+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12663+/*
12664+ * r10: thread_info
12665+ * rcx, rdx: can be clobbered
12666+ */
12667+ENTRY(pax_erase_kstack)
12668+ pushq %rdi
12669+ pushq %rax
12670+ pushq %r10
12671+
12672+ GET_THREAD_INFO(%r10)
12673+ mov TI_lowest_stack(%r10), %rdi
12674+ mov $-0xBEEF, %rax
12675+ std
12676+
12677+1: mov %edi, %ecx
12678+ and $THREAD_SIZE_asm - 1, %ecx
12679+ shr $3, %ecx
12680+ repne scasq
12681+ jecxz 2f
12682+
12683+ cmp $2*8, %ecx
12684+ jc 2f
12685+
12686+ mov $2*8, %ecx
12687+ repe scasq
12688+ jecxz 2f
12689+ jne 1b
12690+
12691+2: cld
12692+ mov %esp, %ecx
12693+ sub %edi, %ecx
12694+
12695+ cmp $THREAD_SIZE_asm, %rcx
12696+ jb 3f
12697+ ud2
12698+3:
12699+
12700+ shr $3, %ecx
12701+ rep stosq
12702+
12703+ mov TI_task_thread_sp0(%r10), %rdi
12704+ sub $256, %rdi
12705+ mov %rdi, TI_lowest_stack(%r10)
12706+
12707+ popq %r10
12708+ popq %rax
12709+ popq %rdi
12710+ pax_force_retaddr
12711+ ret
12712+ENDPROC(pax_erase_kstack)
12713+#endif
12714
12715 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
12716 #ifdef CONFIG_TRACE_IRQFLAGS
12717@@ -319,7 +590,7 @@ ENDPROC(native_usergs_sysret64)
12718 movq %rsp, %rsi
12719
12720 leaq -RBP(%rsp),%rdi /* arg1 for handler */
12721- testl $3, CS(%rdi)
12722+ testb $3, CS(%rdi)
12723 je 1f
12724 SWAPGS
12725 /*
12726@@ -350,9 +621,10 @@ ENTRY(save_rest)
12727 movq_cfi r15, R15+16
12728 movq %r11, 8(%rsp) /* return address */
12729 FIXUP_TOP_OF_STACK %r11, 16
12730+ pax_force_retaddr
12731 ret
12732 CFI_ENDPROC
12733-END(save_rest)
12734+ENDPROC(save_rest)
12735
12736 /* save complete stack frame */
12737 .pushsection .kprobes.text, "ax"
12738@@ -381,9 +653,10 @@ ENTRY(save_paranoid)
12739 js 1f /* negative -> in kernel */
12740 SWAPGS
12741 xorl %ebx,%ebx
12742-1: ret
12743+1: pax_force_retaddr
12744+ ret
12745 CFI_ENDPROC
12746-END(save_paranoid)
12747+ENDPROC(save_paranoid)
12748 .popsection
12749
12750 /*
12751@@ -405,7 +678,7 @@ ENTRY(ret_from_fork)
12752
12753 RESTORE_REST
12754
12755- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12756+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12757 je int_ret_from_sys_call
12758
12759 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
12760@@ -415,7 +688,7 @@ ENTRY(ret_from_fork)
12761 jmp ret_from_sys_call # go to the SYSRET fastpath
12762
12763 CFI_ENDPROC
12764-END(ret_from_fork)
12765+ENDPROC(ret_from_fork)
12766
12767 /*
12768 * System call entry. Up to 6 arguments in registers are supported.
12769@@ -451,7 +724,7 @@ END(ret_from_fork)
12770 ENTRY(system_call)
12771 CFI_STARTPROC simple
12772 CFI_SIGNAL_FRAME
12773- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12774+ CFI_DEF_CFA rsp,0
12775 CFI_REGISTER rip,rcx
12776 /*CFI_REGISTER rflags,r11*/
12777 SWAPGS_UNSAFE_STACK
12778@@ -464,12 +737,13 @@ ENTRY(system_call_after_swapgs)
12779
12780 movq %rsp,PER_CPU_VAR(old_rsp)
12781 movq PER_CPU_VAR(kernel_stack),%rsp
12782+ pax_enter_kernel_user
12783 /*
12784 * No need to follow this irqs off/on section - it's straight
12785 * and short:
12786 */
12787 ENABLE_INTERRUPTS(CLBR_NONE)
12788- SAVE_ARGS 8,0
12789+ SAVE_ARGS 8*6,0
12790 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12791 movq %rcx,RIP-ARGOFFSET(%rsp)
12792 CFI_REL_OFFSET rip,RIP-ARGOFFSET
12793@@ -498,6 +772,8 @@ sysret_check:
12794 andl %edi,%edx
12795 jnz sysret_careful
12796 CFI_REMEMBER_STATE
12797+ pax_exit_kernel_user
12798+ pax_erase_kstack
12799 /*
12800 * sysretq will re-enable interrupts:
12801 */
12802@@ -556,6 +832,9 @@ auditsys:
12803 movq %rax,%rsi /* 2nd arg: syscall number */
12804 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
12805 call audit_syscall_entry
12806+
12807+ pax_erase_kstack
12808+
12809 LOAD_ARGS 0 /* reload call-clobbered registers */
12810 jmp system_call_fastpath
12811
12812@@ -586,6 +865,9 @@ tracesys:
12813 FIXUP_TOP_OF_STACK %rdi
12814 movq %rsp,%rdi
12815 call syscall_trace_enter
12816+
12817+ pax_erase_kstack
12818+
12819 /*
12820 * Reload arg registers from stack in case ptrace changed them.
12821 * We don't reload %rax because syscall_trace_enter() returned
12822@@ -607,7 +889,7 @@ tracesys:
12823 GLOBAL(int_ret_from_sys_call)
12824 DISABLE_INTERRUPTS(CLBR_NONE)
12825 TRACE_IRQS_OFF
12826- testl $3,CS-ARGOFFSET(%rsp)
12827+ testb $3,CS-ARGOFFSET(%rsp)
12828 je retint_restore_args
12829 movl $_TIF_ALLWORK_MASK,%edi
12830 /* edi: mask to check */
12831@@ -664,7 +946,7 @@ int_restore_rest:
12832 TRACE_IRQS_OFF
12833 jmp int_with_check
12834 CFI_ENDPROC
12835-END(system_call)
12836+ENDPROC(system_call)
12837
12838 /*
12839 * Certain special system calls that need to save a complete full stack frame.
12840@@ -680,7 +962,7 @@ ENTRY(\label)
12841 call \func
12842 jmp ptregscall_common
12843 CFI_ENDPROC
12844-END(\label)
12845+ENDPROC(\label)
12846 .endm
12847
12848 PTREGSCALL stub_clone, sys_clone, %r8
12849@@ -698,9 +980,10 @@ ENTRY(ptregscall_common)
12850 movq_cfi_restore R12+8, r12
12851 movq_cfi_restore RBP+8, rbp
12852 movq_cfi_restore RBX+8, rbx
12853+ pax_force_retaddr
12854 ret $REST_SKIP /* pop extended registers */
12855 CFI_ENDPROC
12856-END(ptregscall_common)
12857+ENDPROC(ptregscall_common)
12858
12859 ENTRY(stub_execve)
12860 CFI_STARTPROC
12861@@ -715,7 +998,7 @@ ENTRY(stub_execve)
12862 RESTORE_REST
12863 jmp int_ret_from_sys_call
12864 CFI_ENDPROC
12865-END(stub_execve)
12866+ENDPROC(stub_execve)
12867
12868 /*
12869 * sigreturn is special because it needs to restore all registers on return.
12870@@ -733,7 +1016,7 @@ ENTRY(stub_rt_sigreturn)
12871 RESTORE_REST
12872 jmp int_ret_from_sys_call
12873 CFI_ENDPROC
12874-END(stub_rt_sigreturn)
12875+ENDPROC(stub_rt_sigreturn)
12876
12877 /*
12878 * Build the entry stubs and pointer table with some assembler magic.
12879@@ -768,7 +1051,7 @@ vector=vector+1
12880 2: jmp common_interrupt
12881 .endr
12882 CFI_ENDPROC
12883-END(irq_entries_start)
12884+ENDPROC(irq_entries_start)
12885
12886 .previous
12887 END(interrupt)
12888@@ -789,6 +1072,16 @@ END(interrupt)
12889 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
12890 SAVE_ARGS_IRQ
12891 PARTIAL_FRAME 0
12892+#ifdef CONFIG_PAX_MEMORY_UDEREF
12893+ testb $3, CS(%rdi)
12894+ jnz 1f
12895+ pax_enter_kernel
12896+ jmp 2f
12897+1: pax_enter_kernel_user
12898+2:
12899+#else
12900+ pax_enter_kernel
12901+#endif
12902 call \func
12903 .endm
12904
12905@@ -820,7 +1113,7 @@ ret_from_intr:
12906
12907 exit_intr:
12908 GET_THREAD_INFO(%rcx)
12909- testl $3,CS-ARGOFFSET(%rsp)
12910+ testb $3,CS-ARGOFFSET(%rsp)
12911 je retint_kernel
12912
12913 /* Interrupt came from user space */
12914@@ -842,12 +1135,16 @@ retint_swapgs: /* return to user-space
12915 * The iretq could re-enable interrupts:
12916 */
12917 DISABLE_INTERRUPTS(CLBR_ANY)
12918+ pax_exit_kernel_user
12919+ pax_erase_kstack
12920 TRACE_IRQS_IRETQ
12921 SWAPGS
12922 jmp restore_args
12923
12924 retint_restore_args: /* return to kernel space */
12925 DISABLE_INTERRUPTS(CLBR_ANY)
12926+ pax_exit_kernel
12927+ pax_force_retaddr RIP-ARGOFFSET
12928 /*
12929 * The iretq could re-enable interrupts:
12930 */
12931@@ -936,7 +1233,7 @@ ENTRY(retint_kernel)
12932 #endif
12933
12934 CFI_ENDPROC
12935-END(common_interrupt)
12936+ENDPROC(common_interrupt)
12937 /*
12938 * End of kprobes section
12939 */
12940@@ -952,7 +1249,7 @@ ENTRY(\sym)
12941 interrupt \do_sym
12942 jmp ret_from_intr
12943 CFI_ENDPROC
12944-END(\sym)
12945+ENDPROC(\sym)
12946 .endm
12947
12948 #ifdef CONFIG_SMP
12949@@ -1017,12 +1314,22 @@ ENTRY(\sym)
12950 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12951 call error_entry
12952 DEFAULT_FRAME 0
12953+#ifdef CONFIG_PAX_MEMORY_UDEREF
12954+ testb $3, CS(%rsp)
12955+ jnz 1f
12956+ pax_enter_kernel
12957+ jmp 2f
12958+1: pax_enter_kernel_user
12959+2:
12960+#else
12961+ pax_enter_kernel
12962+#endif
12963 movq %rsp,%rdi /* pt_regs pointer */
12964 xorl %esi,%esi /* no error code */
12965 call \do_sym
12966 jmp error_exit /* %ebx: no swapgs flag */
12967 CFI_ENDPROC
12968-END(\sym)
12969+ENDPROC(\sym)
12970 .endm
12971
12972 .macro paranoidzeroentry sym do_sym
12973@@ -1034,15 +1341,25 @@ ENTRY(\sym)
12974 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12975 call save_paranoid
12976 TRACE_IRQS_OFF
12977+#ifdef CONFIG_PAX_MEMORY_UDEREF
12978+ testb $3, CS(%rsp)
12979+ jnz 1f
12980+ pax_enter_kernel
12981+ jmp 2f
12982+1: pax_enter_kernel_user
12983+2:
12984+#else
12985+ pax_enter_kernel
12986+#endif
12987 movq %rsp,%rdi /* pt_regs pointer */
12988 xorl %esi,%esi /* no error code */
12989 call \do_sym
12990 jmp paranoid_exit /* %ebx: no swapgs flag */
12991 CFI_ENDPROC
12992-END(\sym)
12993+ENDPROC(\sym)
12994 .endm
12995
12996-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
12997+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
12998 .macro paranoidzeroentry_ist sym do_sym ist
12999 ENTRY(\sym)
13000 INTR_FRAME
13001@@ -1052,14 +1369,30 @@ ENTRY(\sym)
13002 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
13003 call save_paranoid
13004 TRACE_IRQS_OFF
13005+#ifdef CONFIG_PAX_MEMORY_UDEREF
13006+ testb $3, CS(%rsp)
13007+ jnz 1f
13008+ pax_enter_kernel
13009+ jmp 2f
13010+1: pax_enter_kernel_user
13011+2:
13012+#else
13013+ pax_enter_kernel
13014+#endif
13015 movq %rsp,%rdi /* pt_regs pointer */
13016 xorl %esi,%esi /* no error code */
13017+#ifdef CONFIG_SMP
13018+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
13019+ lea init_tss(%r12), %r12
13020+#else
13021+ lea init_tss(%rip), %r12
13022+#endif
13023 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
13024 call \do_sym
13025 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
13026 jmp paranoid_exit /* %ebx: no swapgs flag */
13027 CFI_ENDPROC
13028-END(\sym)
13029+ENDPROC(\sym)
13030 .endm
13031
13032 .macro errorentry sym do_sym
13033@@ -1070,13 +1403,23 @@ ENTRY(\sym)
13034 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
13035 call error_entry
13036 DEFAULT_FRAME 0
13037+#ifdef CONFIG_PAX_MEMORY_UDEREF
13038+ testb $3, CS(%rsp)
13039+ jnz 1f
13040+ pax_enter_kernel
13041+ jmp 2f
13042+1: pax_enter_kernel_user
13043+2:
13044+#else
13045+ pax_enter_kernel
13046+#endif
13047 movq %rsp,%rdi /* pt_regs pointer */
13048 movq ORIG_RAX(%rsp),%rsi /* get error code */
13049 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13050 call \do_sym
13051 jmp error_exit /* %ebx: no swapgs flag */
13052 CFI_ENDPROC
13053-END(\sym)
13054+ENDPROC(\sym)
13055 .endm
13056
13057 /* error code is on the stack already */
13058@@ -1089,13 +1432,23 @@ ENTRY(\sym)
13059 call save_paranoid
13060 DEFAULT_FRAME 0
13061 TRACE_IRQS_OFF
13062+#ifdef CONFIG_PAX_MEMORY_UDEREF
13063+ testb $3, CS(%rsp)
13064+ jnz 1f
13065+ pax_enter_kernel
13066+ jmp 2f
13067+1: pax_enter_kernel_user
13068+2:
13069+#else
13070+ pax_enter_kernel
13071+#endif
13072 movq %rsp,%rdi /* pt_regs pointer */
13073 movq ORIG_RAX(%rsp),%rsi /* get error code */
13074 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13075 call \do_sym
13076 jmp paranoid_exit /* %ebx: no swapgs flag */
13077 CFI_ENDPROC
13078-END(\sym)
13079+ENDPROC(\sym)
13080 .endm
13081
13082 zeroentry divide_error do_divide_error
13083@@ -1125,9 +1478,10 @@ gs_change:
13084 2: mfence /* workaround */
13085 SWAPGS
13086 popfq_cfi
13087+ pax_force_retaddr
13088 ret
13089 CFI_ENDPROC
13090-END(native_load_gs_index)
13091+ENDPROC(native_load_gs_index)
13092
13093 .section __ex_table,"a"
13094 .align 8
13095@@ -1149,13 +1503,14 @@ ENTRY(kernel_thread_helper)
13096 * Here we are in the child and the registers are set as they were
13097 * at kernel_thread() invocation in the parent.
13098 */
13099+ pax_force_fptr %rsi
13100 call *%rsi
13101 # exit
13102 mov %eax, %edi
13103 call do_exit
13104 ud2 # padding for call trace
13105 CFI_ENDPROC
13106-END(kernel_thread_helper)
13107+ENDPROC(kernel_thread_helper)
13108
13109 /*
13110 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
13111@@ -1184,9 +1539,10 @@ ENTRY(kernel_execve)
13112 je int_ret_from_sys_call
13113 RESTORE_ARGS
13114 UNFAKE_STACK_FRAME
13115+ pax_force_retaddr
13116 ret
13117 CFI_ENDPROC
13118-END(kernel_execve)
13119+ENDPROC(kernel_execve)
13120
13121 /* Call softirq on interrupt stack. Interrupts are off. */
13122 ENTRY(call_softirq)
13123@@ -1204,9 +1560,10 @@ ENTRY(call_softirq)
13124 CFI_DEF_CFA_REGISTER rsp
13125 CFI_ADJUST_CFA_OFFSET -8
13126 decl PER_CPU_VAR(irq_count)
13127+ pax_force_retaddr
13128 ret
13129 CFI_ENDPROC
13130-END(call_softirq)
13131+ENDPROC(call_softirq)
13132
13133 #ifdef CONFIG_XEN
13134 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
13135@@ -1244,7 +1601,7 @@ ENTRY(xen_do_hypervisor_callback) # do
13136 decl PER_CPU_VAR(irq_count)
13137 jmp error_exit
13138 CFI_ENDPROC
13139-END(xen_do_hypervisor_callback)
13140+ENDPROC(xen_do_hypervisor_callback)
13141
13142 /*
13143 * Hypervisor uses this for application faults while it executes.
13144@@ -1303,7 +1660,7 @@ ENTRY(xen_failsafe_callback)
13145 SAVE_ALL
13146 jmp error_exit
13147 CFI_ENDPROC
13148-END(xen_failsafe_callback)
13149+ENDPROC(xen_failsafe_callback)
13150
13151 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
13152 xen_hvm_callback_vector xen_evtchn_do_upcall
13153@@ -1352,16 +1709,31 @@ ENTRY(paranoid_exit)
13154 TRACE_IRQS_OFF
13155 testl %ebx,%ebx /* swapgs needed? */
13156 jnz paranoid_restore
13157- testl $3,CS(%rsp)
13158+ testb $3,CS(%rsp)
13159 jnz paranoid_userspace
13160+#ifdef CONFIG_PAX_MEMORY_UDEREF
13161+ pax_exit_kernel
13162+ TRACE_IRQS_IRETQ 0
13163+ SWAPGS_UNSAFE_STACK
13164+ RESTORE_ALL 8
13165+ pax_force_retaddr
13166+ jmp irq_return
13167+#endif
13168 paranoid_swapgs:
13169+#ifdef CONFIG_PAX_MEMORY_UDEREF
13170+ pax_exit_kernel_user
13171+#else
13172+ pax_exit_kernel
13173+#endif
13174 TRACE_IRQS_IRETQ 0
13175 SWAPGS_UNSAFE_STACK
13176 RESTORE_ALL 8
13177 jmp irq_return
13178 paranoid_restore:
13179+ pax_exit_kernel
13180 TRACE_IRQS_IRETQ 0
13181 RESTORE_ALL 8
13182+ pax_force_retaddr
13183 jmp irq_return
13184 paranoid_userspace:
13185 GET_THREAD_INFO(%rcx)
13186@@ -1390,7 +1762,7 @@ paranoid_schedule:
13187 TRACE_IRQS_OFF
13188 jmp paranoid_userspace
13189 CFI_ENDPROC
13190-END(paranoid_exit)
13191+ENDPROC(paranoid_exit)
13192
13193 /*
13194 * Exception entry point. This expects an error code/orig_rax on the stack.
13195@@ -1417,12 +1789,13 @@ ENTRY(error_entry)
13196 movq_cfi r14, R14+8
13197 movq_cfi r15, R15+8
13198 xorl %ebx,%ebx
13199- testl $3,CS+8(%rsp)
13200+ testb $3,CS+8(%rsp)
13201 je error_kernelspace
13202 error_swapgs:
13203 SWAPGS
13204 error_sti:
13205 TRACE_IRQS_OFF
13206+ pax_force_retaddr
13207 ret
13208
13209 /*
13210@@ -1449,7 +1822,7 @@ bstep_iret:
13211 movq %rcx,RIP+8(%rsp)
13212 jmp error_swapgs
13213 CFI_ENDPROC
13214-END(error_entry)
13215+ENDPROC(error_entry)
13216
13217
13218 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
13219@@ -1469,7 +1842,7 @@ ENTRY(error_exit)
13220 jnz retint_careful
13221 jmp retint_swapgs
13222 CFI_ENDPROC
13223-END(error_exit)
13224+ENDPROC(error_exit)
13225
13226
13227 /* runs on exception stack */
13228@@ -1481,6 +1854,16 @@ ENTRY(nmi)
13229 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
13230 call save_paranoid
13231 DEFAULT_FRAME 0
13232+#ifdef CONFIG_PAX_MEMORY_UDEREF
13233+ testb $3, CS(%rsp)
13234+ jnz 1f
13235+ pax_enter_kernel
13236+ jmp 2f
13237+1: pax_enter_kernel_user
13238+2:
13239+#else
13240+ pax_enter_kernel
13241+#endif
13242 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
13243 movq %rsp,%rdi
13244 movq $-1,%rsi
13245@@ -1491,12 +1874,28 @@ ENTRY(nmi)
13246 DISABLE_INTERRUPTS(CLBR_NONE)
13247 testl %ebx,%ebx /* swapgs needed? */
13248 jnz nmi_restore
13249- testl $3,CS(%rsp)
13250+ testb $3,CS(%rsp)
13251 jnz nmi_userspace
13252+#ifdef CONFIG_PAX_MEMORY_UDEREF
13253+ pax_exit_kernel
13254+ SWAPGS_UNSAFE_STACK
13255+ RESTORE_ALL 8
13256+ pax_force_retaddr
13257+ jmp irq_return
13258+#endif
13259 nmi_swapgs:
13260+#ifdef CONFIG_PAX_MEMORY_UDEREF
13261+ pax_exit_kernel_user
13262+#else
13263+ pax_exit_kernel
13264+#endif
13265 SWAPGS_UNSAFE_STACK
13266+ RESTORE_ALL 8
13267+ jmp irq_return
13268 nmi_restore:
13269+ pax_exit_kernel
13270 RESTORE_ALL 8
13271+ pax_force_retaddr
13272 jmp irq_return
13273 nmi_userspace:
13274 GET_THREAD_INFO(%rcx)
13275@@ -1525,14 +1924,14 @@ nmi_schedule:
13276 jmp paranoid_exit
13277 CFI_ENDPROC
13278 #endif
13279-END(nmi)
13280+ENDPROC(nmi)
13281
13282 ENTRY(ignore_sysret)
13283 CFI_STARTPROC
13284 mov $-ENOSYS,%eax
13285 sysret
13286 CFI_ENDPROC
13287-END(ignore_sysret)
13288+ENDPROC(ignore_sysret)
13289
13290 /*
13291 * End of kprobes section
13292diff -urNp linux-3.1.1/arch/x86/kernel/ftrace.c linux-3.1.1/arch/x86/kernel/ftrace.c
13293--- linux-3.1.1/arch/x86/kernel/ftrace.c 2011-11-11 15:19:27.000000000 -0500
13294+++ linux-3.1.1/arch/x86/kernel/ftrace.c 2011-11-16 18:39:07.000000000 -0500
13295@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
13296 static const void *mod_code_newcode; /* holds the text to write to the IP */
13297
13298 static unsigned nmi_wait_count;
13299-static atomic_t nmi_update_count = ATOMIC_INIT(0);
13300+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
13301
13302 int ftrace_arch_read_dyn_info(char *buf, int size)
13303 {
13304@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
13305
13306 r = snprintf(buf, size, "%u %u",
13307 nmi_wait_count,
13308- atomic_read(&nmi_update_count));
13309+ atomic_read_unchecked(&nmi_update_count));
13310 return r;
13311 }
13312
13313@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
13314
13315 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
13316 smp_rmb();
13317+ pax_open_kernel();
13318 ftrace_mod_code();
13319- atomic_inc(&nmi_update_count);
13320+ pax_close_kernel();
13321+ atomic_inc_unchecked(&nmi_update_count);
13322 }
13323 /* Must have previous changes seen before executions */
13324 smp_mb();
13325@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
13326 {
13327 unsigned char replaced[MCOUNT_INSN_SIZE];
13328
13329+ ip = ktla_ktva(ip);
13330+
13331 /*
13332 * Note: Due to modules and __init, code can
13333 * disappear and change, we need to protect against faulting
13334@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
13335 unsigned char old[MCOUNT_INSN_SIZE], *new;
13336 int ret;
13337
13338- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
13339+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
13340 new = ftrace_call_replace(ip, (unsigned long)func);
13341 ret = ftrace_modify_code(ip, old, new);
13342
13343@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
13344 {
13345 unsigned char code[MCOUNT_INSN_SIZE];
13346
13347+ ip = ktla_ktva(ip);
13348+
13349 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
13350 return -EFAULT;
13351
13352diff -urNp linux-3.1.1/arch/x86/kernel/head32.c linux-3.1.1/arch/x86/kernel/head32.c
13353--- linux-3.1.1/arch/x86/kernel/head32.c 2011-11-11 15:19:27.000000000 -0500
13354+++ linux-3.1.1/arch/x86/kernel/head32.c 2011-11-16 18:39:07.000000000 -0500
13355@@ -19,6 +19,7 @@
13356 #include <asm/io_apic.h>
13357 #include <asm/bios_ebda.h>
13358 #include <asm/tlbflush.h>
13359+#include <asm/boot.h>
13360
13361 static void __init i386_default_early_setup(void)
13362 {
13363@@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
13364 {
13365 memblock_init();
13366
13367- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
13368+ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
13369
13370 #ifdef CONFIG_BLK_DEV_INITRD
13371 /* Reserve INITRD */
13372diff -urNp linux-3.1.1/arch/x86/kernel/head_32.S linux-3.1.1/arch/x86/kernel/head_32.S
13373--- linux-3.1.1/arch/x86/kernel/head_32.S 2011-11-11 15:19:27.000000000 -0500
13374+++ linux-3.1.1/arch/x86/kernel/head_32.S 2011-11-16 18:39:07.000000000 -0500
13375@@ -25,6 +25,12 @@
13376 /* Physical address */
13377 #define pa(X) ((X) - __PAGE_OFFSET)
13378
13379+#ifdef CONFIG_PAX_KERNEXEC
13380+#define ta(X) (X)
13381+#else
13382+#define ta(X) ((X) - __PAGE_OFFSET)
13383+#endif
13384+
13385 /*
13386 * References to members of the new_cpu_data structure.
13387 */
13388@@ -54,11 +60,7 @@
13389 * and small than max_low_pfn, otherwise will waste some page table entries
13390 */
13391
13392-#if PTRS_PER_PMD > 1
13393-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
13394-#else
13395-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
13396-#endif
13397+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
13398
13399 /* Number of possible pages in the lowmem region */
13400 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
13401@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
13402 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
13403
13404 /*
13405+ * Real beginning of normal "text" segment
13406+ */
13407+ENTRY(stext)
13408+ENTRY(_stext)
13409+
13410+/*
13411 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
13412 * %esi points to the real-mode code as a 32-bit pointer.
13413 * CS and DS must be 4 GB flat segments, but we don't depend on
13414@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
13415 * can.
13416 */
13417 __HEAD
13418+
13419+#ifdef CONFIG_PAX_KERNEXEC
13420+ jmp startup_32
13421+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
13422+.fill PAGE_SIZE-5,1,0xcc
13423+#endif
13424+
13425 ENTRY(startup_32)
13426 movl pa(stack_start),%ecx
13427
13428@@ -105,6 +120,57 @@ ENTRY(startup_32)
13429 2:
13430 leal -__PAGE_OFFSET(%ecx),%esp
13431
13432+#ifdef CONFIG_SMP
13433+ movl $pa(cpu_gdt_table),%edi
13434+ movl $__per_cpu_load,%eax
13435+ movw %ax,__KERNEL_PERCPU + 2(%edi)
13436+ rorl $16,%eax
13437+ movb %al,__KERNEL_PERCPU + 4(%edi)
13438+ movb %ah,__KERNEL_PERCPU + 7(%edi)
13439+ movl $__per_cpu_end - 1,%eax
13440+ subl $__per_cpu_start,%eax
13441+ movw %ax,__KERNEL_PERCPU + 0(%edi)
13442+#endif
13443+
13444+#ifdef CONFIG_PAX_MEMORY_UDEREF
13445+ movl $NR_CPUS,%ecx
13446+ movl $pa(cpu_gdt_table),%edi
13447+1:
13448+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
13449+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
13450+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
13451+ addl $PAGE_SIZE_asm,%edi
13452+ loop 1b
13453+#endif
13454+
13455+#ifdef CONFIG_PAX_KERNEXEC
13456+ movl $pa(boot_gdt),%edi
13457+ movl $__LOAD_PHYSICAL_ADDR,%eax
13458+ movw %ax,__BOOT_CS + 2(%edi)
13459+ rorl $16,%eax
13460+ movb %al,__BOOT_CS + 4(%edi)
13461+ movb %ah,__BOOT_CS + 7(%edi)
13462+ rorl $16,%eax
13463+
13464+ ljmp $(__BOOT_CS),$1f
13465+1:
13466+
13467+ movl $NR_CPUS,%ecx
13468+ movl $pa(cpu_gdt_table),%edi
13469+ addl $__PAGE_OFFSET,%eax
13470+1:
13471+ movw %ax,__KERNEL_CS + 2(%edi)
13472+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
13473+ rorl $16,%eax
13474+ movb %al,__KERNEL_CS + 4(%edi)
13475+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
13476+ movb %ah,__KERNEL_CS + 7(%edi)
13477+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
13478+ rorl $16,%eax
13479+ addl $PAGE_SIZE_asm,%edi
13480+ loop 1b
13481+#endif
13482+
13483 /*
13484 * Clear BSS first so that there are no surprises...
13485 */
13486@@ -195,8 +261,11 @@ ENTRY(startup_32)
13487 movl %eax, pa(max_pfn_mapped)
13488
13489 /* Do early initialization of the fixmap area */
13490- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
13491- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
13492+#ifdef CONFIG_COMPAT_VDSO
13493+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
13494+#else
13495+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
13496+#endif
13497 #else /* Not PAE */
13498
13499 page_pde_offset = (__PAGE_OFFSET >> 20);
13500@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
13501 movl %eax, pa(max_pfn_mapped)
13502
13503 /* Do early initialization of the fixmap area */
13504- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
13505- movl %eax,pa(initial_page_table+0xffc)
13506+#ifdef CONFIG_COMPAT_VDSO
13507+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
13508+#else
13509+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
13510+#endif
13511 #endif
13512
13513 #ifdef CONFIG_PARAVIRT
13514@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
13515 cmpl $num_subarch_entries, %eax
13516 jae bad_subarch
13517
13518- movl pa(subarch_entries)(,%eax,4), %eax
13519- subl $__PAGE_OFFSET, %eax
13520- jmp *%eax
13521+ jmp *pa(subarch_entries)(,%eax,4)
13522
13523 bad_subarch:
13524 WEAK(lguest_entry)
13525@@ -255,10 +325,10 @@ WEAK(xen_entry)
13526 __INITDATA
13527
13528 subarch_entries:
13529- .long default_entry /* normal x86/PC */
13530- .long lguest_entry /* lguest hypervisor */
13531- .long xen_entry /* Xen hypervisor */
13532- .long default_entry /* Moorestown MID */
13533+ .long ta(default_entry) /* normal x86/PC */
13534+ .long ta(lguest_entry) /* lguest hypervisor */
13535+ .long ta(xen_entry) /* Xen hypervisor */
13536+ .long ta(default_entry) /* Moorestown MID */
13537 num_subarch_entries = (. - subarch_entries) / 4
13538 .previous
13539 #else
13540@@ -312,6 +382,7 @@ default_entry:
13541 orl %edx,%eax
13542 movl %eax,%cr4
13543
13544+#ifdef CONFIG_X86_PAE
13545 testb $X86_CR4_PAE, %al # check if PAE is enabled
13546 jz 6f
13547
13548@@ -340,6 +411,9 @@ default_entry:
13549 /* Make changes effective */
13550 wrmsr
13551
13552+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
13553+#endif
13554+
13555 6:
13556
13557 /*
13558@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
13559 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
13560 movl %eax,%ss # after changing gdt.
13561
13562- movl $(__USER_DS),%eax # DS/ES contains default USER segment
13563+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
13564 movl %eax,%ds
13565 movl %eax,%es
13566
13567@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
13568 */
13569 cmpb $0,ready
13570 jne 1f
13571- movl $gdt_page,%eax
13572+ movl $cpu_gdt_table,%eax
13573 movl $stack_canary,%ecx
13574+#ifdef CONFIG_SMP
13575+ addl $__per_cpu_load,%ecx
13576+#endif
13577 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
13578 shrl $16, %ecx
13579 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
13580 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
13581 1:
13582-#endif
13583 movl $(__KERNEL_STACK_CANARY),%eax
13584+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
13585+ movl $(__USER_DS),%eax
13586+#else
13587+ xorl %eax,%eax
13588+#endif
13589 movl %eax,%gs
13590
13591 xorl %eax,%eax # Clear LDT
13592@@ -558,22 +639,22 @@ early_page_fault:
13593 jmp early_fault
13594
13595 early_fault:
13596- cld
13597 #ifdef CONFIG_PRINTK
13598+ cmpl $1,%ss:early_recursion_flag
13599+ je hlt_loop
13600+ incl %ss:early_recursion_flag
13601+ cld
13602 pusha
13603 movl $(__KERNEL_DS),%eax
13604 movl %eax,%ds
13605 movl %eax,%es
13606- cmpl $2,early_recursion_flag
13607- je hlt_loop
13608- incl early_recursion_flag
13609 movl %cr2,%eax
13610 pushl %eax
13611 pushl %edx /* trapno */
13612 pushl $fault_msg
13613 call printk
13614+; call dump_stack
13615 #endif
13616- call dump_stack
13617 hlt_loop:
13618 hlt
13619 jmp hlt_loop
13620@@ -581,8 +662,11 @@ hlt_loop:
13621 /* This is the default interrupt "handler" :-) */
13622 ALIGN
13623 ignore_int:
13624- cld
13625 #ifdef CONFIG_PRINTK
13626+ cmpl $2,%ss:early_recursion_flag
13627+ je hlt_loop
13628+ incl %ss:early_recursion_flag
13629+ cld
13630 pushl %eax
13631 pushl %ecx
13632 pushl %edx
13633@@ -591,9 +675,6 @@ ignore_int:
13634 movl $(__KERNEL_DS),%eax
13635 movl %eax,%ds
13636 movl %eax,%es
13637- cmpl $2,early_recursion_flag
13638- je hlt_loop
13639- incl early_recursion_flag
13640 pushl 16(%esp)
13641 pushl 24(%esp)
13642 pushl 32(%esp)
13643@@ -622,29 +703,43 @@ ENTRY(initial_code)
13644 /*
13645 * BSS section
13646 */
13647-__PAGE_ALIGNED_BSS
13648- .align PAGE_SIZE
13649 #ifdef CONFIG_X86_PAE
13650+.section .initial_pg_pmd,"a",@progbits
13651 initial_pg_pmd:
13652 .fill 1024*KPMDS,4,0
13653 #else
13654+.section .initial_page_table,"a",@progbits
13655 ENTRY(initial_page_table)
13656 .fill 1024,4,0
13657 #endif
13658+.section .initial_pg_fixmap,"a",@progbits
13659 initial_pg_fixmap:
13660 .fill 1024,4,0
13661+.section .empty_zero_page,"a",@progbits
13662 ENTRY(empty_zero_page)
13663 .fill 4096,1,0
13664+.section .swapper_pg_dir,"a",@progbits
13665 ENTRY(swapper_pg_dir)
13666+#ifdef CONFIG_X86_PAE
13667+ .fill 4,8,0
13668+#else
13669 .fill 1024,4,0
13670+#endif
13671+
13672+/*
13673+ * The IDT has to be page-aligned to simplify the Pentium
13674+ * F0 0F bug workaround.. We have a special link segment
13675+ * for this.
13676+ */
13677+.section .idt,"a",@progbits
13678+ENTRY(idt_table)
13679+ .fill 256,8,0
13680
13681 /*
13682 * This starts the data section.
13683 */
13684 #ifdef CONFIG_X86_PAE
13685-__PAGE_ALIGNED_DATA
13686- /* Page-aligned for the benefit of paravirt? */
13687- .align PAGE_SIZE
13688+.section .initial_page_table,"a",@progbits
13689 ENTRY(initial_page_table)
13690 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
13691 # if KPMDS == 3
13692@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
13693 # error "Kernel PMDs should be 1, 2 or 3"
13694 # endif
13695 .align PAGE_SIZE /* needs to be page-sized too */
13696+
13697+#ifdef CONFIG_PAX_PER_CPU_PGD
13698+ENTRY(cpu_pgd)
13699+ .rept NR_CPUS
13700+ .fill 4,8,0
13701+ .endr
13702+#endif
13703+
13704 #endif
13705
13706 .data
13707 .balign 4
13708 ENTRY(stack_start)
13709- .long init_thread_union+THREAD_SIZE
13710+ .long init_thread_union+THREAD_SIZE-8
13711+
13712+ready: .byte 0
13713
13714+.section .rodata,"a",@progbits
13715 early_recursion_flag:
13716 .long 0
13717
13718-ready: .byte 0
13719-
13720 int_msg:
13721 .asciz "Unknown interrupt or fault at: %p %p %p\n"
13722
13723@@ -707,7 +811,7 @@ fault_msg:
13724 .word 0 # 32 bit align gdt_desc.address
13725 boot_gdt_descr:
13726 .word __BOOT_DS+7
13727- .long boot_gdt - __PAGE_OFFSET
13728+ .long pa(boot_gdt)
13729
13730 .word 0 # 32-bit align idt_desc.address
13731 idt_descr:
13732@@ -718,7 +822,7 @@ idt_descr:
13733 .word 0 # 32 bit align gdt_desc.address
13734 ENTRY(early_gdt_descr)
13735 .word GDT_ENTRIES*8-1
13736- .long gdt_page /* Overwritten for secondary CPUs */
13737+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
13738
13739 /*
13740 * The boot_gdt must mirror the equivalent in setup.S and is
13741@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
13742 .align L1_CACHE_BYTES
13743 ENTRY(boot_gdt)
13744 .fill GDT_ENTRY_BOOT_CS,8,0
13745- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
13746- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
13747+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
13748+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
13749+
13750+ .align PAGE_SIZE_asm
13751+ENTRY(cpu_gdt_table)
13752+ .rept NR_CPUS
13753+ .quad 0x0000000000000000 /* NULL descriptor */
13754+ .quad 0x0000000000000000 /* 0x0b reserved */
13755+ .quad 0x0000000000000000 /* 0x13 reserved */
13756+ .quad 0x0000000000000000 /* 0x1b reserved */
13757+
13758+#ifdef CONFIG_PAX_KERNEXEC
13759+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
13760+#else
13761+ .quad 0x0000000000000000 /* 0x20 unused */
13762+#endif
13763+
13764+ .quad 0x0000000000000000 /* 0x28 unused */
13765+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
13766+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
13767+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
13768+ .quad 0x0000000000000000 /* 0x4b reserved */
13769+ .quad 0x0000000000000000 /* 0x53 reserved */
13770+ .quad 0x0000000000000000 /* 0x5b reserved */
13771+
13772+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
13773+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
13774+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
13775+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
13776+
13777+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
13778+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
13779+
13780+ /*
13781+ * Segments used for calling PnP BIOS have byte granularity.
13782+ * The code segments and data segments have fixed 64k limits,
13783+ * the transfer segment sizes are set at run time.
13784+ */
13785+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
13786+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
13787+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
13788+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
13789+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
13790+
13791+ /*
13792+ * The APM segments have byte granularity and their bases
13793+ * are set at run time. All have 64k limits.
13794+ */
13795+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
13796+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
13797+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
13798+
13799+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
13800+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
13801+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
13802+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
13803+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
13804+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
13805+
13806+ /* Be sure this is zeroed to avoid false validations in Xen */
13807+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
13808+ .endr
13809diff -urNp linux-3.1.1/arch/x86/kernel/head_64.S linux-3.1.1/arch/x86/kernel/head_64.S
13810--- linux-3.1.1/arch/x86/kernel/head_64.S 2011-11-11 15:19:27.000000000 -0500
13811+++ linux-3.1.1/arch/x86/kernel/head_64.S 2011-11-16 18:39:07.000000000 -0500
13812@@ -19,6 +19,7 @@
13813 #include <asm/cache.h>
13814 #include <asm/processor-flags.h>
13815 #include <asm/percpu.h>
13816+#include <asm/cpufeature.h>
13817
13818 #ifdef CONFIG_PARAVIRT
13819 #include <asm/asm-offsets.h>
13820@@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
13821 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
13822 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
13823 L3_START_KERNEL = pud_index(__START_KERNEL_map)
13824+L4_VMALLOC_START = pgd_index(VMALLOC_START)
13825+L3_VMALLOC_START = pud_index(VMALLOC_START)
13826+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
13827+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
13828
13829 .text
13830 __HEAD
13831@@ -85,35 +90,22 @@ startup_64:
13832 */
13833 addq %rbp, init_level4_pgt + 0(%rip)
13834 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
13835+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
13836+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
13837 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
13838
13839 addq %rbp, level3_ident_pgt + 0(%rip)
13840+#ifndef CONFIG_XEN
13841+ addq %rbp, level3_ident_pgt + 8(%rip)
13842+#endif
13843
13844- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
13845- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
13846+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
13847
13848- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
13849+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
13850+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
13851
13852- /* Add an Identity mapping if I am above 1G */
13853- leaq _text(%rip), %rdi
13854- andq $PMD_PAGE_MASK, %rdi
13855-
13856- movq %rdi, %rax
13857- shrq $PUD_SHIFT, %rax
13858- andq $(PTRS_PER_PUD - 1), %rax
13859- jz ident_complete
13860-
13861- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
13862- leaq level3_ident_pgt(%rip), %rbx
13863- movq %rdx, 0(%rbx, %rax, 8)
13864-
13865- movq %rdi, %rax
13866- shrq $PMD_SHIFT, %rax
13867- andq $(PTRS_PER_PMD - 1), %rax
13868- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
13869- leaq level2_spare_pgt(%rip), %rbx
13870- movq %rdx, 0(%rbx, %rax, 8)
13871-ident_complete:
13872+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
13873+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
13874
13875 /*
13876 * Fixup the kernel text+data virtual addresses. Note that
13877@@ -160,8 +152,8 @@ ENTRY(secondary_startup_64)
13878 * after the boot processor executes this code.
13879 */
13880
13881- /* Enable PAE mode and PGE */
13882- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
13883+ /* Enable PAE mode and PSE/PGE */
13884+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
13885 movq %rax, %cr4
13886
13887 /* Setup early boot stage 4 level pagetables. */
13888@@ -183,9 +175,14 @@ ENTRY(secondary_startup_64)
13889 movl $MSR_EFER, %ecx
13890 rdmsr
13891 btsl $_EFER_SCE, %eax /* Enable System Call */
13892- btl $20,%edi /* No Execute supported? */
13893+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
13894 jnc 1f
13895 btsl $_EFER_NX, %eax
13896+ leaq init_level4_pgt(%rip), %rdi
13897+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
13898+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
13899+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
13900+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
13901 1: wrmsr /* Make changes effective */
13902
13903 /* Setup cr0 */
13904@@ -269,7 +266,7 @@ ENTRY(secondary_startup_64)
13905 bad_address:
13906 jmp bad_address
13907
13908- .section ".init.text","ax"
13909+ __INIT
13910 #ifdef CONFIG_EARLY_PRINTK
13911 .globl early_idt_handlers
13912 early_idt_handlers:
13913@@ -314,18 +311,23 @@ ENTRY(early_idt_handler)
13914 #endif /* EARLY_PRINTK */
13915 1: hlt
13916 jmp 1b
13917+ .previous
13918
13919 #ifdef CONFIG_EARLY_PRINTK
13920+ __INITDATA
13921 early_recursion_flag:
13922 .long 0
13923+ .previous
13924
13925+ .section .rodata,"a",@progbits
13926 early_idt_msg:
13927 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
13928 early_idt_ripmsg:
13929 .asciz "RIP %s\n"
13930-#endif /* CONFIG_EARLY_PRINTK */
13931 .previous
13932+#endif /* CONFIG_EARLY_PRINTK */
13933
13934+ .section .rodata,"a",@progbits
13935 #define NEXT_PAGE(name) \
13936 .balign PAGE_SIZE; \
13937 ENTRY(name)
13938@@ -338,7 +340,6 @@ ENTRY(name)
13939 i = i + 1 ; \
13940 .endr
13941
13942- .data
13943 /*
13944 * This default setting generates an ident mapping at address 0x100000
13945 * and a mapping for the kernel that precisely maps virtual address
13946@@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt)
13947 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13948 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
13949 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13950+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
13951+ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
13952+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
13953+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13954 .org init_level4_pgt + L4_START_KERNEL*8, 0
13955 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
13956 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
13957
13958+#ifdef CONFIG_PAX_PER_CPU_PGD
13959+NEXT_PAGE(cpu_pgd)
13960+ .rept NR_CPUS
13961+ .fill 512,8,0
13962+ .endr
13963+#endif
13964+
13965 NEXT_PAGE(level3_ident_pgt)
13966 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13967+#ifdef CONFIG_XEN
13968 .fill 511,8,0
13969+#else
13970+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
13971+ .fill 510,8,0
13972+#endif
13973+
13974+NEXT_PAGE(level3_vmalloc_pgt)
13975+ .fill 512,8,0
13976+
13977+NEXT_PAGE(level3_vmemmap_pgt)
13978+ .fill L3_VMEMMAP_START,8,0
13979+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13980
13981 NEXT_PAGE(level3_kernel_pgt)
13982 .fill L3_START_KERNEL,8,0
13983@@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt)
13984 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
13985 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13986
13987+NEXT_PAGE(level2_vmemmap_pgt)
13988+ .fill 512,8,0
13989+
13990 NEXT_PAGE(level2_fixmap_pgt)
13991- .fill 506,8,0
13992- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13993- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
13994- .fill 5,8,0
13995+ .fill 507,8,0
13996+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
13997+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
13998+ .fill 4,8,0
13999
14000-NEXT_PAGE(level1_fixmap_pgt)
14001+NEXT_PAGE(level1_vsyscall_pgt)
14002 .fill 512,8,0
14003
14004-NEXT_PAGE(level2_ident_pgt)
14005- /* Since I easily can, map the first 1G.
14006+ /* Since I easily can, map the first 2G.
14007 * Don't set NX because code runs from these pages.
14008 */
14009- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
14010+NEXT_PAGE(level2_ident_pgt)
14011+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
14012
14013 NEXT_PAGE(level2_kernel_pgt)
14014 /*
14015@@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt)
14016 * If you want to increase this then increase MODULES_VADDR
14017 * too.)
14018 */
14019- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
14020- KERNEL_IMAGE_SIZE/PMD_SIZE)
14021-
14022-NEXT_PAGE(level2_spare_pgt)
14023- .fill 512, 8, 0
14024+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
14025
14026 #undef PMDS
14027 #undef NEXT_PAGE
14028
14029- .data
14030+ .align PAGE_SIZE
14031+ENTRY(cpu_gdt_table)
14032+ .rept NR_CPUS
14033+ .quad 0x0000000000000000 /* NULL descriptor */
14034+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
14035+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
14036+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
14037+ .quad 0x00cffb000000ffff /* __USER32_CS */
14038+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
14039+ .quad 0x00affb000000ffff /* __USER_CS */
14040+
14041+#ifdef CONFIG_PAX_KERNEXEC
14042+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
14043+#else
14044+ .quad 0x0 /* unused */
14045+#endif
14046+
14047+ .quad 0,0 /* TSS */
14048+ .quad 0,0 /* LDT */
14049+ .quad 0,0,0 /* three TLS descriptors */
14050+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
14051+ /* asm/segment.h:GDT_ENTRIES must match this */
14052+
14053+ /* zero the remaining page */
14054+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
14055+ .endr
14056+
14057 .align 16
14058 .globl early_gdt_descr
14059 early_gdt_descr:
14060 .word GDT_ENTRIES*8-1
14061 early_gdt_descr_base:
14062- .quad INIT_PER_CPU_VAR(gdt_page)
14063+ .quad cpu_gdt_table
14064
14065 ENTRY(phys_base)
14066 /* This must match the first entry in level2_kernel_pgt */
14067 .quad 0x0000000000000000
14068
14069 #include "../../x86/xen/xen-head.S"
14070-
14071- .section .bss, "aw", @nobits
14072+
14073+ .section .rodata,"a",@progbits
14074 .align L1_CACHE_BYTES
14075 ENTRY(idt_table)
14076- .skip IDT_ENTRIES * 16
14077+ .fill 512,8,0
14078
14079 __PAGE_ALIGNED_BSS
14080 .align PAGE_SIZE
14081diff -urNp linux-3.1.1/arch/x86/kernel/i386_ksyms_32.c linux-3.1.1/arch/x86/kernel/i386_ksyms_32.c
14082--- linux-3.1.1/arch/x86/kernel/i386_ksyms_32.c 2011-11-11 15:19:27.000000000 -0500
14083+++ linux-3.1.1/arch/x86/kernel/i386_ksyms_32.c 2011-11-16 18:39:07.000000000 -0500
14084@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
14085 EXPORT_SYMBOL(cmpxchg8b_emu);
14086 #endif
14087
14088+EXPORT_SYMBOL_GPL(cpu_gdt_table);
14089+
14090 /* Networking helper routines. */
14091 EXPORT_SYMBOL(csum_partial_copy_generic);
14092+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
14093+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
14094
14095 EXPORT_SYMBOL(__get_user_1);
14096 EXPORT_SYMBOL(__get_user_2);
14097@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
14098
14099 EXPORT_SYMBOL(csum_partial);
14100 EXPORT_SYMBOL(empty_zero_page);
14101+
14102+#ifdef CONFIG_PAX_KERNEXEC
14103+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
14104+#endif
14105diff -urNp linux-3.1.1/arch/x86/kernel/i8259.c linux-3.1.1/arch/x86/kernel/i8259.c
14106--- linux-3.1.1/arch/x86/kernel/i8259.c 2011-11-11 15:19:27.000000000 -0500
14107+++ linux-3.1.1/arch/x86/kernel/i8259.c 2011-11-16 18:39:07.000000000 -0500
14108@@ -210,7 +210,7 @@ spurious_8259A_irq:
14109 "spurious 8259A interrupt: IRQ%d.\n", irq);
14110 spurious_irq_mask |= irqmask;
14111 }
14112- atomic_inc(&irq_err_count);
14113+ atomic_inc_unchecked(&irq_err_count);
14114 /*
14115 * Theoretically we do not have to handle this IRQ,
14116 * but in Linux this does not cause problems and is
14117diff -urNp linux-3.1.1/arch/x86/kernel/init_task.c linux-3.1.1/arch/x86/kernel/init_task.c
14118--- linux-3.1.1/arch/x86/kernel/init_task.c 2011-11-11 15:19:27.000000000 -0500
14119+++ linux-3.1.1/arch/x86/kernel/init_task.c 2011-11-16 18:39:07.000000000 -0500
14120@@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
14121 * way process stacks are handled. This is done by having a special
14122 * "init_task" linker map entry..
14123 */
14124-union thread_union init_thread_union __init_task_data =
14125- { INIT_THREAD_INFO(init_task) };
14126+union thread_union init_thread_union __init_task_data;
14127
14128 /*
14129 * Initial task structure.
14130@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
14131 * section. Since TSS's are completely CPU-local, we want them
14132 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
14133 */
14134-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
14135-
14136+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
14137+EXPORT_SYMBOL(init_tss);
14138diff -urNp linux-3.1.1/arch/x86/kernel/ioport.c linux-3.1.1/arch/x86/kernel/ioport.c
14139--- linux-3.1.1/arch/x86/kernel/ioport.c 2011-11-11 15:19:27.000000000 -0500
14140+++ linux-3.1.1/arch/x86/kernel/ioport.c 2011-11-16 18:40:08.000000000 -0500
14141@@ -6,6 +6,7 @@
14142 #include <linux/sched.h>
14143 #include <linux/kernel.h>
14144 #include <linux/capability.h>
14145+#include <linux/security.h>
14146 #include <linux/errno.h>
14147 #include <linux/types.h>
14148 #include <linux/ioport.h>
14149@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
14150
14151 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
14152 return -EINVAL;
14153+#ifdef CONFIG_GRKERNSEC_IO
14154+ if (turn_on && grsec_disable_privio) {
14155+ gr_handle_ioperm();
14156+ return -EPERM;
14157+ }
14158+#endif
14159 if (turn_on && !capable(CAP_SYS_RAWIO))
14160 return -EPERM;
14161
14162@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
14163 * because the ->io_bitmap_max value must match the bitmap
14164 * contents:
14165 */
14166- tss = &per_cpu(init_tss, get_cpu());
14167+ tss = init_tss + get_cpu();
14168
14169 if (turn_on)
14170 bitmap_clear(t->io_bitmap_ptr, from, num);
14171@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
14172 return -EINVAL;
14173 /* Trying to gain more privileges? */
14174 if (level > old) {
14175+#ifdef CONFIG_GRKERNSEC_IO
14176+ if (grsec_disable_privio) {
14177+ gr_handle_iopl();
14178+ return -EPERM;
14179+ }
14180+#endif
14181 if (!capable(CAP_SYS_RAWIO))
14182 return -EPERM;
14183 }
14184diff -urNp linux-3.1.1/arch/x86/kernel/irq_32.c linux-3.1.1/arch/x86/kernel/irq_32.c
14185--- linux-3.1.1/arch/x86/kernel/irq_32.c 2011-11-11 15:19:27.000000000 -0500
14186+++ linux-3.1.1/arch/x86/kernel/irq_32.c 2011-11-16 18:39:07.000000000 -0500
14187@@ -36,7 +36,7 @@ static int check_stack_overflow(void)
14188 __asm__ __volatile__("andl %%esp,%0" :
14189 "=r" (sp) : "0" (THREAD_SIZE - 1));
14190
14191- return sp < (sizeof(struct thread_info) + STACK_WARN);
14192+ return sp < STACK_WARN;
14193 }
14194
14195 static void print_stack_overflow(void)
14196@@ -54,8 +54,8 @@ static inline void print_stack_overflow(
14197 * per-CPU IRQ handling contexts (thread information and stack)
14198 */
14199 union irq_ctx {
14200- struct thread_info tinfo;
14201- u32 stack[THREAD_SIZE/sizeof(u32)];
14202+ unsigned long previous_esp;
14203+ u32 stack[THREAD_SIZE/sizeof(u32)];
14204 } __attribute__((aligned(THREAD_SIZE)));
14205
14206 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
14207@@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
14208 static inline int
14209 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
14210 {
14211- union irq_ctx *curctx, *irqctx;
14212+ union irq_ctx *irqctx;
14213 u32 *isp, arg1, arg2;
14214
14215- curctx = (union irq_ctx *) current_thread_info();
14216 irqctx = __this_cpu_read(hardirq_ctx);
14217
14218 /*
14219@@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
14220 * handler) we can't do that and just have to keep using the
14221 * current stack (which is the irq stack already after all)
14222 */
14223- if (unlikely(curctx == irqctx))
14224+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
14225 return 0;
14226
14227 /* build the stack frame on the IRQ stack */
14228- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14229- irqctx->tinfo.task = curctx->tinfo.task;
14230- irqctx->tinfo.previous_esp = current_stack_pointer;
14231+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14232+ irqctx->previous_esp = current_stack_pointer;
14233
14234- /*
14235- * Copy the softirq bits in preempt_count so that the
14236- * softirq checks work in the hardirq context.
14237- */
14238- irqctx->tinfo.preempt_count =
14239- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
14240- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
14241+#ifdef CONFIG_PAX_MEMORY_UDEREF
14242+ __set_fs(MAKE_MM_SEG(0));
14243+#endif
14244
14245 if (unlikely(overflow))
14246 call_on_stack(print_stack_overflow, isp);
14247@@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
14248 : "0" (irq), "1" (desc), "2" (isp),
14249 "D" (desc->handle_irq)
14250 : "memory", "cc", "ecx");
14251+
14252+#ifdef CONFIG_PAX_MEMORY_UDEREF
14253+ __set_fs(current_thread_info()->addr_limit);
14254+#endif
14255+
14256 return 1;
14257 }
14258
14259@@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
14260 */
14261 void __cpuinit irq_ctx_init(int cpu)
14262 {
14263- union irq_ctx *irqctx;
14264-
14265 if (per_cpu(hardirq_ctx, cpu))
14266 return;
14267
14268- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
14269- THREAD_FLAGS,
14270- THREAD_ORDER));
14271- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
14272- irqctx->tinfo.cpu = cpu;
14273- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
14274- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
14275-
14276- per_cpu(hardirq_ctx, cpu) = irqctx;
14277-
14278- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
14279- THREAD_FLAGS,
14280- THREAD_ORDER));
14281- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
14282- irqctx->tinfo.cpu = cpu;
14283- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
14284-
14285- per_cpu(softirq_ctx, cpu) = irqctx;
14286+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
14287+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
14288
14289 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
14290 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
14291@@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
14292 asmlinkage void do_softirq(void)
14293 {
14294 unsigned long flags;
14295- struct thread_info *curctx;
14296 union irq_ctx *irqctx;
14297 u32 *isp;
14298
14299@@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
14300 local_irq_save(flags);
14301
14302 if (local_softirq_pending()) {
14303- curctx = current_thread_info();
14304 irqctx = __this_cpu_read(softirq_ctx);
14305- irqctx->tinfo.task = curctx->task;
14306- irqctx->tinfo.previous_esp = current_stack_pointer;
14307+ irqctx->previous_esp = current_stack_pointer;
14308
14309 /* build the stack frame on the softirq stack */
14310- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14311+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14312+
14313+#ifdef CONFIG_PAX_MEMORY_UDEREF
14314+ __set_fs(MAKE_MM_SEG(0));
14315+#endif
14316
14317 call_on_stack(__do_softirq, isp);
14318+
14319+#ifdef CONFIG_PAX_MEMORY_UDEREF
14320+ __set_fs(current_thread_info()->addr_limit);
14321+#endif
14322+
14323 /*
14324 * Shouldn't happen, we returned above if in_interrupt():
14325 */
14326diff -urNp linux-3.1.1/arch/x86/kernel/irq.c linux-3.1.1/arch/x86/kernel/irq.c
14327--- linux-3.1.1/arch/x86/kernel/irq.c 2011-11-11 15:19:27.000000000 -0500
14328+++ linux-3.1.1/arch/x86/kernel/irq.c 2011-11-16 18:39:07.000000000 -0500
14329@@ -17,7 +17,7 @@
14330 #include <asm/mce.h>
14331 #include <asm/hw_irq.h>
14332
14333-atomic_t irq_err_count;
14334+atomic_unchecked_t irq_err_count;
14335
14336 /* Function pointer for generic interrupt vector handling */
14337 void (*x86_platform_ipi_callback)(void) = NULL;
14338@@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
14339 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
14340 seq_printf(p, " Machine check polls\n");
14341 #endif
14342- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
14343+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
14344 #if defined(CONFIG_X86_IO_APIC)
14345- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
14346+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
14347 #endif
14348 return 0;
14349 }
14350@@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
14351
14352 u64 arch_irq_stat(void)
14353 {
14354- u64 sum = atomic_read(&irq_err_count);
14355+ u64 sum = atomic_read_unchecked(&irq_err_count);
14356
14357 #ifdef CONFIG_X86_IO_APIC
14358- sum += atomic_read(&irq_mis_count);
14359+ sum += atomic_read_unchecked(&irq_mis_count);
14360 #endif
14361 return sum;
14362 }
14363diff -urNp linux-3.1.1/arch/x86/kernel/kgdb.c linux-3.1.1/arch/x86/kernel/kgdb.c
14364--- linux-3.1.1/arch/x86/kernel/kgdb.c 2011-11-11 15:19:27.000000000 -0500
14365+++ linux-3.1.1/arch/x86/kernel/kgdb.c 2011-11-16 18:39:07.000000000 -0500
14366@@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem,
14367 #ifdef CONFIG_X86_32
14368 switch (regno) {
14369 case GDB_SS:
14370- if (!user_mode_vm(regs))
14371+ if (!user_mode(regs))
14372 *(unsigned long *)mem = __KERNEL_DS;
14373 break;
14374 case GDB_SP:
14375- if (!user_mode_vm(regs))
14376+ if (!user_mode(regs))
14377 *(unsigned long *)mem = kernel_stack_pointer(regs);
14378 break;
14379 case GDB_GS:
14380@@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
14381 case 'k':
14382 /* clear the trace bit */
14383 linux_regs->flags &= ~X86_EFLAGS_TF;
14384- atomic_set(&kgdb_cpu_doing_single_step, -1);
14385+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
14386
14387 /* set the trace bit if we're stepping */
14388 if (remcomInBuffer[0] == 's') {
14389 linux_regs->flags |= X86_EFLAGS_TF;
14390- atomic_set(&kgdb_cpu_doing_single_step,
14391+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
14392 raw_smp_processor_id());
14393 }
14394
14395@@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
14396 return NOTIFY_DONE;
14397
14398 case DIE_DEBUG:
14399- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
14400+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
14401 if (user_mode(regs))
14402 return single_step_cont(regs, args);
14403 break;
14404diff -urNp linux-3.1.1/arch/x86/kernel/kprobes.c linux-3.1.1/arch/x86/kernel/kprobes.c
14405--- linux-3.1.1/arch/x86/kernel/kprobes.c 2011-11-11 15:19:27.000000000 -0500
14406+++ linux-3.1.1/arch/x86/kernel/kprobes.c 2011-11-16 18:39:07.000000000 -0500
14407@@ -117,8 +117,11 @@ static void __kprobes __synthesize_relat
14408 } __attribute__((packed)) *insn;
14409
14410 insn = (struct __arch_relative_insn *)from;
14411+
14412+ pax_open_kernel();
14413 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
14414 insn->op = op;
14415+ pax_close_kernel();
14416 }
14417
14418 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
14419@@ -155,7 +158,7 @@ static int __kprobes can_boost(kprobe_op
14420 kprobe_opcode_t opcode;
14421 kprobe_opcode_t *orig_opcodes = opcodes;
14422
14423- if (search_exception_tables((unsigned long)opcodes))
14424+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
14425 return 0; /* Page fault may occur on this address. */
14426
14427 retry:
14428@@ -316,7 +319,9 @@ static int __kprobes __copy_instruction(
14429 }
14430 }
14431 insn_get_length(&insn);
14432+ pax_open_kernel();
14433 memcpy(dest, insn.kaddr, insn.length);
14434+ pax_close_kernel();
14435
14436 #ifdef CONFIG_X86_64
14437 if (insn_rip_relative(&insn)) {
14438@@ -340,7 +345,9 @@ static int __kprobes __copy_instruction(
14439 (u8 *) dest;
14440 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
14441 disp = (u8 *) dest + insn_offset_displacement(&insn);
14442+ pax_open_kernel();
14443 *(s32 *) disp = (s32) newdisp;
14444+ pax_close_kernel();
14445 }
14446 #endif
14447 return insn.length;
14448@@ -354,12 +361,12 @@ static void __kprobes arch_copy_kprobe(s
14449 */
14450 __copy_instruction(p->ainsn.insn, p->addr, 0);
14451
14452- if (can_boost(p->addr))
14453+ if (can_boost(ktla_ktva(p->addr)))
14454 p->ainsn.boostable = 0;
14455 else
14456 p->ainsn.boostable = -1;
14457
14458- p->opcode = *p->addr;
14459+ p->opcode = *(ktla_ktva(p->addr));
14460 }
14461
14462 int __kprobes arch_prepare_kprobe(struct kprobe *p)
14463@@ -476,7 +483,7 @@ static void __kprobes setup_singlestep(s
14464 * nor set current_kprobe, because it doesn't use single
14465 * stepping.
14466 */
14467- regs->ip = (unsigned long)p->ainsn.insn;
14468+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
14469 preempt_enable_no_resched();
14470 return;
14471 }
14472@@ -495,7 +502,7 @@ static void __kprobes setup_singlestep(s
14473 if (p->opcode == BREAKPOINT_INSTRUCTION)
14474 regs->ip = (unsigned long)p->addr;
14475 else
14476- regs->ip = (unsigned long)p->ainsn.insn;
14477+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
14478 }
14479
14480 /*
14481@@ -574,7 +581,7 @@ static int __kprobes kprobe_handler(stru
14482 setup_singlestep(p, regs, kcb, 0);
14483 return 1;
14484 }
14485- } else if (*addr != BREAKPOINT_INSTRUCTION) {
14486+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
14487 /*
14488 * The breakpoint instruction was removed right
14489 * after we hit it. Another cpu has removed
14490@@ -682,6 +689,9 @@ static void __used __kprobes kretprobe_t
14491 " movq %rax, 152(%rsp)\n"
14492 RESTORE_REGS_STRING
14493 " popfq\n"
14494+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
14495+ " btsq $63,(%rsp)\n"
14496+#endif
14497 #else
14498 " pushf\n"
14499 SAVE_REGS_STRING
14500@@ -819,7 +829,7 @@ static void __kprobes resume_execution(s
14501 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
14502 {
14503 unsigned long *tos = stack_addr(regs);
14504- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
14505+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
14506 unsigned long orig_ip = (unsigned long)p->addr;
14507 kprobe_opcode_t *insn = p->ainsn.insn;
14508
14509@@ -1001,7 +1011,7 @@ int __kprobes kprobe_exceptions_notify(s
14510 struct die_args *args = data;
14511 int ret = NOTIFY_DONE;
14512
14513- if (args->regs && user_mode_vm(args->regs))
14514+ if (args->regs && user_mode(args->regs))
14515 return ret;
14516
14517 switch (val) {
14518@@ -1383,7 +1393,7 @@ int __kprobes arch_prepare_optimized_kpr
14519 * Verify if the address gap is in 2GB range, because this uses
14520 * a relative jump.
14521 */
14522- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
14523+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
14524 if (abs(rel) > 0x7fffffff)
14525 return -ERANGE;
14526
14527@@ -1404,11 +1414,11 @@ int __kprobes arch_prepare_optimized_kpr
14528 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
14529
14530 /* Set probe function call */
14531- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
14532+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
14533
14534 /* Set returning jmp instruction at the tail of out-of-line buffer */
14535 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
14536- (u8 *)op->kp.addr + op->optinsn.size);
14537+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
14538
14539 flush_icache_range((unsigned long) buf,
14540 (unsigned long) buf + TMPL_END_IDX +
14541@@ -1430,7 +1440,7 @@ static void __kprobes setup_optimize_kpr
14542 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
14543
14544 /* Backup instructions which will be replaced by jump address */
14545- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
14546+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
14547 RELATIVE_ADDR_SIZE);
14548
14549 insn_buf[0] = RELATIVEJUMP_OPCODE;
14550diff -urNp linux-3.1.1/arch/x86/kernel/kvm.c linux-3.1.1/arch/x86/kernel/kvm.c
14551--- linux-3.1.1/arch/x86/kernel/kvm.c 2011-11-11 15:19:27.000000000 -0500
14552+++ linux-3.1.1/arch/x86/kernel/kvm.c 2011-11-16 18:39:07.000000000 -0500
14553@@ -437,6 +437,7 @@ static void __init paravirt_ops_setup(vo
14554 pv_mmu_ops.set_pud = kvm_set_pud;
14555 #if PAGETABLE_LEVELS == 4
14556 pv_mmu_ops.set_pgd = kvm_set_pgd;
14557+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
14558 #endif
14559 #endif
14560 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
14561diff -urNp linux-3.1.1/arch/x86/kernel/ldt.c linux-3.1.1/arch/x86/kernel/ldt.c
14562--- linux-3.1.1/arch/x86/kernel/ldt.c 2011-11-11 15:19:27.000000000 -0500
14563+++ linux-3.1.1/arch/x86/kernel/ldt.c 2011-11-16 18:39:07.000000000 -0500
14564@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
14565 if (reload) {
14566 #ifdef CONFIG_SMP
14567 preempt_disable();
14568- load_LDT(pc);
14569+ load_LDT_nolock(pc);
14570 if (!cpumask_equal(mm_cpumask(current->mm),
14571 cpumask_of(smp_processor_id())))
14572 smp_call_function(flush_ldt, current->mm, 1);
14573 preempt_enable();
14574 #else
14575- load_LDT(pc);
14576+ load_LDT_nolock(pc);
14577 #endif
14578 }
14579 if (oldsize) {
14580@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
14581 return err;
14582
14583 for (i = 0; i < old->size; i++)
14584- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
14585+ write_ldt_entry(new->ldt, i, old->ldt + i);
14586 return 0;
14587 }
14588
14589@@ -116,6 +116,24 @@ int init_new_context(struct task_struct
14590 retval = copy_ldt(&mm->context, &old_mm->context);
14591 mutex_unlock(&old_mm->context.lock);
14592 }
14593+
14594+ if (tsk == current) {
14595+ mm->context.vdso = 0;
14596+
14597+#ifdef CONFIG_X86_32
14598+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
14599+ mm->context.user_cs_base = 0UL;
14600+ mm->context.user_cs_limit = ~0UL;
14601+
14602+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
14603+ cpus_clear(mm->context.cpu_user_cs_mask);
14604+#endif
14605+
14606+#endif
14607+#endif
14608+
14609+ }
14610+
14611 return retval;
14612 }
14613
14614@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
14615 }
14616 }
14617
14618+#ifdef CONFIG_PAX_SEGMEXEC
14619+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
14620+ error = -EINVAL;
14621+ goto out_unlock;
14622+ }
14623+#endif
14624+
14625 fill_ldt(&ldt, &ldt_info);
14626 if (oldmode)
14627 ldt.avl = 0;
14628diff -urNp linux-3.1.1/arch/x86/kernel/machine_kexec_32.c linux-3.1.1/arch/x86/kernel/machine_kexec_32.c
14629--- linux-3.1.1/arch/x86/kernel/machine_kexec_32.c 2011-11-11 15:19:27.000000000 -0500
14630+++ linux-3.1.1/arch/x86/kernel/machine_kexec_32.c 2011-11-16 18:39:07.000000000 -0500
14631@@ -27,7 +27,7 @@
14632 #include <asm/cacheflush.h>
14633 #include <asm/debugreg.h>
14634
14635-static void set_idt(void *newidt, __u16 limit)
14636+static void set_idt(struct desc_struct *newidt, __u16 limit)
14637 {
14638 struct desc_ptr curidt;
14639
14640@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
14641 }
14642
14643
14644-static void set_gdt(void *newgdt, __u16 limit)
14645+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
14646 {
14647 struct desc_ptr curgdt;
14648
14649@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
14650 }
14651
14652 control_page = page_address(image->control_code_page);
14653- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
14654+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
14655
14656 relocate_kernel_ptr = control_page;
14657 page_list[PA_CONTROL_PAGE] = __pa(control_page);
14658diff -urNp linux-3.1.1/arch/x86/kernel/microcode_intel.c linux-3.1.1/arch/x86/kernel/microcode_intel.c
14659--- linux-3.1.1/arch/x86/kernel/microcode_intel.c 2011-11-11 15:19:27.000000000 -0500
14660+++ linux-3.1.1/arch/x86/kernel/microcode_intel.c 2011-11-16 18:39:07.000000000 -0500
14661@@ -440,13 +440,13 @@ static enum ucode_state request_microcod
14662
14663 static int get_ucode_user(void *to, const void *from, size_t n)
14664 {
14665- return copy_from_user(to, from, n);
14666+ return copy_from_user(to, (const void __force_user *)from, n);
14667 }
14668
14669 static enum ucode_state
14670 request_microcode_user(int cpu, const void __user *buf, size_t size)
14671 {
14672- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
14673+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
14674 }
14675
14676 static void microcode_fini_cpu(int cpu)
14677diff -urNp linux-3.1.1/arch/x86/kernel/module.c linux-3.1.1/arch/x86/kernel/module.c
14678--- linux-3.1.1/arch/x86/kernel/module.c 2011-11-11 15:19:27.000000000 -0500
14679+++ linux-3.1.1/arch/x86/kernel/module.c 2011-11-16 18:39:07.000000000 -0500
14680@@ -36,15 +36,60 @@
14681 #define DEBUGP(fmt...)
14682 #endif
14683
14684-void *module_alloc(unsigned long size)
14685+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
14686 {
14687 if (PAGE_ALIGN(size) > MODULES_LEN)
14688 return NULL;
14689 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
14690- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
14691+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
14692 -1, __builtin_return_address(0));
14693 }
14694
14695+void *module_alloc(unsigned long size)
14696+{
14697+
14698+#ifdef CONFIG_PAX_KERNEXEC
14699+ return __module_alloc(size, PAGE_KERNEL);
14700+#else
14701+ return __module_alloc(size, PAGE_KERNEL_EXEC);
14702+#endif
14703+
14704+}
14705+
14706+#ifdef CONFIG_PAX_KERNEXEC
14707+#ifdef CONFIG_X86_32
14708+void *module_alloc_exec(unsigned long size)
14709+{
14710+ struct vm_struct *area;
14711+
14712+ if (size == 0)
14713+ return NULL;
14714+
14715+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
14716+ return area ? area->addr : NULL;
14717+}
14718+EXPORT_SYMBOL(module_alloc_exec);
14719+
14720+void module_free_exec(struct module *mod, void *module_region)
14721+{
14722+ vunmap(module_region);
14723+}
14724+EXPORT_SYMBOL(module_free_exec);
14725+#else
14726+void module_free_exec(struct module *mod, void *module_region)
14727+{
14728+ module_free(mod, module_region);
14729+}
14730+EXPORT_SYMBOL(module_free_exec);
14731+
14732+void *module_alloc_exec(unsigned long size)
14733+{
14734+ return __module_alloc(size, PAGE_KERNEL_RX);
14735+}
14736+EXPORT_SYMBOL(module_alloc_exec);
14737+#endif
14738+#endif
14739+
14740 #ifdef CONFIG_X86_32
14741 int apply_relocate(Elf32_Shdr *sechdrs,
14742 const char *strtab,
14743@@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
14744 unsigned int i;
14745 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
14746 Elf32_Sym *sym;
14747- uint32_t *location;
14748+ uint32_t *plocation, location;
14749
14750 DEBUGP("Applying relocate section %u to %u\n", relsec,
14751 sechdrs[relsec].sh_info);
14752 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
14753 /* This is where to make the change */
14754- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
14755- + rel[i].r_offset;
14756+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
14757+ location = (uint32_t)plocation;
14758+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
14759+ plocation = ktla_ktva((void *)plocation);
14760 /* This is the symbol it is referring to. Note that all
14761 undefined symbols have been resolved. */
14762 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
14763@@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
14764 switch (ELF32_R_TYPE(rel[i].r_info)) {
14765 case R_386_32:
14766 /* We add the value into the location given */
14767- *location += sym->st_value;
14768+ pax_open_kernel();
14769+ *plocation += sym->st_value;
14770+ pax_close_kernel();
14771 break;
14772 case R_386_PC32:
14773 /* Add the value, subtract its postition */
14774- *location += sym->st_value - (uint32_t)location;
14775+ pax_open_kernel();
14776+ *plocation += sym->st_value - location;
14777+ pax_close_kernel();
14778 break;
14779 default:
14780 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
14781@@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
14782 case R_X86_64_NONE:
14783 break;
14784 case R_X86_64_64:
14785+ pax_open_kernel();
14786 *(u64 *)loc = val;
14787+ pax_close_kernel();
14788 break;
14789 case R_X86_64_32:
14790+ pax_open_kernel();
14791 *(u32 *)loc = val;
14792+ pax_close_kernel();
14793 if (val != *(u32 *)loc)
14794 goto overflow;
14795 break;
14796 case R_X86_64_32S:
14797+ pax_open_kernel();
14798 *(s32 *)loc = val;
14799+ pax_close_kernel();
14800 if ((s64)val != *(s32 *)loc)
14801 goto overflow;
14802 break;
14803 case R_X86_64_PC32:
14804 val -= (u64)loc;
14805+ pax_open_kernel();
14806 *(u32 *)loc = val;
14807+ pax_close_kernel();
14808+
14809 #if 0
14810 if ((s64)val != *(s32 *)loc)
14811 goto overflow;
14812diff -urNp linux-3.1.1/arch/x86/kernel/paravirt.c linux-3.1.1/arch/x86/kernel/paravirt.c
14813--- linux-3.1.1/arch/x86/kernel/paravirt.c 2011-11-11 15:19:27.000000000 -0500
14814+++ linux-3.1.1/arch/x86/kernel/paravirt.c 2011-11-16 18:40:08.000000000 -0500
14815@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
14816 {
14817 return x;
14818 }
14819+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
14820+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
14821+#endif
14822
14823 void __init default_banner(void)
14824 {
14825@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
14826 * corresponding structure. */
14827 static void *get_call_destination(u8 type)
14828 {
14829- struct paravirt_patch_template tmpl = {
14830+ const struct paravirt_patch_template tmpl = {
14831 .pv_init_ops = pv_init_ops,
14832 .pv_time_ops = pv_time_ops,
14833 .pv_cpu_ops = pv_cpu_ops,
14834@@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
14835 .pv_lock_ops = pv_lock_ops,
14836 #endif
14837 };
14838+
14839+ pax_track_stack();
14840+
14841 return *((void **)&tmpl + type);
14842 }
14843
14844@@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
14845 if (opfunc == NULL)
14846 /* If there's no function, patch it with a ud2a (BUG) */
14847 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
14848- else if (opfunc == _paravirt_nop)
14849+ else if (opfunc == (void *)_paravirt_nop)
14850 /* If the operation is a nop, then nop the callsite */
14851 ret = paravirt_patch_nop();
14852
14853 /* identity functions just return their single argument */
14854- else if (opfunc == _paravirt_ident_32)
14855+ else if (opfunc == (void *)_paravirt_ident_32)
14856 ret = paravirt_patch_ident_32(insnbuf, len);
14857- else if (opfunc == _paravirt_ident_64)
14858+ else if (opfunc == (void *)_paravirt_ident_64)
14859 ret = paravirt_patch_ident_64(insnbuf, len);
14860+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
14861+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
14862+ ret = paravirt_patch_ident_64(insnbuf, len);
14863+#endif
14864
14865 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
14866 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
14867@@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
14868 if (insn_len > len || start == NULL)
14869 insn_len = len;
14870 else
14871- memcpy(insnbuf, start, insn_len);
14872+ memcpy(insnbuf, ktla_ktva(start), insn_len);
14873
14874 return insn_len;
14875 }
14876@@ -302,7 +312,7 @@ void arch_flush_lazy_mmu_mode(void)
14877 preempt_enable();
14878 }
14879
14880-struct pv_info pv_info = {
14881+struct pv_info pv_info __read_only = {
14882 .name = "bare hardware",
14883 .paravirt_enabled = 0,
14884 .kernel_rpl = 0,
14885@@ -313,16 +323,16 @@ struct pv_info pv_info = {
14886 #endif
14887 };
14888
14889-struct pv_init_ops pv_init_ops = {
14890+struct pv_init_ops pv_init_ops __read_only = {
14891 .patch = native_patch,
14892 };
14893
14894-struct pv_time_ops pv_time_ops = {
14895+struct pv_time_ops pv_time_ops __read_only = {
14896 .sched_clock = native_sched_clock,
14897 .steal_clock = native_steal_clock,
14898 };
14899
14900-struct pv_irq_ops pv_irq_ops = {
14901+struct pv_irq_ops pv_irq_ops __read_only = {
14902 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
14903 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
14904 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
14905@@ -334,7 +344,7 @@ struct pv_irq_ops pv_irq_ops = {
14906 #endif
14907 };
14908
14909-struct pv_cpu_ops pv_cpu_ops = {
14910+struct pv_cpu_ops pv_cpu_ops __read_only = {
14911 .cpuid = native_cpuid,
14912 .get_debugreg = native_get_debugreg,
14913 .set_debugreg = native_set_debugreg,
14914@@ -395,21 +405,26 @@ struct pv_cpu_ops pv_cpu_ops = {
14915 .end_context_switch = paravirt_nop,
14916 };
14917
14918-struct pv_apic_ops pv_apic_ops = {
14919+struct pv_apic_ops pv_apic_ops __read_only = {
14920 #ifdef CONFIG_X86_LOCAL_APIC
14921 .startup_ipi_hook = paravirt_nop,
14922 #endif
14923 };
14924
14925-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
14926+#ifdef CONFIG_X86_32
14927+#ifdef CONFIG_X86_PAE
14928+/* 64-bit pagetable entries */
14929+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
14930+#else
14931 /* 32-bit pagetable entries */
14932 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
14933+#endif
14934 #else
14935 /* 64-bit pagetable entries */
14936 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
14937 #endif
14938
14939-struct pv_mmu_ops pv_mmu_ops = {
14940+struct pv_mmu_ops pv_mmu_ops __read_only = {
14941
14942 .read_cr2 = native_read_cr2,
14943 .write_cr2 = native_write_cr2,
14944@@ -459,6 +474,7 @@ struct pv_mmu_ops pv_mmu_ops = {
14945 .make_pud = PTE_IDENT,
14946
14947 .set_pgd = native_set_pgd,
14948+ .set_pgd_batched = native_set_pgd_batched,
14949 #endif
14950 #endif /* PAGETABLE_LEVELS >= 3 */
14951
14952@@ -478,6 +494,12 @@ struct pv_mmu_ops pv_mmu_ops = {
14953 },
14954
14955 .set_fixmap = native_set_fixmap,
14956+
14957+#ifdef CONFIG_PAX_KERNEXEC
14958+ .pax_open_kernel = native_pax_open_kernel,
14959+ .pax_close_kernel = native_pax_close_kernel,
14960+#endif
14961+
14962 };
14963
14964 EXPORT_SYMBOL_GPL(pv_time_ops);
14965diff -urNp linux-3.1.1/arch/x86/kernel/paravirt-spinlocks.c linux-3.1.1/arch/x86/kernel/paravirt-spinlocks.c
14966--- linux-3.1.1/arch/x86/kernel/paravirt-spinlocks.c 2011-11-11 15:19:27.000000000 -0500
14967+++ linux-3.1.1/arch/x86/kernel/paravirt-spinlocks.c 2011-11-16 18:39:07.000000000 -0500
14968@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
14969 arch_spin_lock(lock);
14970 }
14971
14972-struct pv_lock_ops pv_lock_ops = {
14973+struct pv_lock_ops pv_lock_ops __read_only = {
14974 #ifdef CONFIG_SMP
14975 .spin_is_locked = __ticket_spin_is_locked,
14976 .spin_is_contended = __ticket_spin_is_contended,
14977diff -urNp linux-3.1.1/arch/x86/kernel/pci-iommu_table.c linux-3.1.1/arch/x86/kernel/pci-iommu_table.c
14978--- linux-3.1.1/arch/x86/kernel/pci-iommu_table.c 2011-11-11 15:19:27.000000000 -0500
14979+++ linux-3.1.1/arch/x86/kernel/pci-iommu_table.c 2011-11-16 18:40:08.000000000 -0500
14980@@ -2,7 +2,7 @@
14981 #include <asm/iommu_table.h>
14982 #include <linux/string.h>
14983 #include <linux/kallsyms.h>
14984-
14985+#include <linux/sched.h>
14986
14987 #define DEBUG 1
14988
14989@@ -51,6 +51,8 @@ void __init check_iommu_entries(struct i
14990 {
14991 struct iommu_table_entry *p, *q, *x;
14992
14993+ pax_track_stack();
14994+
14995 /* Simple cyclic dependency checker. */
14996 for (p = start; p < finish; p++) {
14997 q = find_dependents_of(start, finish, p);
14998diff -urNp linux-3.1.1/arch/x86/kernel/process_32.c linux-3.1.1/arch/x86/kernel/process_32.c
14999--- linux-3.1.1/arch/x86/kernel/process_32.c 2011-11-11 15:19:27.000000000 -0500
15000+++ linux-3.1.1/arch/x86/kernel/process_32.c 2011-11-16 18:39:07.000000000 -0500
15001@@ -66,6 +66,7 @@ asmlinkage void ret_from_fork(void) __as
15002 unsigned long thread_saved_pc(struct task_struct *tsk)
15003 {
15004 return ((unsigned long *)tsk->thread.sp)[3];
15005+//XXX return tsk->thread.eip;
15006 }
15007
15008 #ifndef CONFIG_SMP
15009@@ -128,15 +129,14 @@ void __show_regs(struct pt_regs *regs, i
15010 unsigned long sp;
15011 unsigned short ss, gs;
15012
15013- if (user_mode_vm(regs)) {
15014+ if (user_mode(regs)) {
15015 sp = regs->sp;
15016 ss = regs->ss & 0xffff;
15017- gs = get_user_gs(regs);
15018 } else {
15019 sp = kernel_stack_pointer(regs);
15020 savesegment(ss, ss);
15021- savesegment(gs, gs);
15022 }
15023+ gs = get_user_gs(regs);
15024
15025 show_regs_common();
15026
15027@@ -198,13 +198,14 @@ int copy_thread(unsigned long clone_flag
15028 struct task_struct *tsk;
15029 int err;
15030
15031- childregs = task_pt_regs(p);
15032+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
15033 *childregs = *regs;
15034 childregs->ax = 0;
15035 childregs->sp = sp;
15036
15037 p->thread.sp = (unsigned long) childregs;
15038 p->thread.sp0 = (unsigned long) (childregs+1);
15039+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15040
15041 p->thread.ip = (unsigned long) ret_from_fork;
15042
15043@@ -294,7 +295,7 @@ __switch_to(struct task_struct *prev_p,
15044 struct thread_struct *prev = &prev_p->thread,
15045 *next = &next_p->thread;
15046 int cpu = smp_processor_id();
15047- struct tss_struct *tss = &per_cpu(init_tss, cpu);
15048+ struct tss_struct *tss = init_tss + cpu;
15049 bool preload_fpu;
15050
15051 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
15052@@ -329,6 +330,10 @@ __switch_to(struct task_struct *prev_p,
15053 */
15054 lazy_save_gs(prev->gs);
15055
15056+#ifdef CONFIG_PAX_MEMORY_UDEREF
15057+ __set_fs(task_thread_info(next_p)->addr_limit);
15058+#endif
15059+
15060 /*
15061 * Load the per-thread Thread-Local Storage descriptor.
15062 */
15063@@ -364,6 +369,9 @@ __switch_to(struct task_struct *prev_p,
15064 */
15065 arch_end_context_switch(next_p);
15066
15067+ percpu_write(current_task, next_p);
15068+ percpu_write(current_tinfo, &next_p->tinfo);
15069+
15070 if (preload_fpu)
15071 __math_state_restore();
15072
15073@@ -373,8 +381,6 @@ __switch_to(struct task_struct *prev_p,
15074 if (prev->gs | next->gs)
15075 lazy_load_gs(next->gs);
15076
15077- percpu_write(current_task, next_p);
15078-
15079 return prev_p;
15080 }
15081
15082@@ -404,4 +410,3 @@ unsigned long get_wchan(struct task_stru
15083 } while (count++ < 16);
15084 return 0;
15085 }
15086-
15087diff -urNp linux-3.1.1/arch/x86/kernel/process_64.c linux-3.1.1/arch/x86/kernel/process_64.c
15088--- linux-3.1.1/arch/x86/kernel/process_64.c 2011-11-11 15:19:27.000000000 -0500
15089+++ linux-3.1.1/arch/x86/kernel/process_64.c 2011-11-16 18:39:07.000000000 -0500
15090@@ -88,7 +88,7 @@ static void __exit_idle(void)
15091 void exit_idle(void)
15092 {
15093 /* idle loop has pid 0 */
15094- if (current->pid)
15095+ if (task_pid_nr(current))
15096 return;
15097 __exit_idle();
15098 }
15099@@ -262,8 +262,7 @@ int copy_thread(unsigned long clone_flag
15100 struct pt_regs *childregs;
15101 struct task_struct *me = current;
15102
15103- childregs = ((struct pt_regs *)
15104- (THREAD_SIZE + task_stack_page(p))) - 1;
15105+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
15106 *childregs = *regs;
15107
15108 childregs->ax = 0;
15109@@ -275,6 +274,7 @@ int copy_thread(unsigned long clone_flag
15110 p->thread.sp = (unsigned long) childregs;
15111 p->thread.sp0 = (unsigned long) (childregs+1);
15112 p->thread.usersp = me->thread.usersp;
15113+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15114
15115 set_tsk_thread_flag(p, TIF_FORK);
15116
15117@@ -377,7 +377,7 @@ __switch_to(struct task_struct *prev_p,
15118 struct thread_struct *prev = &prev_p->thread;
15119 struct thread_struct *next = &next_p->thread;
15120 int cpu = smp_processor_id();
15121- struct tss_struct *tss = &per_cpu(init_tss, cpu);
15122+ struct tss_struct *tss = init_tss + cpu;
15123 unsigned fsindex, gsindex;
15124 bool preload_fpu;
15125
15126@@ -473,10 +473,9 @@ __switch_to(struct task_struct *prev_p,
15127 prev->usersp = percpu_read(old_rsp);
15128 percpu_write(old_rsp, next->usersp);
15129 percpu_write(current_task, next_p);
15130+ percpu_write(current_tinfo, &next_p->tinfo);
15131
15132- percpu_write(kernel_stack,
15133- (unsigned long)task_stack_page(next_p) +
15134- THREAD_SIZE - KERNEL_STACK_OFFSET);
15135+ percpu_write(kernel_stack, next->sp0);
15136
15137 /*
15138 * Now maybe reload the debug registers and handle I/O bitmaps
15139@@ -538,12 +537,11 @@ unsigned long get_wchan(struct task_stru
15140 if (!p || p == current || p->state == TASK_RUNNING)
15141 return 0;
15142 stack = (unsigned long)task_stack_page(p);
15143- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
15144+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
15145 return 0;
15146 fp = *(u64 *)(p->thread.sp);
15147 do {
15148- if (fp < (unsigned long)stack ||
15149- fp >= (unsigned long)stack+THREAD_SIZE)
15150+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
15151 return 0;
15152 ip = *(u64 *)(fp+8);
15153 if (!in_sched_functions(ip))
15154diff -urNp linux-3.1.1/arch/x86/kernel/process.c linux-3.1.1/arch/x86/kernel/process.c
15155--- linux-3.1.1/arch/x86/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
15156+++ linux-3.1.1/arch/x86/kernel/process.c 2011-11-16 18:39:07.000000000 -0500
15157@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
15158
15159 void free_thread_info(struct thread_info *ti)
15160 {
15161- free_thread_xstate(ti->task);
15162 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
15163 }
15164
15165+static struct kmem_cache *task_struct_cachep;
15166+
15167 void arch_task_cache_init(void)
15168 {
15169- task_xstate_cachep =
15170- kmem_cache_create("task_xstate", xstate_size,
15171+ /* create a slab on which task_structs can be allocated */
15172+ task_struct_cachep =
15173+ kmem_cache_create("task_struct", sizeof(struct task_struct),
15174+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
15175+
15176+ task_xstate_cachep =
15177+ kmem_cache_create("task_xstate", xstate_size,
15178 __alignof__(union thread_xstate),
15179- SLAB_PANIC | SLAB_NOTRACK, NULL);
15180+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
15181+}
15182+
15183+struct task_struct *alloc_task_struct_node(int node)
15184+{
15185+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
15186+}
15187+
15188+void free_task_struct(struct task_struct *task)
15189+{
15190+ free_thread_xstate(task);
15191+ kmem_cache_free(task_struct_cachep, task);
15192 }
15193
15194 /*
15195@@ -70,7 +87,7 @@ void exit_thread(void)
15196 unsigned long *bp = t->io_bitmap_ptr;
15197
15198 if (bp) {
15199- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
15200+ struct tss_struct *tss = init_tss + get_cpu();
15201
15202 t->io_bitmap_ptr = NULL;
15203 clear_thread_flag(TIF_IO_BITMAP);
15204@@ -106,7 +123,7 @@ void show_regs_common(void)
15205
15206 printk(KERN_CONT "\n");
15207 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
15208- current->pid, current->comm, print_tainted(),
15209+ task_pid_nr(current), current->comm, print_tainted(),
15210 init_utsname()->release,
15211 (int)strcspn(init_utsname()->version, " "),
15212 init_utsname()->version);
15213@@ -120,6 +137,9 @@ void flush_thread(void)
15214 {
15215 struct task_struct *tsk = current;
15216
15217+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
15218+ loadsegment(gs, 0);
15219+#endif
15220 flush_ptrace_hw_breakpoint(tsk);
15221 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
15222 /*
15223@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
15224 regs.di = (unsigned long) arg;
15225
15226 #ifdef CONFIG_X86_32
15227- regs.ds = __USER_DS;
15228- regs.es = __USER_DS;
15229+ regs.ds = __KERNEL_DS;
15230+ regs.es = __KERNEL_DS;
15231 regs.fs = __KERNEL_PERCPU;
15232- regs.gs = __KERNEL_STACK_CANARY;
15233+ savesegment(gs, regs.gs);
15234 #else
15235 regs.ss = __KERNEL_DS;
15236 #endif
15237@@ -403,7 +423,7 @@ void default_idle(void)
15238 EXPORT_SYMBOL(default_idle);
15239 #endif
15240
15241-void stop_this_cpu(void *dummy)
15242+__noreturn void stop_this_cpu(void *dummy)
15243 {
15244 local_irq_disable();
15245 /*
15246@@ -645,16 +665,37 @@ static int __init idle_setup(char *str)
15247 }
15248 early_param("idle", idle_setup);
15249
15250-unsigned long arch_align_stack(unsigned long sp)
15251+#ifdef CONFIG_PAX_RANDKSTACK
15252+void pax_randomize_kstack(struct pt_regs *regs)
15253 {
15254- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
15255- sp -= get_random_int() % 8192;
15256- return sp & ~0xf;
15257-}
15258+ struct thread_struct *thread = &current->thread;
15259+ unsigned long time;
15260
15261-unsigned long arch_randomize_brk(struct mm_struct *mm)
15262-{
15263- unsigned long range_end = mm->brk + 0x02000000;
15264- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
15265-}
15266+ if (!randomize_va_space)
15267+ return;
15268+
15269+ if (v8086_mode(regs))
15270+ return;
15271
15272+ rdtscl(time);
15273+
15274+ /* P4 seems to return a 0 LSB, ignore it */
15275+#ifdef CONFIG_MPENTIUM4
15276+ time &= 0x3EUL;
15277+ time <<= 2;
15278+#elif defined(CONFIG_X86_64)
15279+ time &= 0xFUL;
15280+ time <<= 4;
15281+#else
15282+ time &= 0x1FUL;
15283+ time <<= 3;
15284+#endif
15285+
15286+ thread->sp0 ^= time;
15287+ load_sp0(init_tss + smp_processor_id(), thread);
15288+
15289+#ifdef CONFIG_X86_64
15290+ percpu_write(kernel_stack, thread->sp0);
15291+#endif
15292+}
15293+#endif
15294diff -urNp linux-3.1.1/arch/x86/kernel/ptrace.c linux-3.1.1/arch/x86/kernel/ptrace.c
15295--- linux-3.1.1/arch/x86/kernel/ptrace.c 2011-11-11 15:19:27.000000000 -0500
15296+++ linux-3.1.1/arch/x86/kernel/ptrace.c 2011-11-16 18:39:07.000000000 -0500
15297@@ -822,7 +822,7 @@ long arch_ptrace(struct task_struct *chi
15298 unsigned long addr, unsigned long data)
15299 {
15300 int ret;
15301- unsigned long __user *datap = (unsigned long __user *)data;
15302+ unsigned long __user *datap = (__force unsigned long __user *)data;
15303
15304 switch (request) {
15305 /* read the word at location addr in the USER area. */
15306@@ -907,14 +907,14 @@ long arch_ptrace(struct task_struct *chi
15307 if ((int) addr < 0)
15308 return -EIO;
15309 ret = do_get_thread_area(child, addr,
15310- (struct user_desc __user *)data);
15311+ (__force struct user_desc __user *) data);
15312 break;
15313
15314 case PTRACE_SET_THREAD_AREA:
15315 if ((int) addr < 0)
15316 return -EIO;
15317 ret = do_set_thread_area(child, addr,
15318- (struct user_desc __user *)data, 0);
15319+ (__force struct user_desc __user *) data, 0);
15320 break;
15321 #endif
15322
15323@@ -1331,7 +1331,7 @@ static void fill_sigtrap_info(struct tas
15324 memset(info, 0, sizeof(*info));
15325 info->si_signo = SIGTRAP;
15326 info->si_code = si_code;
15327- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
15328+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
15329 }
15330
15331 void user_single_step_siginfo(struct task_struct *tsk,
15332diff -urNp linux-3.1.1/arch/x86/kernel/pvclock.c linux-3.1.1/arch/x86/kernel/pvclock.c
15333--- linux-3.1.1/arch/x86/kernel/pvclock.c 2011-11-11 15:19:27.000000000 -0500
15334+++ linux-3.1.1/arch/x86/kernel/pvclock.c 2011-11-16 18:39:07.000000000 -0500
15335@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
15336 return pv_tsc_khz;
15337 }
15338
15339-static atomic64_t last_value = ATOMIC64_INIT(0);
15340+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
15341
15342 void pvclock_resume(void)
15343 {
15344- atomic64_set(&last_value, 0);
15345+ atomic64_set_unchecked(&last_value, 0);
15346 }
15347
15348 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
15349@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
15350 * updating at the same time, and one of them could be slightly behind,
15351 * making the assumption that last_value always go forward fail to hold.
15352 */
15353- last = atomic64_read(&last_value);
15354+ last = atomic64_read_unchecked(&last_value);
15355 do {
15356 if (ret < last)
15357 return last;
15358- last = atomic64_cmpxchg(&last_value, last, ret);
15359+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
15360 } while (unlikely(last != ret));
15361
15362 return ret;
15363diff -urNp linux-3.1.1/arch/x86/kernel/reboot.c linux-3.1.1/arch/x86/kernel/reboot.c
15364--- linux-3.1.1/arch/x86/kernel/reboot.c 2011-11-11 15:19:27.000000000 -0500
15365+++ linux-3.1.1/arch/x86/kernel/reboot.c 2011-11-16 18:39:07.000000000 -0500
15366@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
15367 EXPORT_SYMBOL(pm_power_off);
15368
15369 static const struct desc_ptr no_idt = {};
15370-static int reboot_mode;
15371+static unsigned short reboot_mode;
15372 enum reboot_type reboot_type = BOOT_ACPI;
15373 int reboot_force;
15374
15375@@ -315,13 +315,17 @@ core_initcall(reboot_init);
15376 extern const unsigned char machine_real_restart_asm[];
15377 extern const u64 machine_real_restart_gdt[3];
15378
15379-void machine_real_restart(unsigned int type)
15380+__noreturn void machine_real_restart(unsigned int type)
15381 {
15382 void *restart_va;
15383 unsigned long restart_pa;
15384- void (*restart_lowmem)(unsigned int);
15385+ void (* __noreturn restart_lowmem)(unsigned int);
15386 u64 *lowmem_gdt;
15387
15388+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
15389+ struct desc_struct *gdt;
15390+#endif
15391+
15392 local_irq_disable();
15393
15394 /* Write zero to CMOS register number 0x0f, which the BIOS POST
15395@@ -347,14 +351,14 @@ void machine_real_restart(unsigned int t
15396 boot)". This seems like a fairly standard thing that gets set by
15397 REBOOT.COM programs, and the previous reset routine did this
15398 too. */
15399- *((unsigned short *)0x472) = reboot_mode;
15400+ *(unsigned short *)(__va(0x472)) = reboot_mode;
15401
15402 /* Patch the GDT in the low memory trampoline */
15403 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
15404
15405 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
15406 restart_pa = virt_to_phys(restart_va);
15407- restart_lowmem = (void (*)(unsigned int))restart_pa;
15408+ restart_lowmem = (void *)restart_pa;
15409
15410 /* GDT[0]: GDT self-pointer */
15411 lowmem_gdt[0] =
15412@@ -365,7 +369,33 @@ void machine_real_restart(unsigned int t
15413 GDT_ENTRY(0x009b, restart_pa, 0xffff);
15414
15415 /* Jump to the identity-mapped low memory code */
15416+
15417+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
15418+ gdt = get_cpu_gdt_table(smp_processor_id());
15419+ pax_open_kernel();
15420+#ifdef CONFIG_PAX_MEMORY_UDEREF
15421+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
15422+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
15423+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
15424+#endif
15425+#ifdef CONFIG_PAX_KERNEXEC
15426+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
15427+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
15428+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
15429+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
15430+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
15431+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
15432+#endif
15433+ pax_close_kernel();
15434+#endif
15435+
15436+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15437+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
15438+ unreachable();
15439+#else
15440 restart_lowmem(type);
15441+#endif
15442+
15443 }
15444 #ifdef CONFIG_APM_MODULE
15445 EXPORT_SYMBOL(machine_real_restart);
15446@@ -523,7 +553,7 @@ void __attribute__((weak)) mach_reboot_f
15447 * try to force a triple fault and then cycle between hitting the keyboard
15448 * controller and doing that
15449 */
15450-static void native_machine_emergency_restart(void)
15451+__noreturn static void native_machine_emergency_restart(void)
15452 {
15453 int i;
15454 int attempt = 0;
15455@@ -647,13 +677,13 @@ void native_machine_shutdown(void)
15456 #endif
15457 }
15458
15459-static void __machine_emergency_restart(int emergency)
15460+static __noreturn void __machine_emergency_restart(int emergency)
15461 {
15462 reboot_emergency = emergency;
15463 machine_ops.emergency_restart();
15464 }
15465
15466-static void native_machine_restart(char *__unused)
15467+static __noreturn void native_machine_restart(char *__unused)
15468 {
15469 printk("machine restart\n");
15470
15471@@ -662,7 +692,7 @@ static void native_machine_restart(char
15472 __machine_emergency_restart(0);
15473 }
15474
15475-static void native_machine_halt(void)
15476+static __noreturn void native_machine_halt(void)
15477 {
15478 /* stop other cpus and apics */
15479 machine_shutdown();
15480@@ -673,7 +703,7 @@ static void native_machine_halt(void)
15481 stop_this_cpu(NULL);
15482 }
15483
15484-static void native_machine_power_off(void)
15485+__noreturn static void native_machine_power_off(void)
15486 {
15487 if (pm_power_off) {
15488 if (!reboot_force)
15489@@ -682,6 +712,7 @@ static void native_machine_power_off(voi
15490 }
15491 /* a fallback in case there is no PM info available */
15492 tboot_shutdown(TB_SHUTDOWN_HALT);
15493+ unreachable();
15494 }
15495
15496 struct machine_ops machine_ops = {
15497diff -urNp linux-3.1.1/arch/x86/kernel/setup.c linux-3.1.1/arch/x86/kernel/setup.c
15498--- linux-3.1.1/arch/x86/kernel/setup.c 2011-11-11 15:19:27.000000000 -0500
15499+++ linux-3.1.1/arch/x86/kernel/setup.c 2011-11-16 18:39:07.000000000 -0500
15500@@ -447,7 +447,7 @@ static void __init parse_setup_data(void
15501
15502 switch (data->type) {
15503 case SETUP_E820_EXT:
15504- parse_e820_ext(data);
15505+ parse_e820_ext((struct setup_data __force_kernel *)data);
15506 break;
15507 case SETUP_DTB:
15508 add_dtb(pa_data);
15509@@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
15510 * area (640->1Mb) as ram even though it is not.
15511 * take them out.
15512 */
15513- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
15514+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
15515 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
15516 }
15517
15518@@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
15519
15520 if (!boot_params.hdr.root_flags)
15521 root_mountflags &= ~MS_RDONLY;
15522- init_mm.start_code = (unsigned long) _text;
15523- init_mm.end_code = (unsigned long) _etext;
15524+ init_mm.start_code = ktla_ktva((unsigned long) _text);
15525+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
15526 init_mm.end_data = (unsigned long) _edata;
15527 init_mm.brk = _brk_end;
15528
15529- code_resource.start = virt_to_phys(_text);
15530- code_resource.end = virt_to_phys(_etext)-1;
15531- data_resource.start = virt_to_phys(_etext);
15532+ code_resource.start = virt_to_phys(ktla_ktva(_text));
15533+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
15534+ data_resource.start = virt_to_phys(_sdata);
15535 data_resource.end = virt_to_phys(_edata)-1;
15536 bss_resource.start = virt_to_phys(&__bss_start);
15537 bss_resource.end = virt_to_phys(&__bss_stop)-1;
15538diff -urNp linux-3.1.1/arch/x86/kernel/setup_percpu.c linux-3.1.1/arch/x86/kernel/setup_percpu.c
15539--- linux-3.1.1/arch/x86/kernel/setup_percpu.c 2011-11-11 15:19:27.000000000 -0500
15540+++ linux-3.1.1/arch/x86/kernel/setup_percpu.c 2011-11-16 18:39:07.000000000 -0500
15541@@ -21,19 +21,17 @@
15542 #include <asm/cpu.h>
15543 #include <asm/stackprotector.h>
15544
15545-DEFINE_PER_CPU(int, cpu_number);
15546+#ifdef CONFIG_SMP
15547+DEFINE_PER_CPU(unsigned int, cpu_number);
15548 EXPORT_PER_CPU_SYMBOL(cpu_number);
15549+#endif
15550
15551-#ifdef CONFIG_X86_64
15552 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
15553-#else
15554-#define BOOT_PERCPU_OFFSET 0
15555-#endif
15556
15557 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
15558 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
15559
15560-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
15561+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
15562 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
15563 };
15564 EXPORT_SYMBOL(__per_cpu_offset);
15565@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
15566 {
15567 #ifdef CONFIG_X86_32
15568 struct desc_struct gdt;
15569+ unsigned long base = per_cpu_offset(cpu);
15570
15571- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
15572- 0x2 | DESCTYPE_S, 0x8);
15573- gdt.s = 1;
15574+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
15575+ 0x83 | DESCTYPE_S, 0xC);
15576 write_gdt_entry(get_cpu_gdt_table(cpu),
15577 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
15578 #endif
15579@@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
15580 /* alrighty, percpu areas up and running */
15581 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
15582 for_each_possible_cpu(cpu) {
15583+#ifdef CONFIG_CC_STACKPROTECTOR
15584+#ifdef CONFIG_X86_32
15585+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
15586+#endif
15587+#endif
15588 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
15589 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
15590 per_cpu(cpu_number, cpu) = cpu;
15591@@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
15592 */
15593 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
15594 #endif
15595+#ifdef CONFIG_CC_STACKPROTECTOR
15596+#ifdef CONFIG_X86_32
15597+ if (!cpu)
15598+ per_cpu(stack_canary.canary, cpu) = canary;
15599+#endif
15600+#endif
15601 /*
15602 * Up to this point, the boot CPU has been using .init.data
15603 * area. Reload any changed state for the boot CPU.
15604diff -urNp linux-3.1.1/arch/x86/kernel/signal.c linux-3.1.1/arch/x86/kernel/signal.c
15605--- linux-3.1.1/arch/x86/kernel/signal.c 2011-11-11 15:19:27.000000000 -0500
15606+++ linux-3.1.1/arch/x86/kernel/signal.c 2011-11-16 19:39:49.000000000 -0500
15607@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
15608 * Align the stack pointer according to the i386 ABI,
15609 * i.e. so that on function entry ((sp + 4) & 15) == 0.
15610 */
15611- sp = ((sp + 4) & -16ul) - 4;
15612+ sp = ((sp - 12) & -16ul) - 4;
15613 #else /* !CONFIG_X86_32 */
15614 sp = round_down(sp, 16) - 8;
15615 #endif
15616@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
15617 * Return an always-bogus address instead so we will die with SIGSEGV.
15618 */
15619 if (onsigstack && !likely(on_sig_stack(sp)))
15620- return (void __user *)-1L;
15621+ return (__force void __user *)-1L;
15622
15623 /* save i387 state */
15624 if (used_math() && save_i387_xstate(*fpstate) < 0)
15625- return (void __user *)-1L;
15626+ return (__force void __user *)-1L;
15627
15628 return (void __user *)sp;
15629 }
15630@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
15631 }
15632
15633 if (current->mm->context.vdso)
15634- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
15635+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
15636 else
15637- restorer = &frame->retcode;
15638+ restorer = (void __user *)&frame->retcode;
15639 if (ka->sa.sa_flags & SA_RESTORER)
15640 restorer = ka->sa.sa_restorer;
15641
15642@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
15643 * reasons and because gdb uses it as a signature to notice
15644 * signal handler stack frames.
15645 */
15646- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
15647+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
15648
15649 if (err)
15650 return -EFAULT;
15651@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
15652 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
15653
15654 /* Set up to return from userspace. */
15655- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
15656+ if (current->mm->context.vdso)
15657+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
15658+ else
15659+ restorer = (void __user *)&frame->retcode;
15660 if (ka->sa.sa_flags & SA_RESTORER)
15661 restorer = ka->sa.sa_restorer;
15662 put_user_ex(restorer, &frame->pretcode);
15663@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
15664 * reasons and because gdb uses it as a signature to notice
15665 * signal handler stack frames.
15666 */
15667- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
15668+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
15669 } put_user_catch(err);
15670
15671 if (err)
15672@@ -762,6 +765,8 @@ static void do_signal(struct pt_regs *re
15673 siginfo_t info;
15674 int signr;
15675
15676+ pax_track_stack();
15677+
15678 /*
15679 * We want the common case to go fast, which is why we may in certain
15680 * cases get here from kernel mode. Just return without doing anything
15681@@ -769,7 +774,7 @@ static void do_signal(struct pt_regs *re
15682 * X86_32: vm86 regs switched out by assembly code before reaching
15683 * here, so testing against kernel CS suffices.
15684 */
15685- if (!user_mode(regs))
15686+ if (!user_mode_novm(regs))
15687 return;
15688
15689 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
15690diff -urNp linux-3.1.1/arch/x86/kernel/smpboot.c linux-3.1.1/arch/x86/kernel/smpboot.c
15691--- linux-3.1.1/arch/x86/kernel/smpboot.c 2011-11-11 15:19:27.000000000 -0500
15692+++ linux-3.1.1/arch/x86/kernel/smpboot.c 2011-11-16 18:39:07.000000000 -0500
15693@@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
15694 set_idle_for_cpu(cpu, c_idle.idle);
15695 do_rest:
15696 per_cpu(current_task, cpu) = c_idle.idle;
15697+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
15698 #ifdef CONFIG_X86_32
15699 /* Stack for startup_32 can be just as for start_secondary onwards */
15700 irq_ctx_init(cpu);
15701 #else
15702 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
15703 initial_gs = per_cpu_offset(cpu);
15704- per_cpu(kernel_stack, cpu) =
15705- (unsigned long)task_stack_page(c_idle.idle) -
15706- KERNEL_STACK_OFFSET + THREAD_SIZE;
15707+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
15708 #endif
15709+
15710+ pax_open_kernel();
15711 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
15712+ pax_close_kernel();
15713+
15714 initial_code = (unsigned long)start_secondary;
15715 stack_start = c_idle.idle->thread.sp;
15716
15717@@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
15718
15719 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
15720
15721+#ifdef CONFIG_PAX_PER_CPU_PGD
15722+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
15723+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
15724+ KERNEL_PGD_PTRS);
15725+#endif
15726+
15727 err = do_boot_cpu(apicid, cpu);
15728 if (err) {
15729 pr_debug("do_boot_cpu failed %d\n", err);
15730diff -urNp linux-3.1.1/arch/x86/kernel/step.c linux-3.1.1/arch/x86/kernel/step.c
15731--- linux-3.1.1/arch/x86/kernel/step.c 2011-11-11 15:19:27.000000000 -0500
15732+++ linux-3.1.1/arch/x86/kernel/step.c 2011-11-16 18:39:07.000000000 -0500
15733@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
15734 struct desc_struct *desc;
15735 unsigned long base;
15736
15737- seg &= ~7UL;
15738+ seg >>= 3;
15739
15740 mutex_lock(&child->mm->context.lock);
15741- if (unlikely((seg >> 3) >= child->mm->context.size))
15742+ if (unlikely(seg >= child->mm->context.size))
15743 addr = -1L; /* bogus selector, access would fault */
15744 else {
15745 desc = child->mm->context.ldt + seg;
15746@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
15747 addr += base;
15748 }
15749 mutex_unlock(&child->mm->context.lock);
15750- }
15751+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
15752+ addr = ktla_ktva(addr);
15753
15754 return addr;
15755 }
15756@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
15757 unsigned char opcode[15];
15758 unsigned long addr = convert_ip_to_linear(child, regs);
15759
15760+ if (addr == -EINVAL)
15761+ return 0;
15762+
15763 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
15764 for (i = 0; i < copied; i++) {
15765 switch (opcode[i]) {
15766diff -urNp linux-3.1.1/arch/x86/kernel/syscall_table_32.S linux-3.1.1/arch/x86/kernel/syscall_table_32.S
15767--- linux-3.1.1/arch/x86/kernel/syscall_table_32.S 2011-11-11 15:19:27.000000000 -0500
15768+++ linux-3.1.1/arch/x86/kernel/syscall_table_32.S 2011-11-16 18:39:07.000000000 -0500
15769@@ -1,3 +1,4 @@
15770+.section .rodata,"a",@progbits
15771 ENTRY(sys_call_table)
15772 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
15773 .long sys_exit
15774diff -urNp linux-3.1.1/arch/x86/kernel/sys_i386_32.c linux-3.1.1/arch/x86/kernel/sys_i386_32.c
15775--- linux-3.1.1/arch/x86/kernel/sys_i386_32.c 2011-11-11 15:19:27.000000000 -0500
15776+++ linux-3.1.1/arch/x86/kernel/sys_i386_32.c 2011-11-16 18:39:07.000000000 -0500
15777@@ -24,17 +24,224 @@
15778
15779 #include <asm/syscalls.h>
15780
15781-/*
15782- * Do a system call from kernel instead of calling sys_execve so we
15783- * end up with proper pt_regs.
15784- */
15785-int kernel_execve(const char *filename,
15786- const char *const argv[],
15787- const char *const envp[])
15788+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
15789 {
15790- long __res;
15791- asm volatile ("int $0x80"
15792- : "=a" (__res)
15793- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
15794- return __res;
15795+ unsigned long pax_task_size = TASK_SIZE;
15796+
15797+#ifdef CONFIG_PAX_SEGMEXEC
15798+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
15799+ pax_task_size = SEGMEXEC_TASK_SIZE;
15800+#endif
15801+
15802+ if (len > pax_task_size || addr > pax_task_size - len)
15803+ return -EINVAL;
15804+
15805+ return 0;
15806+}
15807+
15808+unsigned long
15809+arch_get_unmapped_area(struct file *filp, unsigned long addr,
15810+ unsigned long len, unsigned long pgoff, unsigned long flags)
15811+{
15812+ struct mm_struct *mm = current->mm;
15813+ struct vm_area_struct *vma;
15814+ unsigned long start_addr, pax_task_size = TASK_SIZE;
15815+
15816+#ifdef CONFIG_PAX_SEGMEXEC
15817+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15818+ pax_task_size = SEGMEXEC_TASK_SIZE;
15819+#endif
15820+
15821+ pax_task_size -= PAGE_SIZE;
15822+
15823+ if (len > pax_task_size)
15824+ return -ENOMEM;
15825+
15826+ if (flags & MAP_FIXED)
15827+ return addr;
15828+
15829+#ifdef CONFIG_PAX_RANDMMAP
15830+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15831+#endif
15832+
15833+ if (addr) {
15834+ addr = PAGE_ALIGN(addr);
15835+ if (pax_task_size - len >= addr) {
15836+ vma = find_vma(mm, addr);
15837+ if (check_heap_stack_gap(vma, addr, len))
15838+ return addr;
15839+ }
15840+ }
15841+ if (len > mm->cached_hole_size) {
15842+ start_addr = addr = mm->free_area_cache;
15843+ } else {
15844+ start_addr = addr = mm->mmap_base;
15845+ mm->cached_hole_size = 0;
15846+ }
15847+
15848+#ifdef CONFIG_PAX_PAGEEXEC
15849+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
15850+ start_addr = 0x00110000UL;
15851+
15852+#ifdef CONFIG_PAX_RANDMMAP
15853+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15854+ start_addr += mm->delta_mmap & 0x03FFF000UL;
15855+#endif
15856+
15857+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
15858+ start_addr = addr = mm->mmap_base;
15859+ else
15860+ addr = start_addr;
15861+ }
15862+#endif
15863+
15864+full_search:
15865+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
15866+ /* At this point: (!vma || addr < vma->vm_end). */
15867+ if (pax_task_size - len < addr) {
15868+ /*
15869+ * Start a new search - just in case we missed
15870+ * some holes.
15871+ */
15872+ if (start_addr != mm->mmap_base) {
15873+ start_addr = addr = mm->mmap_base;
15874+ mm->cached_hole_size = 0;
15875+ goto full_search;
15876+ }
15877+ return -ENOMEM;
15878+ }
15879+ if (check_heap_stack_gap(vma, addr, len))
15880+ break;
15881+ if (addr + mm->cached_hole_size < vma->vm_start)
15882+ mm->cached_hole_size = vma->vm_start - addr;
15883+ addr = vma->vm_end;
15884+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
15885+ start_addr = addr = mm->mmap_base;
15886+ mm->cached_hole_size = 0;
15887+ goto full_search;
15888+ }
15889+ }
15890+
15891+ /*
15892+ * Remember the place where we stopped the search:
15893+ */
15894+ mm->free_area_cache = addr + len;
15895+ return addr;
15896+}
15897+
15898+unsigned long
15899+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
15900+ const unsigned long len, const unsigned long pgoff,
15901+ const unsigned long flags)
15902+{
15903+ struct vm_area_struct *vma;
15904+ struct mm_struct *mm = current->mm;
15905+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
15906+
15907+#ifdef CONFIG_PAX_SEGMEXEC
15908+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15909+ pax_task_size = SEGMEXEC_TASK_SIZE;
15910+#endif
15911+
15912+ pax_task_size -= PAGE_SIZE;
15913+
15914+ /* requested length too big for entire address space */
15915+ if (len > pax_task_size)
15916+ return -ENOMEM;
15917+
15918+ if (flags & MAP_FIXED)
15919+ return addr;
15920+
15921+#ifdef CONFIG_PAX_PAGEEXEC
15922+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
15923+ goto bottomup;
15924+#endif
15925+
15926+#ifdef CONFIG_PAX_RANDMMAP
15927+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15928+#endif
15929+
15930+ /* requesting a specific address */
15931+ if (addr) {
15932+ addr = PAGE_ALIGN(addr);
15933+ if (pax_task_size - len >= addr) {
15934+ vma = find_vma(mm, addr);
15935+ if (check_heap_stack_gap(vma, addr, len))
15936+ return addr;
15937+ }
15938+ }
15939+
15940+ /* check if free_area_cache is useful for us */
15941+ if (len <= mm->cached_hole_size) {
15942+ mm->cached_hole_size = 0;
15943+ mm->free_area_cache = mm->mmap_base;
15944+ }
15945+
15946+ /* either no address requested or can't fit in requested address hole */
15947+ addr = mm->free_area_cache;
15948+
15949+ /* make sure it can fit in the remaining address space */
15950+ if (addr > len) {
15951+ vma = find_vma(mm, addr-len);
15952+ if (check_heap_stack_gap(vma, addr - len, len))
15953+ /* remember the address as a hint for next time */
15954+ return (mm->free_area_cache = addr-len);
15955+ }
15956+
15957+ if (mm->mmap_base < len)
15958+ goto bottomup;
15959+
15960+ addr = mm->mmap_base-len;
15961+
15962+ do {
15963+ /*
15964+ * Lookup failure means no vma is above this address,
15965+ * else if new region fits below vma->vm_start,
15966+ * return with success:
15967+ */
15968+ vma = find_vma(mm, addr);
15969+ if (check_heap_stack_gap(vma, addr, len))
15970+ /* remember the address as a hint for next time */
15971+ return (mm->free_area_cache = addr);
15972+
15973+ /* remember the largest hole we saw so far */
15974+ if (addr + mm->cached_hole_size < vma->vm_start)
15975+ mm->cached_hole_size = vma->vm_start - addr;
15976+
15977+ /* try just below the current vma->vm_start */
15978+ addr = skip_heap_stack_gap(vma, len);
15979+ } while (!IS_ERR_VALUE(addr));
15980+
15981+bottomup:
15982+ /*
15983+ * A failed mmap() very likely causes application failure,
15984+ * so fall back to the bottom-up function here. This scenario
15985+ * can happen with large stack limits and large mmap()
15986+ * allocations.
15987+ */
15988+
15989+#ifdef CONFIG_PAX_SEGMEXEC
15990+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15991+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
15992+ else
15993+#endif
15994+
15995+ mm->mmap_base = TASK_UNMAPPED_BASE;
15996+
15997+#ifdef CONFIG_PAX_RANDMMAP
15998+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15999+ mm->mmap_base += mm->delta_mmap;
16000+#endif
16001+
16002+ mm->free_area_cache = mm->mmap_base;
16003+ mm->cached_hole_size = ~0UL;
16004+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16005+ /*
16006+ * Restore the topdown base:
16007+ */
16008+ mm->mmap_base = base;
16009+ mm->free_area_cache = base;
16010+ mm->cached_hole_size = ~0UL;
16011+
16012+ return addr;
16013 }
16014diff -urNp linux-3.1.1/arch/x86/kernel/sys_x86_64.c linux-3.1.1/arch/x86/kernel/sys_x86_64.c
16015--- linux-3.1.1/arch/x86/kernel/sys_x86_64.c 2011-11-11 15:19:27.000000000 -0500
16016+++ linux-3.1.1/arch/x86/kernel/sys_x86_64.c 2011-11-16 18:39:07.000000000 -0500
16017@@ -32,8 +32,8 @@ out:
16018 return error;
16019 }
16020
16021-static void find_start_end(unsigned long flags, unsigned long *begin,
16022- unsigned long *end)
16023+static void find_start_end(struct mm_struct *mm, unsigned long flags,
16024+ unsigned long *begin, unsigned long *end)
16025 {
16026 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
16027 unsigned long new_begin;
16028@@ -52,7 +52,7 @@ static void find_start_end(unsigned long
16029 *begin = new_begin;
16030 }
16031 } else {
16032- *begin = TASK_UNMAPPED_BASE;
16033+ *begin = mm->mmap_base;
16034 *end = TASK_SIZE;
16035 }
16036 }
16037@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
16038 if (flags & MAP_FIXED)
16039 return addr;
16040
16041- find_start_end(flags, &begin, &end);
16042+ find_start_end(mm, flags, &begin, &end);
16043
16044 if (len > end)
16045 return -ENOMEM;
16046
16047+#ifdef CONFIG_PAX_RANDMMAP
16048+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16049+#endif
16050+
16051 if (addr) {
16052 addr = PAGE_ALIGN(addr);
16053 vma = find_vma(mm, addr);
16054- if (end - len >= addr &&
16055- (!vma || addr + len <= vma->vm_start))
16056+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
16057 return addr;
16058 }
16059 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
16060@@ -106,7 +109,7 @@ full_search:
16061 }
16062 return -ENOMEM;
16063 }
16064- if (!vma || addr + len <= vma->vm_start) {
16065+ if (check_heap_stack_gap(vma, addr, len)) {
16066 /*
16067 * Remember the place where we stopped the search:
16068 */
16069@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
16070 {
16071 struct vm_area_struct *vma;
16072 struct mm_struct *mm = current->mm;
16073- unsigned long addr = addr0;
16074+ unsigned long base = mm->mmap_base, addr = addr0;
16075
16076 /* requested length too big for entire address space */
16077 if (len > TASK_SIZE)
16078@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
16079 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
16080 goto bottomup;
16081
16082+#ifdef CONFIG_PAX_RANDMMAP
16083+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16084+#endif
16085+
16086 /* requesting a specific address */
16087 if (addr) {
16088 addr = PAGE_ALIGN(addr);
16089- vma = find_vma(mm, addr);
16090- if (TASK_SIZE - len >= addr &&
16091- (!vma || addr + len <= vma->vm_start))
16092- return addr;
16093+ if (TASK_SIZE - len >= addr) {
16094+ vma = find_vma(mm, addr);
16095+ if (check_heap_stack_gap(vma, addr, len))
16096+ return addr;
16097+ }
16098 }
16099
16100 /* check if free_area_cache is useful for us */
16101@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
16102 /* make sure it can fit in the remaining address space */
16103 if (addr > len) {
16104 vma = find_vma(mm, addr-len);
16105- if (!vma || addr <= vma->vm_start)
16106+ if (check_heap_stack_gap(vma, addr - len, len))
16107 /* remember the address as a hint for next time */
16108 return mm->free_area_cache = addr-len;
16109 }
16110@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
16111 * return with success:
16112 */
16113 vma = find_vma(mm, addr);
16114- if (!vma || addr+len <= vma->vm_start)
16115+ if (check_heap_stack_gap(vma, addr, len))
16116 /* remember the address as a hint for next time */
16117 return mm->free_area_cache = addr;
16118
16119@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
16120 mm->cached_hole_size = vma->vm_start - addr;
16121
16122 /* try just below the current vma->vm_start */
16123- addr = vma->vm_start-len;
16124- } while (len < vma->vm_start);
16125+ addr = skip_heap_stack_gap(vma, len);
16126+ } while (!IS_ERR_VALUE(addr));
16127
16128 bottomup:
16129 /*
16130@@ -198,13 +206,21 @@ bottomup:
16131 * can happen with large stack limits and large mmap()
16132 * allocations.
16133 */
16134+ mm->mmap_base = TASK_UNMAPPED_BASE;
16135+
16136+#ifdef CONFIG_PAX_RANDMMAP
16137+ if (mm->pax_flags & MF_PAX_RANDMMAP)
16138+ mm->mmap_base += mm->delta_mmap;
16139+#endif
16140+
16141+ mm->free_area_cache = mm->mmap_base;
16142 mm->cached_hole_size = ~0UL;
16143- mm->free_area_cache = TASK_UNMAPPED_BASE;
16144 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16145 /*
16146 * Restore the topdown base:
16147 */
16148- mm->free_area_cache = mm->mmap_base;
16149+ mm->mmap_base = base;
16150+ mm->free_area_cache = base;
16151 mm->cached_hole_size = ~0UL;
16152
16153 return addr;
16154diff -urNp linux-3.1.1/arch/x86/kernel/tboot.c linux-3.1.1/arch/x86/kernel/tboot.c
16155--- linux-3.1.1/arch/x86/kernel/tboot.c 2011-11-11 15:19:27.000000000 -0500
16156+++ linux-3.1.1/arch/x86/kernel/tboot.c 2011-11-16 18:39:07.000000000 -0500
16157@@ -218,7 +218,7 @@ static int tboot_setup_sleep(void)
16158
16159 void tboot_shutdown(u32 shutdown_type)
16160 {
16161- void (*shutdown)(void);
16162+ void (* __noreturn shutdown)(void);
16163
16164 if (!tboot_enabled())
16165 return;
16166@@ -240,7 +240,7 @@ void tboot_shutdown(u32 shutdown_type)
16167
16168 switch_to_tboot_pt();
16169
16170- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
16171+ shutdown = (void *)tboot->shutdown_entry;
16172 shutdown();
16173
16174 /* should not reach here */
16175@@ -297,7 +297,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
16176 tboot_shutdown(acpi_shutdown_map[sleep_state]);
16177 }
16178
16179-static atomic_t ap_wfs_count;
16180+static atomic_unchecked_t ap_wfs_count;
16181
16182 static int tboot_wait_for_aps(int num_aps)
16183 {
16184@@ -321,9 +321,9 @@ static int __cpuinit tboot_cpu_callback(
16185 {
16186 switch (action) {
16187 case CPU_DYING:
16188- atomic_inc(&ap_wfs_count);
16189+ atomic_inc_unchecked(&ap_wfs_count);
16190 if (num_online_cpus() == 1)
16191- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
16192+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
16193 return NOTIFY_BAD;
16194 break;
16195 }
16196@@ -342,7 +342,7 @@ static __init int tboot_late_init(void)
16197
16198 tboot_create_trampoline();
16199
16200- atomic_set(&ap_wfs_count, 0);
16201+ atomic_set_unchecked(&ap_wfs_count, 0);
16202 register_hotcpu_notifier(&tboot_cpu_notifier);
16203 return 0;
16204 }
16205diff -urNp linux-3.1.1/arch/x86/kernel/time.c linux-3.1.1/arch/x86/kernel/time.c
16206--- linux-3.1.1/arch/x86/kernel/time.c 2011-11-11 15:19:27.000000000 -0500
16207+++ linux-3.1.1/arch/x86/kernel/time.c 2011-11-16 18:39:07.000000000 -0500
16208@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs
16209 {
16210 unsigned long pc = instruction_pointer(regs);
16211
16212- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
16213+ if (!user_mode(regs) && in_lock_functions(pc)) {
16214 #ifdef CONFIG_FRAME_POINTER
16215- return *(unsigned long *)(regs->bp + sizeof(long));
16216+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
16217 #else
16218 unsigned long *sp =
16219 (unsigned long *)kernel_stack_pointer(regs);
16220@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
16221 * or above a saved flags. Eflags has bits 22-31 zero,
16222 * kernel addresses don't.
16223 */
16224+
16225+#ifdef CONFIG_PAX_KERNEXEC
16226+ return ktla_ktva(sp[0]);
16227+#else
16228 if (sp[0] >> 22)
16229 return sp[0];
16230 if (sp[1] >> 22)
16231 return sp[1];
16232 #endif
16233+
16234+#endif
16235 }
16236 return pc;
16237 }
16238diff -urNp linux-3.1.1/arch/x86/kernel/tls.c linux-3.1.1/arch/x86/kernel/tls.c
16239--- linux-3.1.1/arch/x86/kernel/tls.c 2011-11-11 15:19:27.000000000 -0500
16240+++ linux-3.1.1/arch/x86/kernel/tls.c 2011-11-16 18:39:07.000000000 -0500
16241@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
16242 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
16243 return -EINVAL;
16244
16245+#ifdef CONFIG_PAX_SEGMEXEC
16246+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
16247+ return -EINVAL;
16248+#endif
16249+
16250 set_tls_desc(p, idx, &info, 1);
16251
16252 return 0;
16253diff -urNp linux-3.1.1/arch/x86/kernel/trampoline_32.S linux-3.1.1/arch/x86/kernel/trampoline_32.S
16254--- linux-3.1.1/arch/x86/kernel/trampoline_32.S 2011-11-11 15:19:27.000000000 -0500
16255+++ linux-3.1.1/arch/x86/kernel/trampoline_32.S 2011-11-16 18:39:07.000000000 -0500
16256@@ -32,6 +32,12 @@
16257 #include <asm/segment.h>
16258 #include <asm/page_types.h>
16259
16260+#ifdef CONFIG_PAX_KERNEXEC
16261+#define ta(X) (X)
16262+#else
16263+#define ta(X) ((X) - __PAGE_OFFSET)
16264+#endif
16265+
16266 #ifdef CONFIG_SMP
16267
16268 .section ".x86_trampoline","a"
16269@@ -62,7 +68,7 @@ r_base = .
16270 inc %ax # protected mode (PE) bit
16271 lmsw %ax # into protected mode
16272 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
16273- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
16274+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
16275
16276 # These need to be in the same 64K segment as the above;
16277 # hence we don't use the boot_gdt_descr defined in head.S
16278diff -urNp linux-3.1.1/arch/x86/kernel/trampoline_64.S linux-3.1.1/arch/x86/kernel/trampoline_64.S
16279--- linux-3.1.1/arch/x86/kernel/trampoline_64.S 2011-11-11 15:19:27.000000000 -0500
16280+++ linux-3.1.1/arch/x86/kernel/trampoline_64.S 2011-11-16 18:39:07.000000000 -0500
16281@@ -90,7 +90,7 @@ startup_32:
16282 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
16283 movl %eax, %ds
16284
16285- movl $X86_CR4_PAE, %eax
16286+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
16287 movl %eax, %cr4 # Enable PAE mode
16288
16289 # Setup trampoline 4 level pagetables
16290@@ -138,7 +138,7 @@ tidt:
16291 # so the kernel can live anywhere
16292 .balign 4
16293 tgdt:
16294- .short tgdt_end - tgdt # gdt limit
16295+ .short tgdt_end - tgdt - 1 # gdt limit
16296 .long tgdt - r_base
16297 .short 0
16298 .quad 0x00cf9b000000ffff # __KERNEL32_CS
16299diff -urNp linux-3.1.1/arch/x86/kernel/traps.c linux-3.1.1/arch/x86/kernel/traps.c
16300--- linux-3.1.1/arch/x86/kernel/traps.c 2011-11-11 15:19:27.000000000 -0500
16301+++ linux-3.1.1/arch/x86/kernel/traps.c 2011-11-16 18:39:07.000000000 -0500
16302@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
16303
16304 /* Do we ignore FPU interrupts ? */
16305 char ignore_fpu_irq;
16306-
16307-/*
16308- * The IDT has to be page-aligned to simplify the Pentium
16309- * F0 0F bug workaround.
16310- */
16311-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
16312 #endif
16313
16314 DECLARE_BITMAP(used_vectors, NR_VECTORS);
16315@@ -117,13 +111,13 @@ static inline void preempt_conditional_c
16316 }
16317
16318 static void __kprobes
16319-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
16320+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
16321 long error_code, siginfo_t *info)
16322 {
16323 struct task_struct *tsk = current;
16324
16325 #ifdef CONFIG_X86_32
16326- if (regs->flags & X86_VM_MASK) {
16327+ if (v8086_mode(regs)) {
16328 /*
16329 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
16330 * On nmi (interrupt 2), do_trap should not be called.
16331@@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
16332 }
16333 #endif
16334
16335- if (!user_mode(regs))
16336+ if (!user_mode_novm(regs))
16337 goto kernel_trap;
16338
16339 #ifdef CONFIG_X86_32
16340@@ -157,7 +151,7 @@ trap_signal:
16341 printk_ratelimit()) {
16342 printk(KERN_INFO
16343 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
16344- tsk->comm, tsk->pid, str,
16345+ tsk->comm, task_pid_nr(tsk), str,
16346 regs->ip, regs->sp, error_code);
16347 print_vma_addr(" in ", regs->ip);
16348 printk("\n");
16349@@ -174,8 +168,20 @@ kernel_trap:
16350 if (!fixup_exception(regs)) {
16351 tsk->thread.error_code = error_code;
16352 tsk->thread.trap_no = trapnr;
16353+
16354+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16355+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
16356+ str = "PAX: suspicious stack segment fault";
16357+#endif
16358+
16359 die(str, regs, error_code);
16360 }
16361+
16362+#ifdef CONFIG_PAX_REFCOUNT
16363+ if (trapnr == 4)
16364+ pax_report_refcount_overflow(regs);
16365+#endif
16366+
16367 return;
16368
16369 #ifdef CONFIG_X86_32
16370@@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
16371 conditional_sti(regs);
16372
16373 #ifdef CONFIG_X86_32
16374- if (regs->flags & X86_VM_MASK)
16375+ if (v8086_mode(regs))
16376 goto gp_in_vm86;
16377 #endif
16378
16379 tsk = current;
16380- if (!user_mode(regs))
16381+ if (!user_mode_novm(regs))
16382 goto gp_in_kernel;
16383
16384+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
16385+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
16386+ struct mm_struct *mm = tsk->mm;
16387+ unsigned long limit;
16388+
16389+ down_write(&mm->mmap_sem);
16390+ limit = mm->context.user_cs_limit;
16391+ if (limit < TASK_SIZE) {
16392+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
16393+ up_write(&mm->mmap_sem);
16394+ return;
16395+ }
16396+ up_write(&mm->mmap_sem);
16397+ }
16398+#endif
16399+
16400 tsk->thread.error_code = error_code;
16401 tsk->thread.trap_no = 13;
16402
16403@@ -304,6 +326,13 @@ gp_in_kernel:
16404 if (notify_die(DIE_GPF, "general protection fault", regs,
16405 error_code, 13, SIGSEGV) == NOTIFY_STOP)
16406 return;
16407+
16408+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16409+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
16410+ die("PAX: suspicious general protection fault", regs, error_code);
16411+ else
16412+#endif
16413+
16414 die("general protection fault", regs, error_code);
16415 }
16416
16417@@ -433,6 +462,17 @@ static notrace __kprobes void default_do
16418 dotraplinkage notrace __kprobes void
16419 do_nmi(struct pt_regs *regs, long error_code)
16420 {
16421+
16422+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16423+ if (!user_mode(regs)) {
16424+ unsigned long cs = regs->cs & 0xFFFF;
16425+ unsigned long ip = ktva_ktla(regs->ip);
16426+
16427+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
16428+ regs->ip = ip;
16429+ }
16430+#endif
16431+
16432 nmi_enter();
16433
16434 inc_irq_stat(__nmi_count);
16435@@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
16436 /* It's safe to allow irq's after DR6 has been saved */
16437 preempt_conditional_sti(regs);
16438
16439- if (regs->flags & X86_VM_MASK) {
16440+ if (v8086_mode(regs)) {
16441 handle_vm86_trap((struct kernel_vm86_regs *) regs,
16442 error_code, 1);
16443 preempt_conditional_cli(regs);
16444@@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
16445 * We already checked v86 mode above, so we can check for kernel mode
16446 * by just checking the CPL of CS.
16447 */
16448- if ((dr6 & DR_STEP) && !user_mode(regs)) {
16449+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
16450 tsk->thread.debugreg6 &= ~DR_STEP;
16451 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
16452 regs->flags &= ~X86_EFLAGS_TF;
16453@@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
16454 return;
16455 conditional_sti(regs);
16456
16457- if (!user_mode_vm(regs))
16458+ if (!user_mode(regs))
16459 {
16460 if (!fixup_exception(regs)) {
16461 task->thread.error_code = error_code;
16462@@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
16463 void __math_state_restore(void)
16464 {
16465 struct thread_info *thread = current_thread_info();
16466- struct task_struct *tsk = thread->task;
16467+ struct task_struct *tsk = current;
16468
16469 /*
16470 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
16471@@ -750,8 +790,7 @@ void __math_state_restore(void)
16472 */
16473 asmlinkage void math_state_restore(void)
16474 {
16475- struct thread_info *thread = current_thread_info();
16476- struct task_struct *tsk = thread->task;
16477+ struct task_struct *tsk = current;
16478
16479 if (!tsk_used_math(tsk)) {
16480 local_irq_enable();
16481diff -urNp linux-3.1.1/arch/x86/kernel/verify_cpu.S linux-3.1.1/arch/x86/kernel/verify_cpu.S
16482--- linux-3.1.1/arch/x86/kernel/verify_cpu.S 2011-11-11 15:19:27.000000000 -0500
16483+++ linux-3.1.1/arch/x86/kernel/verify_cpu.S 2011-11-16 18:40:08.000000000 -0500
16484@@ -20,6 +20,7 @@
16485 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
16486 * arch/x86/kernel/trampoline_64.S: secondary processor verification
16487 * arch/x86/kernel/head_32.S: processor startup
16488+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
16489 *
16490 * verify_cpu, returns the status of longmode and SSE in register %eax.
16491 * 0: Success 1: Failure
16492diff -urNp linux-3.1.1/arch/x86/kernel/vm86_32.c linux-3.1.1/arch/x86/kernel/vm86_32.c
16493--- linux-3.1.1/arch/x86/kernel/vm86_32.c 2011-11-11 15:19:27.000000000 -0500
16494+++ linux-3.1.1/arch/x86/kernel/vm86_32.c 2011-11-16 18:40:08.000000000 -0500
16495@@ -41,6 +41,7 @@
16496 #include <linux/ptrace.h>
16497 #include <linux/audit.h>
16498 #include <linux/stddef.h>
16499+#include <linux/grsecurity.h>
16500
16501 #include <asm/uaccess.h>
16502 #include <asm/io.h>
16503@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
16504 do_exit(SIGSEGV);
16505 }
16506
16507- tss = &per_cpu(init_tss, get_cpu());
16508+ tss = init_tss + get_cpu();
16509 current->thread.sp0 = current->thread.saved_sp0;
16510 current->thread.sysenter_cs = __KERNEL_CS;
16511 load_sp0(tss, &current->thread);
16512@@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
16513 struct task_struct *tsk;
16514 int tmp, ret = -EPERM;
16515
16516+#ifdef CONFIG_GRKERNSEC_VM86
16517+ if (!capable(CAP_SYS_RAWIO)) {
16518+ gr_handle_vm86();
16519+ goto out;
16520+ }
16521+#endif
16522+
16523 tsk = current;
16524 if (tsk->thread.saved_sp0)
16525 goto out;
16526@@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
16527 int tmp, ret;
16528 struct vm86plus_struct __user *v86;
16529
16530+#ifdef CONFIG_GRKERNSEC_VM86
16531+ if (!capable(CAP_SYS_RAWIO)) {
16532+ gr_handle_vm86();
16533+ ret = -EPERM;
16534+ goto out;
16535+ }
16536+#endif
16537+
16538 tsk = current;
16539 switch (cmd) {
16540 case VM86_REQUEST_IRQ:
16541@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
16542 tsk->thread.saved_fs = info->regs32->fs;
16543 tsk->thread.saved_gs = get_user_gs(info->regs32);
16544
16545- tss = &per_cpu(init_tss, get_cpu());
16546+ tss = init_tss + get_cpu();
16547 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
16548 if (cpu_has_sep)
16549 tsk->thread.sysenter_cs = 0;
16550@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
16551 goto cannot_handle;
16552 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
16553 goto cannot_handle;
16554- intr_ptr = (unsigned long __user *) (i << 2);
16555+ intr_ptr = (__force unsigned long __user *) (i << 2);
16556 if (get_user(segoffs, intr_ptr))
16557 goto cannot_handle;
16558 if ((segoffs >> 16) == BIOSSEG)
16559diff -urNp linux-3.1.1/arch/x86/kernel/vmlinux.lds.S linux-3.1.1/arch/x86/kernel/vmlinux.lds.S
16560--- linux-3.1.1/arch/x86/kernel/vmlinux.lds.S 2011-11-11 15:19:27.000000000 -0500
16561+++ linux-3.1.1/arch/x86/kernel/vmlinux.lds.S 2011-11-16 18:39:07.000000000 -0500
16562@@ -26,6 +26,13 @@
16563 #include <asm/page_types.h>
16564 #include <asm/cache.h>
16565 #include <asm/boot.h>
16566+#include <asm/segment.h>
16567+
16568+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16569+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
16570+#else
16571+#define __KERNEL_TEXT_OFFSET 0
16572+#endif
16573
16574 #undef i386 /* in case the preprocessor is a 32bit one */
16575
16576@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
16577
16578 PHDRS {
16579 text PT_LOAD FLAGS(5); /* R_E */
16580+#ifdef CONFIG_X86_32
16581+ module PT_LOAD FLAGS(5); /* R_E */
16582+#endif
16583+#ifdef CONFIG_XEN
16584+ rodata PT_LOAD FLAGS(5); /* R_E */
16585+#else
16586+ rodata PT_LOAD FLAGS(4); /* R__ */
16587+#endif
16588 data PT_LOAD FLAGS(6); /* RW_ */
16589-#ifdef CONFIG_X86_64
16590+ init.begin PT_LOAD FLAGS(6); /* RW_ */
16591 #ifdef CONFIG_SMP
16592 percpu PT_LOAD FLAGS(6); /* RW_ */
16593 #endif
16594+ text.init PT_LOAD FLAGS(5); /* R_E */
16595+ text.exit PT_LOAD FLAGS(5); /* R_E */
16596 init PT_LOAD FLAGS(7); /* RWE */
16597-#endif
16598 note PT_NOTE FLAGS(0); /* ___ */
16599 }
16600
16601 SECTIONS
16602 {
16603 #ifdef CONFIG_X86_32
16604- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
16605- phys_startup_32 = startup_32 - LOAD_OFFSET;
16606+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
16607 #else
16608- . = __START_KERNEL;
16609- phys_startup_64 = startup_64 - LOAD_OFFSET;
16610+ . = __START_KERNEL;
16611 #endif
16612
16613 /* Text and read-only data */
16614- .text : AT(ADDR(.text) - LOAD_OFFSET) {
16615- _text = .;
16616+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
16617 /* bootstrapping code */
16618+#ifdef CONFIG_X86_32
16619+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
16620+#else
16621+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
16622+#endif
16623+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
16624+ _text = .;
16625 HEAD_TEXT
16626 #ifdef CONFIG_X86_32
16627 . = ALIGN(PAGE_SIZE);
16628@@ -108,13 +128,47 @@ SECTIONS
16629 IRQENTRY_TEXT
16630 *(.fixup)
16631 *(.gnu.warning)
16632- /* End of text section */
16633- _etext = .;
16634 } :text = 0x9090
16635
16636- NOTES :text :note
16637+ . += __KERNEL_TEXT_OFFSET;
16638+
16639+#ifdef CONFIG_X86_32
16640+ . = ALIGN(PAGE_SIZE);
16641+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
16642+
16643+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
16644+ MODULES_EXEC_VADDR = .;
16645+ BYTE(0)
16646+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
16647+ . = ALIGN(HPAGE_SIZE);
16648+ MODULES_EXEC_END = . - 1;
16649+#endif
16650+
16651+ } :module
16652+#endif
16653+
16654+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
16655+ /* End of text section */
16656+ _etext = . - __KERNEL_TEXT_OFFSET;
16657+ }
16658+
16659+#ifdef CONFIG_X86_32
16660+ . = ALIGN(PAGE_SIZE);
16661+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
16662+ *(.idt)
16663+ . = ALIGN(PAGE_SIZE);
16664+ *(.empty_zero_page)
16665+ *(.initial_pg_fixmap)
16666+ *(.initial_pg_pmd)
16667+ *(.initial_page_table)
16668+ *(.swapper_pg_dir)
16669+ } :rodata
16670+#endif
16671+
16672+ . = ALIGN(PAGE_SIZE);
16673+ NOTES :rodata :note
16674
16675- EXCEPTION_TABLE(16) :text = 0x9090
16676+ EXCEPTION_TABLE(16) :rodata
16677
16678 #if defined(CONFIG_DEBUG_RODATA)
16679 /* .text should occupy whole number of pages */
16680@@ -126,16 +180,20 @@ SECTIONS
16681
16682 /* Data */
16683 .data : AT(ADDR(.data) - LOAD_OFFSET) {
16684+
16685+#ifdef CONFIG_PAX_KERNEXEC
16686+ . = ALIGN(HPAGE_SIZE);
16687+#else
16688+ . = ALIGN(PAGE_SIZE);
16689+#endif
16690+
16691 /* Start of data section */
16692 _sdata = .;
16693
16694 /* init_task */
16695 INIT_TASK_DATA(THREAD_SIZE)
16696
16697-#ifdef CONFIG_X86_32
16698- /* 32 bit has nosave before _edata */
16699 NOSAVE_DATA
16700-#endif
16701
16702 PAGE_ALIGNED_DATA(PAGE_SIZE)
16703
16704@@ -176,12 +234,19 @@ SECTIONS
16705 #endif /* CONFIG_X86_64 */
16706
16707 /* Init code and data - will be freed after init */
16708- . = ALIGN(PAGE_SIZE);
16709 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
16710+ BYTE(0)
16711+
16712+#ifdef CONFIG_PAX_KERNEXEC
16713+ . = ALIGN(HPAGE_SIZE);
16714+#else
16715+ . = ALIGN(PAGE_SIZE);
16716+#endif
16717+
16718 __init_begin = .; /* paired with __init_end */
16719- }
16720+ } :init.begin
16721
16722-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
16723+#ifdef CONFIG_SMP
16724 /*
16725 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
16726 * output PHDR, so the next output section - .init.text - should
16727@@ -190,12 +255,27 @@ SECTIONS
16728 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
16729 #endif
16730
16731- INIT_TEXT_SECTION(PAGE_SIZE)
16732-#ifdef CONFIG_X86_64
16733- :init
16734-#endif
16735+ . = ALIGN(PAGE_SIZE);
16736+ init_begin = .;
16737+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
16738+ VMLINUX_SYMBOL(_sinittext) = .;
16739+ INIT_TEXT
16740+ VMLINUX_SYMBOL(_einittext) = .;
16741+ . = ALIGN(PAGE_SIZE);
16742+ } :text.init
16743
16744- INIT_DATA_SECTION(16)
16745+ /*
16746+ * .exit.text is discard at runtime, not link time, to deal with
16747+ * references from .altinstructions and .eh_frame
16748+ */
16749+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
16750+ EXIT_TEXT
16751+ . = ALIGN(16);
16752+ } :text.exit
16753+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
16754+
16755+ . = ALIGN(PAGE_SIZE);
16756+ INIT_DATA_SECTION(16) :init
16757
16758 /*
16759 * Code and data for a variety of lowlevel trampolines, to be
16760@@ -269,19 +349,12 @@ SECTIONS
16761 }
16762
16763 . = ALIGN(8);
16764- /*
16765- * .exit.text is discard at runtime, not link time, to deal with
16766- * references from .altinstructions and .eh_frame
16767- */
16768- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
16769- EXIT_TEXT
16770- }
16771
16772 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
16773 EXIT_DATA
16774 }
16775
16776-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
16777+#ifndef CONFIG_SMP
16778 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
16779 #endif
16780
16781@@ -300,16 +373,10 @@ SECTIONS
16782 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
16783 __smp_locks = .;
16784 *(.smp_locks)
16785- . = ALIGN(PAGE_SIZE);
16786 __smp_locks_end = .;
16787+ . = ALIGN(PAGE_SIZE);
16788 }
16789
16790-#ifdef CONFIG_X86_64
16791- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
16792- NOSAVE_DATA
16793- }
16794-#endif
16795-
16796 /* BSS */
16797 . = ALIGN(PAGE_SIZE);
16798 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
16799@@ -325,6 +392,7 @@ SECTIONS
16800 __brk_base = .;
16801 . += 64 * 1024; /* 64k alignment slop space */
16802 *(.brk_reservation) /* areas brk users have reserved */
16803+ . = ALIGN(HPAGE_SIZE);
16804 __brk_limit = .;
16805 }
16806
16807@@ -351,13 +419,12 @@ SECTIONS
16808 * for the boot processor.
16809 */
16810 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
16811-INIT_PER_CPU(gdt_page);
16812 INIT_PER_CPU(irq_stack_union);
16813
16814 /*
16815 * Build-time check on the image size:
16816 */
16817-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
16818+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
16819 "kernel image bigger than KERNEL_IMAGE_SIZE");
16820
16821 #ifdef CONFIG_SMP
16822diff -urNp linux-3.1.1/arch/x86/kernel/vsyscall_64.c linux-3.1.1/arch/x86/kernel/vsyscall_64.c
16823--- linux-3.1.1/arch/x86/kernel/vsyscall_64.c 2011-11-11 15:19:27.000000000 -0500
16824+++ linux-3.1.1/arch/x86/kernel/vsyscall_64.c 2011-11-16 18:39:07.000000000 -0500
16825@@ -56,15 +56,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, v
16826 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
16827 };
16828
16829-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
16830+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
16831
16832 static int __init vsyscall_setup(char *str)
16833 {
16834 if (str) {
16835 if (!strcmp("emulate", str))
16836 vsyscall_mode = EMULATE;
16837- else if (!strcmp("native", str))
16838- vsyscall_mode = NATIVE;
16839 else if (!strcmp("none", str))
16840 vsyscall_mode = NONE;
16841 else
16842@@ -177,7 +175,7 @@ bool emulate_vsyscall(struct pt_regs *re
16843
16844 tsk = current;
16845 if (seccomp_mode(&tsk->seccomp))
16846- do_exit(SIGKILL);
16847+ do_group_exit(SIGKILL);
16848
16849 switch (vsyscall_nr) {
16850 case 0:
16851@@ -219,8 +217,7 @@ bool emulate_vsyscall(struct pt_regs *re
16852 return true;
16853
16854 sigsegv:
16855- force_sig(SIGSEGV, current);
16856- return true;
16857+ do_group_exit(SIGKILL);
16858 }
16859
16860 /*
16861@@ -273,10 +270,7 @@ void __init map_vsyscall(void)
16862 extern char __vvar_page;
16863 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
16864
16865- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
16866- vsyscall_mode == NATIVE
16867- ? PAGE_KERNEL_VSYSCALL
16868- : PAGE_KERNEL_VVAR);
16869+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
16870 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
16871 (unsigned long)VSYSCALL_START);
16872
16873diff -urNp linux-3.1.1/arch/x86/kernel/x8664_ksyms_64.c linux-3.1.1/arch/x86/kernel/x8664_ksyms_64.c
16874--- linux-3.1.1/arch/x86/kernel/x8664_ksyms_64.c 2011-11-11 15:19:27.000000000 -0500
16875+++ linux-3.1.1/arch/x86/kernel/x8664_ksyms_64.c 2011-11-16 18:39:07.000000000 -0500
16876@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
16877 EXPORT_SYMBOL(copy_user_generic_string);
16878 EXPORT_SYMBOL(copy_user_generic_unrolled);
16879 EXPORT_SYMBOL(__copy_user_nocache);
16880-EXPORT_SYMBOL(_copy_from_user);
16881-EXPORT_SYMBOL(_copy_to_user);
16882
16883 EXPORT_SYMBOL(copy_page);
16884 EXPORT_SYMBOL(clear_page);
16885diff -urNp linux-3.1.1/arch/x86/kernel/xsave.c linux-3.1.1/arch/x86/kernel/xsave.c
16886--- linux-3.1.1/arch/x86/kernel/xsave.c 2011-11-11 15:19:27.000000000 -0500
16887+++ linux-3.1.1/arch/x86/kernel/xsave.c 2011-11-16 18:39:07.000000000 -0500
16888@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
16889 fx_sw_user->xstate_size > fx_sw_user->extended_size)
16890 return -EINVAL;
16891
16892- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
16893+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
16894 fx_sw_user->extended_size -
16895 FP_XSTATE_MAGIC2_SIZE));
16896 if (err)
16897@@ -267,7 +267,7 @@ fx_only:
16898 * the other extended state.
16899 */
16900 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
16901- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
16902+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
16903 }
16904
16905 /*
16906@@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
16907 if (use_xsave())
16908 err = restore_user_xstate(buf);
16909 else
16910- err = fxrstor_checking((__force struct i387_fxsave_struct *)
16911+ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
16912 buf);
16913 if (unlikely(err)) {
16914 /*
16915diff -urNp linux-3.1.1/arch/x86/kvm/emulate.c linux-3.1.1/arch/x86/kvm/emulate.c
16916--- linux-3.1.1/arch/x86/kvm/emulate.c 2011-11-11 15:19:27.000000000 -0500
16917+++ linux-3.1.1/arch/x86/kvm/emulate.c 2011-11-16 18:39:07.000000000 -0500
16918@@ -96,7 +96,7 @@
16919 #define Src2ImmByte (2<<29)
16920 #define Src2One (3<<29)
16921 #define Src2Imm (4<<29)
16922-#define Src2Mask (7<<29)
16923+#define Src2Mask (7U<<29)
16924
16925 #define X2(x...) x, x
16926 #define X3(x...) X2(x), x
16927@@ -207,6 +207,7 @@ struct gprefix {
16928
16929 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
16930 do { \
16931+ unsigned long _tmp; \
16932 __asm__ __volatile__ ( \
16933 _PRE_EFLAGS("0", "4", "2") \
16934 _op _suffix " %"_x"3,%1; " \
16935@@ -220,8 +221,6 @@ struct gprefix {
16936 /* Raw emulation: instruction has two explicit operands. */
16937 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
16938 do { \
16939- unsigned long _tmp; \
16940- \
16941 switch ((_dst).bytes) { \
16942 case 2: \
16943 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
16944@@ -237,7 +236,6 @@ struct gprefix {
16945
16946 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
16947 do { \
16948- unsigned long _tmp; \
16949 switch ((_dst).bytes) { \
16950 case 1: \
16951 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
16952diff -urNp linux-3.1.1/arch/x86/kvm/lapic.c linux-3.1.1/arch/x86/kvm/lapic.c
16953--- linux-3.1.1/arch/x86/kvm/lapic.c 2011-11-11 15:19:27.000000000 -0500
16954+++ linux-3.1.1/arch/x86/kvm/lapic.c 2011-11-16 18:39:07.000000000 -0500
16955@@ -53,7 +53,7 @@
16956 #define APIC_BUS_CYCLE_NS 1
16957
16958 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
16959-#define apic_debug(fmt, arg...)
16960+#define apic_debug(fmt, arg...) do {} while (0)
16961
16962 #define APIC_LVT_NUM 6
16963 /* 14 is the version for Xeon and Pentium 8.4.8*/
16964diff -urNp linux-3.1.1/arch/x86/kvm/mmu.c linux-3.1.1/arch/x86/kvm/mmu.c
16965--- linux-3.1.1/arch/x86/kvm/mmu.c 2011-11-11 15:19:27.000000000 -0500
16966+++ linux-3.1.1/arch/x86/kvm/mmu.c 2011-11-16 18:39:07.000000000 -0500
16967@@ -3552,7 +3552,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16968
16969 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
16970
16971- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
16972+ invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
16973
16974 /*
16975 * Assume that the pte write on a page table of the same type
16976@@ -3584,7 +3584,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16977 }
16978
16979 spin_lock(&vcpu->kvm->mmu_lock);
16980- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16981+ if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16982 gentry = 0;
16983 kvm_mmu_free_some_pages(vcpu);
16984 ++vcpu->kvm->stat.mmu_pte_write;
16985diff -urNp linux-3.1.1/arch/x86/kvm/paging_tmpl.h linux-3.1.1/arch/x86/kvm/paging_tmpl.h
16986--- linux-3.1.1/arch/x86/kvm/paging_tmpl.h 2011-11-11 15:19:27.000000000 -0500
16987+++ linux-3.1.1/arch/x86/kvm/paging_tmpl.h 2011-11-16 19:40:44.000000000 -0500
16988@@ -197,7 +197,7 @@ retry_walk:
16989 if (unlikely(kvm_is_error_hva(host_addr)))
16990 goto error;
16991
16992- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
16993+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
16994 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
16995 goto error;
16996
16997@@ -575,6 +575,8 @@ static int FNAME(page_fault)(struct kvm_
16998 unsigned long mmu_seq;
16999 bool map_writable;
17000
17001+ pax_track_stack();
17002+
17003 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
17004
17005 if (unlikely(error_code & PFERR_RSVD_MASK))
17006@@ -701,7 +703,7 @@ static void FNAME(invlpg)(struct kvm_vcp
17007 if (need_flush)
17008 kvm_flush_remote_tlbs(vcpu->kvm);
17009
17010- atomic_inc(&vcpu->kvm->arch.invlpg_counter);
17011+ atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
17012
17013 spin_unlock(&vcpu->kvm->mmu_lock);
17014
17015diff -urNp linux-3.1.1/arch/x86/kvm/svm.c linux-3.1.1/arch/x86/kvm/svm.c
17016--- linux-3.1.1/arch/x86/kvm/svm.c 2011-11-11 15:19:27.000000000 -0500
17017+++ linux-3.1.1/arch/x86/kvm/svm.c 2011-11-16 18:39:07.000000000 -0500
17018@@ -3381,7 +3381,11 @@ static void reload_tss(struct kvm_vcpu *
17019 int cpu = raw_smp_processor_id();
17020
17021 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
17022+
17023+ pax_open_kernel();
17024 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
17025+ pax_close_kernel();
17026+
17027 load_TR_desc();
17028 }
17029
17030@@ -3759,6 +3763,10 @@ static void svm_vcpu_run(struct kvm_vcpu
17031 #endif
17032 #endif
17033
17034+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17035+ __set_fs(current_thread_info()->addr_limit);
17036+#endif
17037+
17038 reload_tss(vcpu);
17039
17040 local_irq_disable();
17041diff -urNp linux-3.1.1/arch/x86/kvm/vmx.c linux-3.1.1/arch/x86/kvm/vmx.c
17042--- linux-3.1.1/arch/x86/kvm/vmx.c 2011-11-11 15:19:27.000000000 -0500
17043+++ linux-3.1.1/arch/x86/kvm/vmx.c 2011-11-16 18:39:07.000000000 -0500
17044@@ -1251,7 +1251,11 @@ static void reload_tss(void)
17045 struct desc_struct *descs;
17046
17047 descs = (void *)gdt->address;
17048+
17049+ pax_open_kernel();
17050 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
17051+ pax_close_kernel();
17052+
17053 load_TR_desc();
17054 }
17055
17056@@ -2520,8 +2524,11 @@ static __init int hardware_setup(void)
17057 if (!cpu_has_vmx_flexpriority())
17058 flexpriority_enabled = 0;
17059
17060- if (!cpu_has_vmx_tpr_shadow())
17061- kvm_x86_ops->update_cr8_intercept = NULL;
17062+ if (!cpu_has_vmx_tpr_shadow()) {
17063+ pax_open_kernel();
17064+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
17065+ pax_close_kernel();
17066+ }
17067
17068 if (enable_ept && !cpu_has_vmx_ept_2m_page())
17069 kvm_disable_largepages();
17070@@ -3535,7 +3542,7 @@ static void vmx_set_constant_host_state(
17071 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
17072
17073 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
17074- vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
17075+ vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
17076
17077 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
17078 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
17079@@ -6021,6 +6028,12 @@ static void __noclone vmx_vcpu_run(struc
17080 "jmp .Lkvm_vmx_return \n\t"
17081 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
17082 ".Lkvm_vmx_return: "
17083+
17084+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17085+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
17086+ ".Lkvm_vmx_return2: "
17087+#endif
17088+
17089 /* Save guest registers, load host registers, keep flags */
17090 "mov %0, %c[wordsize](%%"R"sp) \n\t"
17091 "pop %0 \n\t"
17092@@ -6069,6 +6082,11 @@ static void __noclone vmx_vcpu_run(struc
17093 #endif
17094 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
17095 [wordsize]"i"(sizeof(ulong))
17096+
17097+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17098+ ,[cs]"i"(__KERNEL_CS)
17099+#endif
17100+
17101 : "cc", "memory"
17102 , R"ax", R"bx", R"di", R"si"
17103 #ifdef CONFIG_X86_64
17104@@ -6097,7 +6115,16 @@ static void __noclone vmx_vcpu_run(struc
17105 }
17106 }
17107
17108- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
17109+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
17110+
17111+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17112+ loadsegment(fs, __KERNEL_PERCPU);
17113+#endif
17114+
17115+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17116+ __set_fs(current_thread_info()->addr_limit);
17117+#endif
17118+
17119 vmx->loaded_vmcs->launched = 1;
17120
17121 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
17122diff -urNp linux-3.1.1/arch/x86/kvm/x86.c linux-3.1.1/arch/x86/kvm/x86.c
17123--- linux-3.1.1/arch/x86/kvm/x86.c 2011-11-11 15:19:27.000000000 -0500
17124+++ linux-3.1.1/arch/x86/kvm/x86.c 2011-11-16 18:39:07.000000000 -0500
17125@@ -1334,8 +1334,8 @@ static int xen_hvm_config(struct kvm_vcp
17126 {
17127 struct kvm *kvm = vcpu->kvm;
17128 int lm = is_long_mode(vcpu);
17129- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
17130- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
17131+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
17132+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
17133 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
17134 : kvm->arch.xen_hvm_config.blob_size_32;
17135 u32 page_num = data & ~PAGE_MASK;
17136@@ -2137,6 +2137,8 @@ long kvm_arch_dev_ioctl(struct file *fil
17137 if (n < msr_list.nmsrs)
17138 goto out;
17139 r = -EFAULT;
17140+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
17141+ goto out;
17142 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
17143 num_msrs_to_save * sizeof(u32)))
17144 goto out;
17145@@ -2312,15 +2314,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
17146 struct kvm_cpuid2 *cpuid,
17147 struct kvm_cpuid_entry2 __user *entries)
17148 {
17149- int r;
17150+ int r, i;
17151
17152 r = -E2BIG;
17153 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
17154 goto out;
17155 r = -EFAULT;
17156- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
17157- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
17158+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
17159 goto out;
17160+ for (i = 0; i < cpuid->nent; ++i) {
17161+ struct kvm_cpuid_entry2 cpuid_entry;
17162+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
17163+ goto out;
17164+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
17165+ }
17166 vcpu->arch.cpuid_nent = cpuid->nent;
17167 kvm_apic_set_version(vcpu);
17168 kvm_x86_ops->cpuid_update(vcpu);
17169@@ -2335,15 +2342,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
17170 struct kvm_cpuid2 *cpuid,
17171 struct kvm_cpuid_entry2 __user *entries)
17172 {
17173- int r;
17174+ int r, i;
17175
17176 r = -E2BIG;
17177 if (cpuid->nent < vcpu->arch.cpuid_nent)
17178 goto out;
17179 r = -EFAULT;
17180- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
17181- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
17182+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
17183 goto out;
17184+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
17185+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
17186+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
17187+ goto out;
17188+ }
17189 return 0;
17190
17191 out:
17192@@ -2718,7 +2729,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
17193 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
17194 struct kvm_interrupt *irq)
17195 {
17196- if (irq->irq < 0 || irq->irq >= 256)
17197+ if (irq->irq >= 256)
17198 return -EINVAL;
17199 if (irqchip_in_kernel(vcpu->kvm))
17200 return -ENXIO;
17201@@ -5089,7 +5100,7 @@ static void kvm_set_mmio_spte_mask(void)
17202 kvm_mmu_set_mmio_spte_mask(mask);
17203 }
17204
17205-int kvm_arch_init(void *opaque)
17206+int kvm_arch_init(const void *opaque)
17207 {
17208 int r;
17209 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
17210diff -urNp linux-3.1.1/arch/x86/lguest/boot.c linux-3.1.1/arch/x86/lguest/boot.c
17211--- linux-3.1.1/arch/x86/lguest/boot.c 2011-11-11 15:19:27.000000000 -0500
17212+++ linux-3.1.1/arch/x86/lguest/boot.c 2011-11-16 18:39:07.000000000 -0500
17213@@ -1184,9 +1184,10 @@ static __init int early_put_chars(u32 vt
17214 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
17215 * Launcher to reboot us.
17216 */
17217-static void lguest_restart(char *reason)
17218+static __noreturn void lguest_restart(char *reason)
17219 {
17220 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
17221+ BUG();
17222 }
17223
17224 /*G:050
17225diff -urNp linux-3.1.1/arch/x86/lib/atomic64_32.c linux-3.1.1/arch/x86/lib/atomic64_32.c
17226--- linux-3.1.1/arch/x86/lib/atomic64_32.c 2011-11-11 15:19:27.000000000 -0500
17227+++ linux-3.1.1/arch/x86/lib/atomic64_32.c 2011-11-16 18:39:07.000000000 -0500
17228@@ -8,18 +8,30 @@
17229
17230 long long atomic64_read_cx8(long long, const atomic64_t *v);
17231 EXPORT_SYMBOL(atomic64_read_cx8);
17232+long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
17233+EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
17234 long long atomic64_set_cx8(long long, const atomic64_t *v);
17235 EXPORT_SYMBOL(atomic64_set_cx8);
17236+long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
17237+EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
17238 long long atomic64_xchg_cx8(long long, unsigned high);
17239 EXPORT_SYMBOL(atomic64_xchg_cx8);
17240 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
17241 EXPORT_SYMBOL(atomic64_add_return_cx8);
17242+long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
17243+EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
17244 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
17245 EXPORT_SYMBOL(atomic64_sub_return_cx8);
17246+long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
17247+EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
17248 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
17249 EXPORT_SYMBOL(atomic64_inc_return_cx8);
17250+long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
17251+EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
17252 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
17253 EXPORT_SYMBOL(atomic64_dec_return_cx8);
17254+long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
17255+EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
17256 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
17257 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
17258 int atomic64_inc_not_zero_cx8(atomic64_t *v);
17259@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
17260 #ifndef CONFIG_X86_CMPXCHG64
17261 long long atomic64_read_386(long long, const atomic64_t *v);
17262 EXPORT_SYMBOL(atomic64_read_386);
17263+long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
17264+EXPORT_SYMBOL(atomic64_read_unchecked_386);
17265 long long atomic64_set_386(long long, const atomic64_t *v);
17266 EXPORT_SYMBOL(atomic64_set_386);
17267+long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
17268+EXPORT_SYMBOL(atomic64_set_unchecked_386);
17269 long long atomic64_xchg_386(long long, unsigned high);
17270 EXPORT_SYMBOL(atomic64_xchg_386);
17271 long long atomic64_add_return_386(long long a, atomic64_t *v);
17272 EXPORT_SYMBOL(atomic64_add_return_386);
17273+long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
17274+EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
17275 long long atomic64_sub_return_386(long long a, atomic64_t *v);
17276 EXPORT_SYMBOL(atomic64_sub_return_386);
17277+long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
17278+EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
17279 long long atomic64_inc_return_386(long long a, atomic64_t *v);
17280 EXPORT_SYMBOL(atomic64_inc_return_386);
17281+long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
17282+EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
17283 long long atomic64_dec_return_386(long long a, atomic64_t *v);
17284 EXPORT_SYMBOL(atomic64_dec_return_386);
17285+long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
17286+EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
17287 long long atomic64_add_386(long long a, atomic64_t *v);
17288 EXPORT_SYMBOL(atomic64_add_386);
17289+long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
17290+EXPORT_SYMBOL(atomic64_add_unchecked_386);
17291 long long atomic64_sub_386(long long a, atomic64_t *v);
17292 EXPORT_SYMBOL(atomic64_sub_386);
17293+long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
17294+EXPORT_SYMBOL(atomic64_sub_unchecked_386);
17295 long long atomic64_inc_386(long long a, atomic64_t *v);
17296 EXPORT_SYMBOL(atomic64_inc_386);
17297+long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
17298+EXPORT_SYMBOL(atomic64_inc_unchecked_386);
17299 long long atomic64_dec_386(long long a, atomic64_t *v);
17300 EXPORT_SYMBOL(atomic64_dec_386);
17301+long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
17302+EXPORT_SYMBOL(atomic64_dec_unchecked_386);
17303 long long atomic64_dec_if_positive_386(atomic64_t *v);
17304 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
17305 int atomic64_inc_not_zero_386(atomic64_t *v);
17306diff -urNp linux-3.1.1/arch/x86/lib/atomic64_386_32.S linux-3.1.1/arch/x86/lib/atomic64_386_32.S
17307--- linux-3.1.1/arch/x86/lib/atomic64_386_32.S 2011-11-11 15:19:27.000000000 -0500
17308+++ linux-3.1.1/arch/x86/lib/atomic64_386_32.S 2011-11-16 18:39:07.000000000 -0500
17309@@ -48,6 +48,10 @@ BEGIN(read)
17310 movl (v), %eax
17311 movl 4(v), %edx
17312 RET_ENDP
17313+BEGIN(read_unchecked)
17314+ movl (v), %eax
17315+ movl 4(v), %edx
17316+RET_ENDP
17317 #undef v
17318
17319 #define v %esi
17320@@ -55,6 +59,10 @@ BEGIN(set)
17321 movl %ebx, (v)
17322 movl %ecx, 4(v)
17323 RET_ENDP
17324+BEGIN(set_unchecked)
17325+ movl %ebx, (v)
17326+ movl %ecx, 4(v)
17327+RET_ENDP
17328 #undef v
17329
17330 #define v %esi
17331@@ -70,6 +78,20 @@ RET_ENDP
17332 BEGIN(add)
17333 addl %eax, (v)
17334 adcl %edx, 4(v)
17335+
17336+#ifdef CONFIG_PAX_REFCOUNT
17337+ jno 0f
17338+ subl %eax, (v)
17339+ sbbl %edx, 4(v)
17340+ int $4
17341+0:
17342+ _ASM_EXTABLE(0b, 0b)
17343+#endif
17344+
17345+RET_ENDP
17346+BEGIN(add_unchecked)
17347+ addl %eax, (v)
17348+ adcl %edx, 4(v)
17349 RET_ENDP
17350 #undef v
17351
17352@@ -77,6 +99,24 @@ RET_ENDP
17353 BEGIN(add_return)
17354 addl (v), %eax
17355 adcl 4(v), %edx
17356+
17357+#ifdef CONFIG_PAX_REFCOUNT
17358+ into
17359+1234:
17360+ _ASM_EXTABLE(1234b, 2f)
17361+#endif
17362+
17363+ movl %eax, (v)
17364+ movl %edx, 4(v)
17365+
17366+#ifdef CONFIG_PAX_REFCOUNT
17367+2:
17368+#endif
17369+
17370+RET_ENDP
17371+BEGIN(add_return_unchecked)
17372+ addl (v), %eax
17373+ adcl 4(v), %edx
17374 movl %eax, (v)
17375 movl %edx, 4(v)
17376 RET_ENDP
17377@@ -86,6 +126,20 @@ RET_ENDP
17378 BEGIN(sub)
17379 subl %eax, (v)
17380 sbbl %edx, 4(v)
17381+
17382+#ifdef CONFIG_PAX_REFCOUNT
17383+ jno 0f
17384+ addl %eax, (v)
17385+ adcl %edx, 4(v)
17386+ int $4
17387+0:
17388+ _ASM_EXTABLE(0b, 0b)
17389+#endif
17390+
17391+RET_ENDP
17392+BEGIN(sub_unchecked)
17393+ subl %eax, (v)
17394+ sbbl %edx, 4(v)
17395 RET_ENDP
17396 #undef v
17397
17398@@ -96,6 +150,27 @@ BEGIN(sub_return)
17399 sbbl $0, %edx
17400 addl (v), %eax
17401 adcl 4(v), %edx
17402+
17403+#ifdef CONFIG_PAX_REFCOUNT
17404+ into
17405+1234:
17406+ _ASM_EXTABLE(1234b, 2f)
17407+#endif
17408+
17409+ movl %eax, (v)
17410+ movl %edx, 4(v)
17411+
17412+#ifdef CONFIG_PAX_REFCOUNT
17413+2:
17414+#endif
17415+
17416+RET_ENDP
17417+BEGIN(sub_return_unchecked)
17418+ negl %edx
17419+ negl %eax
17420+ sbbl $0, %edx
17421+ addl (v), %eax
17422+ adcl 4(v), %edx
17423 movl %eax, (v)
17424 movl %edx, 4(v)
17425 RET_ENDP
17426@@ -105,6 +180,20 @@ RET_ENDP
17427 BEGIN(inc)
17428 addl $1, (v)
17429 adcl $0, 4(v)
17430+
17431+#ifdef CONFIG_PAX_REFCOUNT
17432+ jno 0f
17433+ subl $1, (v)
17434+ sbbl $0, 4(v)
17435+ int $4
17436+0:
17437+ _ASM_EXTABLE(0b, 0b)
17438+#endif
17439+
17440+RET_ENDP
17441+BEGIN(inc_unchecked)
17442+ addl $1, (v)
17443+ adcl $0, 4(v)
17444 RET_ENDP
17445 #undef v
17446
17447@@ -114,6 +203,26 @@ BEGIN(inc_return)
17448 movl 4(v), %edx
17449 addl $1, %eax
17450 adcl $0, %edx
17451+
17452+#ifdef CONFIG_PAX_REFCOUNT
17453+ into
17454+1234:
17455+ _ASM_EXTABLE(1234b, 2f)
17456+#endif
17457+
17458+ movl %eax, (v)
17459+ movl %edx, 4(v)
17460+
17461+#ifdef CONFIG_PAX_REFCOUNT
17462+2:
17463+#endif
17464+
17465+RET_ENDP
17466+BEGIN(inc_return_unchecked)
17467+ movl (v), %eax
17468+ movl 4(v), %edx
17469+ addl $1, %eax
17470+ adcl $0, %edx
17471 movl %eax, (v)
17472 movl %edx, 4(v)
17473 RET_ENDP
17474@@ -123,6 +232,20 @@ RET_ENDP
17475 BEGIN(dec)
17476 subl $1, (v)
17477 sbbl $0, 4(v)
17478+
17479+#ifdef CONFIG_PAX_REFCOUNT
17480+ jno 0f
17481+ addl $1, (v)
17482+ adcl $0, 4(v)
17483+ int $4
17484+0:
17485+ _ASM_EXTABLE(0b, 0b)
17486+#endif
17487+
17488+RET_ENDP
17489+BEGIN(dec_unchecked)
17490+ subl $1, (v)
17491+ sbbl $0, 4(v)
17492 RET_ENDP
17493 #undef v
17494
17495@@ -132,6 +255,26 @@ BEGIN(dec_return)
17496 movl 4(v), %edx
17497 subl $1, %eax
17498 sbbl $0, %edx
17499+
17500+#ifdef CONFIG_PAX_REFCOUNT
17501+ into
17502+1234:
17503+ _ASM_EXTABLE(1234b, 2f)
17504+#endif
17505+
17506+ movl %eax, (v)
17507+ movl %edx, 4(v)
17508+
17509+#ifdef CONFIG_PAX_REFCOUNT
17510+2:
17511+#endif
17512+
17513+RET_ENDP
17514+BEGIN(dec_return_unchecked)
17515+ movl (v), %eax
17516+ movl 4(v), %edx
17517+ subl $1, %eax
17518+ sbbl $0, %edx
17519 movl %eax, (v)
17520 movl %edx, 4(v)
17521 RET_ENDP
17522@@ -143,6 +286,13 @@ BEGIN(add_unless)
17523 adcl %edx, %edi
17524 addl (v), %eax
17525 adcl 4(v), %edx
17526+
17527+#ifdef CONFIG_PAX_REFCOUNT
17528+ into
17529+1234:
17530+ _ASM_EXTABLE(1234b, 2f)
17531+#endif
17532+
17533 cmpl %eax, %esi
17534 je 3f
17535 1:
17536@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
17537 1:
17538 addl $1, %eax
17539 adcl $0, %edx
17540+
17541+#ifdef CONFIG_PAX_REFCOUNT
17542+ into
17543+1234:
17544+ _ASM_EXTABLE(1234b, 2f)
17545+#endif
17546+
17547 movl %eax, (v)
17548 movl %edx, 4(v)
17549 movl $1, %eax
17550@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
17551 movl 4(v), %edx
17552 subl $1, %eax
17553 sbbl $0, %edx
17554+
17555+#ifdef CONFIG_PAX_REFCOUNT
17556+ into
17557+1234:
17558+ _ASM_EXTABLE(1234b, 1f)
17559+#endif
17560+
17561 js 1f
17562 movl %eax, (v)
17563 movl %edx, 4(v)
17564diff -urNp linux-3.1.1/arch/x86/lib/atomic64_cx8_32.S linux-3.1.1/arch/x86/lib/atomic64_cx8_32.S
17565--- linux-3.1.1/arch/x86/lib/atomic64_cx8_32.S 2011-11-11 15:19:27.000000000 -0500
17566+++ linux-3.1.1/arch/x86/lib/atomic64_cx8_32.S 2011-11-16 18:39:07.000000000 -0500
17567@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
17568 CFI_STARTPROC
17569
17570 read64 %ecx
17571+ pax_force_retaddr
17572 ret
17573 CFI_ENDPROC
17574 ENDPROC(atomic64_read_cx8)
17575
17576+ENTRY(atomic64_read_unchecked_cx8)
17577+ CFI_STARTPROC
17578+
17579+ read64 %ecx
17580+ pax_force_retaddr
17581+ ret
17582+ CFI_ENDPROC
17583+ENDPROC(atomic64_read_unchecked_cx8)
17584+
17585 ENTRY(atomic64_set_cx8)
17586 CFI_STARTPROC
17587
17588@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
17589 cmpxchg8b (%esi)
17590 jne 1b
17591
17592+ pax_force_retaddr
17593 ret
17594 CFI_ENDPROC
17595 ENDPROC(atomic64_set_cx8)
17596
17597+ENTRY(atomic64_set_unchecked_cx8)
17598+ CFI_STARTPROC
17599+
17600+1:
17601+/* we don't need LOCK_PREFIX since aligned 64-bit writes
17602+ * are atomic on 586 and newer */
17603+ cmpxchg8b (%esi)
17604+ jne 1b
17605+
17606+ pax_force_retaddr
17607+ ret
17608+ CFI_ENDPROC
17609+ENDPROC(atomic64_set_unchecked_cx8)
17610+
17611 ENTRY(atomic64_xchg_cx8)
17612 CFI_STARTPROC
17613
17614@@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
17615 cmpxchg8b (%esi)
17616 jne 1b
17617
17618+ pax_force_retaddr
17619 ret
17620 CFI_ENDPROC
17621 ENDPROC(atomic64_xchg_cx8)
17622
17623-.macro addsub_return func ins insc
17624-ENTRY(atomic64_\func\()_return_cx8)
17625+.macro addsub_return func ins insc unchecked=""
17626+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
17627 CFI_STARTPROC
17628 SAVE ebp
17629 SAVE ebx
17630@@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
17631 movl %edx, %ecx
17632 \ins\()l %esi, %ebx
17633 \insc\()l %edi, %ecx
17634+
17635+.ifb \unchecked
17636+#ifdef CONFIG_PAX_REFCOUNT
17637+ into
17638+2:
17639+ _ASM_EXTABLE(2b, 3f)
17640+#endif
17641+.endif
17642+
17643 LOCK_PREFIX
17644 cmpxchg8b (%ebp)
17645 jne 1b
17646-
17647-10:
17648 movl %ebx, %eax
17649 movl %ecx, %edx
17650+
17651+.ifb \unchecked
17652+#ifdef CONFIG_PAX_REFCOUNT
17653+3:
17654+#endif
17655+.endif
17656+
17657 RESTORE edi
17658 RESTORE esi
17659 RESTORE ebx
17660 RESTORE ebp
17661+ pax_force_retaddr
17662 ret
17663 CFI_ENDPROC
17664-ENDPROC(atomic64_\func\()_return_cx8)
17665+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
17666 .endm
17667
17668 addsub_return add add adc
17669 addsub_return sub sub sbb
17670+addsub_return add add adc _unchecked
17671+addsub_return sub sub sbb _unchecked
17672
17673-.macro incdec_return func ins insc
17674-ENTRY(atomic64_\func\()_return_cx8)
17675+.macro incdec_return func ins insc unchecked
17676+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
17677 CFI_STARTPROC
17678 SAVE ebx
17679
17680@@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
17681 movl %edx, %ecx
17682 \ins\()l $1, %ebx
17683 \insc\()l $0, %ecx
17684+
17685+.ifb \unchecked
17686+#ifdef CONFIG_PAX_REFCOUNT
17687+ into
17688+2:
17689+ _ASM_EXTABLE(2b, 3f)
17690+#endif
17691+.endif
17692+
17693 LOCK_PREFIX
17694 cmpxchg8b (%esi)
17695 jne 1b
17696
17697-10:
17698 movl %ebx, %eax
17699 movl %ecx, %edx
17700+
17701+.ifb \unchecked
17702+#ifdef CONFIG_PAX_REFCOUNT
17703+3:
17704+#endif
17705+.endif
17706+
17707 RESTORE ebx
17708+ pax_force_retaddr
17709 ret
17710 CFI_ENDPROC
17711-ENDPROC(atomic64_\func\()_return_cx8)
17712+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
17713 .endm
17714
17715 incdec_return inc add adc
17716 incdec_return dec sub sbb
17717+incdec_return inc add adc _unchecked
17718+incdec_return dec sub sbb _unchecked
17719
17720 ENTRY(atomic64_dec_if_positive_cx8)
17721 CFI_STARTPROC
17722@@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
17723 movl %edx, %ecx
17724 subl $1, %ebx
17725 sbb $0, %ecx
17726+
17727+#ifdef CONFIG_PAX_REFCOUNT
17728+ into
17729+1234:
17730+ _ASM_EXTABLE(1234b, 2f)
17731+#endif
17732+
17733 js 2f
17734 LOCK_PREFIX
17735 cmpxchg8b (%esi)
17736@@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
17737 movl %ebx, %eax
17738 movl %ecx, %edx
17739 RESTORE ebx
17740+ pax_force_retaddr
17741 ret
17742 CFI_ENDPROC
17743 ENDPROC(atomic64_dec_if_positive_cx8)
17744@@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
17745 movl %edx, %ecx
17746 addl %esi, %ebx
17747 adcl %edi, %ecx
17748+
17749+#ifdef CONFIG_PAX_REFCOUNT
17750+ into
17751+1234:
17752+ _ASM_EXTABLE(1234b, 3f)
17753+#endif
17754+
17755 LOCK_PREFIX
17756 cmpxchg8b (%ebp)
17757 jne 1b
17758@@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
17759 CFI_ADJUST_CFA_OFFSET -8
17760 RESTORE ebx
17761 RESTORE ebp
17762+ pax_force_retaddr
17763 ret
17764 4:
17765 cmpl %edx, 4(%esp)
17766@@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
17767 movl %edx, %ecx
17768 addl $1, %ebx
17769 adcl $0, %ecx
17770+
17771+#ifdef CONFIG_PAX_REFCOUNT
17772+ into
17773+1234:
17774+ _ASM_EXTABLE(1234b, 3f)
17775+#endif
17776+
17777 LOCK_PREFIX
17778 cmpxchg8b (%esi)
17779 jne 1b
17780@@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
17781 movl $1, %eax
17782 3:
17783 RESTORE ebx
17784+ pax_force_retaddr
17785 ret
17786 4:
17787 testl %edx, %edx
17788diff -urNp linux-3.1.1/arch/x86/lib/checksum_32.S linux-3.1.1/arch/x86/lib/checksum_32.S
17789--- linux-3.1.1/arch/x86/lib/checksum_32.S 2011-11-11 15:19:27.000000000 -0500
17790+++ linux-3.1.1/arch/x86/lib/checksum_32.S 2011-11-16 18:39:07.000000000 -0500
17791@@ -28,7 +28,8 @@
17792 #include <linux/linkage.h>
17793 #include <asm/dwarf2.h>
17794 #include <asm/errno.h>
17795-
17796+#include <asm/segment.h>
17797+
17798 /*
17799 * computes a partial checksum, e.g. for TCP/UDP fragments
17800 */
17801@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
17802
17803 #define ARGBASE 16
17804 #define FP 12
17805-
17806-ENTRY(csum_partial_copy_generic)
17807+
17808+ENTRY(csum_partial_copy_generic_to_user)
17809 CFI_STARTPROC
17810+
17811+#ifdef CONFIG_PAX_MEMORY_UDEREF
17812+ pushl_cfi %gs
17813+ popl_cfi %es
17814+ jmp csum_partial_copy_generic
17815+#endif
17816+
17817+ENTRY(csum_partial_copy_generic_from_user)
17818+
17819+#ifdef CONFIG_PAX_MEMORY_UDEREF
17820+ pushl_cfi %gs
17821+ popl_cfi %ds
17822+#endif
17823+
17824+ENTRY(csum_partial_copy_generic)
17825 subl $4,%esp
17826 CFI_ADJUST_CFA_OFFSET 4
17827 pushl_cfi %edi
17828@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
17829 jmp 4f
17830 SRC(1: movw (%esi), %bx )
17831 addl $2, %esi
17832-DST( movw %bx, (%edi) )
17833+DST( movw %bx, %es:(%edi) )
17834 addl $2, %edi
17835 addw %bx, %ax
17836 adcl $0, %eax
17837@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
17838 SRC(1: movl (%esi), %ebx )
17839 SRC( movl 4(%esi), %edx )
17840 adcl %ebx, %eax
17841-DST( movl %ebx, (%edi) )
17842+DST( movl %ebx, %es:(%edi) )
17843 adcl %edx, %eax
17844-DST( movl %edx, 4(%edi) )
17845+DST( movl %edx, %es:4(%edi) )
17846
17847 SRC( movl 8(%esi), %ebx )
17848 SRC( movl 12(%esi), %edx )
17849 adcl %ebx, %eax
17850-DST( movl %ebx, 8(%edi) )
17851+DST( movl %ebx, %es:8(%edi) )
17852 adcl %edx, %eax
17853-DST( movl %edx, 12(%edi) )
17854+DST( movl %edx, %es:12(%edi) )
17855
17856 SRC( movl 16(%esi), %ebx )
17857 SRC( movl 20(%esi), %edx )
17858 adcl %ebx, %eax
17859-DST( movl %ebx, 16(%edi) )
17860+DST( movl %ebx, %es:16(%edi) )
17861 adcl %edx, %eax
17862-DST( movl %edx, 20(%edi) )
17863+DST( movl %edx, %es:20(%edi) )
17864
17865 SRC( movl 24(%esi), %ebx )
17866 SRC( movl 28(%esi), %edx )
17867 adcl %ebx, %eax
17868-DST( movl %ebx, 24(%edi) )
17869+DST( movl %ebx, %es:24(%edi) )
17870 adcl %edx, %eax
17871-DST( movl %edx, 28(%edi) )
17872+DST( movl %edx, %es:28(%edi) )
17873
17874 lea 32(%esi), %esi
17875 lea 32(%edi), %edi
17876@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
17877 shrl $2, %edx # This clears CF
17878 SRC(3: movl (%esi), %ebx )
17879 adcl %ebx, %eax
17880-DST( movl %ebx, (%edi) )
17881+DST( movl %ebx, %es:(%edi) )
17882 lea 4(%esi), %esi
17883 lea 4(%edi), %edi
17884 dec %edx
17885@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
17886 jb 5f
17887 SRC( movw (%esi), %cx )
17888 leal 2(%esi), %esi
17889-DST( movw %cx, (%edi) )
17890+DST( movw %cx, %es:(%edi) )
17891 leal 2(%edi), %edi
17892 je 6f
17893 shll $16,%ecx
17894 SRC(5: movb (%esi), %cl )
17895-DST( movb %cl, (%edi) )
17896+DST( movb %cl, %es:(%edi) )
17897 6: addl %ecx, %eax
17898 adcl $0, %eax
17899 7:
17900@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
17901
17902 6001:
17903 movl ARGBASE+20(%esp), %ebx # src_err_ptr
17904- movl $-EFAULT, (%ebx)
17905+ movl $-EFAULT, %ss:(%ebx)
17906
17907 # zero the complete destination - computing the rest
17908 # is too much work
17909@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
17910
17911 6002:
17912 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
17913- movl $-EFAULT,(%ebx)
17914+ movl $-EFAULT,%ss:(%ebx)
17915 jmp 5000b
17916
17917 .previous
17918
17919+ pushl_cfi %ss
17920+ popl_cfi %ds
17921+ pushl_cfi %ss
17922+ popl_cfi %es
17923 popl_cfi %ebx
17924 CFI_RESTORE ebx
17925 popl_cfi %esi
17926@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
17927 popl_cfi %ecx # equivalent to addl $4,%esp
17928 ret
17929 CFI_ENDPROC
17930-ENDPROC(csum_partial_copy_generic)
17931+ENDPROC(csum_partial_copy_generic_to_user)
17932
17933 #else
17934
17935 /* Version for PentiumII/PPro */
17936
17937 #define ROUND1(x) \
17938+ nop; nop; nop; \
17939 SRC(movl x(%esi), %ebx ) ; \
17940 addl %ebx, %eax ; \
17941- DST(movl %ebx, x(%edi) ) ;
17942+ DST(movl %ebx, %es:x(%edi)) ;
17943
17944 #define ROUND(x) \
17945+ nop; nop; nop; \
17946 SRC(movl x(%esi), %ebx ) ; \
17947 adcl %ebx, %eax ; \
17948- DST(movl %ebx, x(%edi) ) ;
17949+ DST(movl %ebx, %es:x(%edi)) ;
17950
17951 #define ARGBASE 12
17952-
17953-ENTRY(csum_partial_copy_generic)
17954+
17955+ENTRY(csum_partial_copy_generic_to_user)
17956 CFI_STARTPROC
17957+
17958+#ifdef CONFIG_PAX_MEMORY_UDEREF
17959+ pushl_cfi %gs
17960+ popl_cfi %es
17961+ jmp csum_partial_copy_generic
17962+#endif
17963+
17964+ENTRY(csum_partial_copy_generic_from_user)
17965+
17966+#ifdef CONFIG_PAX_MEMORY_UDEREF
17967+ pushl_cfi %gs
17968+ popl_cfi %ds
17969+#endif
17970+
17971+ENTRY(csum_partial_copy_generic)
17972 pushl_cfi %ebx
17973 CFI_REL_OFFSET ebx, 0
17974 pushl_cfi %edi
17975@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
17976 subl %ebx, %edi
17977 lea -1(%esi),%edx
17978 andl $-32,%edx
17979- lea 3f(%ebx,%ebx), %ebx
17980+ lea 3f(%ebx,%ebx,2), %ebx
17981 testl %esi, %esi
17982 jmp *%ebx
17983 1: addl $64,%esi
17984@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
17985 jb 5f
17986 SRC( movw (%esi), %dx )
17987 leal 2(%esi), %esi
17988-DST( movw %dx, (%edi) )
17989+DST( movw %dx, %es:(%edi) )
17990 leal 2(%edi), %edi
17991 je 6f
17992 shll $16,%edx
17993 5:
17994 SRC( movb (%esi), %dl )
17995-DST( movb %dl, (%edi) )
17996+DST( movb %dl, %es:(%edi) )
17997 6: addl %edx, %eax
17998 adcl $0, %eax
17999 7:
18000 .section .fixup, "ax"
18001 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
18002- movl $-EFAULT, (%ebx)
18003+ movl $-EFAULT, %ss:(%ebx)
18004 # zero the complete destination (computing the rest is too much work)
18005 movl ARGBASE+8(%esp),%edi # dst
18006 movl ARGBASE+12(%esp),%ecx # len
18007@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
18008 rep; stosb
18009 jmp 7b
18010 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
18011- movl $-EFAULT, (%ebx)
18012+ movl $-EFAULT, %ss:(%ebx)
18013 jmp 7b
18014 .previous
18015
18016+#ifdef CONFIG_PAX_MEMORY_UDEREF
18017+ pushl_cfi %ss
18018+ popl_cfi %ds
18019+ pushl_cfi %ss
18020+ popl_cfi %es
18021+#endif
18022+
18023 popl_cfi %esi
18024 CFI_RESTORE esi
18025 popl_cfi %edi
18026@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
18027 CFI_RESTORE ebx
18028 ret
18029 CFI_ENDPROC
18030-ENDPROC(csum_partial_copy_generic)
18031+ENDPROC(csum_partial_copy_generic_to_user)
18032
18033 #undef ROUND
18034 #undef ROUND1
18035diff -urNp linux-3.1.1/arch/x86/lib/clear_page_64.S linux-3.1.1/arch/x86/lib/clear_page_64.S
18036--- linux-3.1.1/arch/x86/lib/clear_page_64.S 2011-11-11 15:19:27.000000000 -0500
18037+++ linux-3.1.1/arch/x86/lib/clear_page_64.S 2011-11-16 18:39:07.000000000 -0500
18038@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
18039 movl $4096/8,%ecx
18040 xorl %eax,%eax
18041 rep stosq
18042+ pax_force_retaddr
18043 ret
18044 CFI_ENDPROC
18045 ENDPROC(clear_page_c)
18046@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
18047 movl $4096,%ecx
18048 xorl %eax,%eax
18049 rep stosb
18050+ pax_force_retaddr
18051 ret
18052 CFI_ENDPROC
18053 ENDPROC(clear_page_c_e)
18054@@ -43,6 +45,7 @@ ENTRY(clear_page)
18055 leaq 64(%rdi),%rdi
18056 jnz .Lloop
18057 nop
18058+ pax_force_retaddr
18059 ret
18060 CFI_ENDPROC
18061 .Lclear_page_end:
18062@@ -58,7 +61,7 @@ ENDPROC(clear_page)
18063
18064 #include <asm/cpufeature.h>
18065
18066- .section .altinstr_replacement,"ax"
18067+ .section .altinstr_replacement,"a"
18068 1: .byte 0xeb /* jmp <disp8> */
18069 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
18070 2: .byte 0xeb /* jmp <disp8> */
18071diff -urNp linux-3.1.1/arch/x86/lib/cmpxchg16b_emu.S linux-3.1.1/arch/x86/lib/cmpxchg16b_emu.S
18072--- linux-3.1.1/arch/x86/lib/cmpxchg16b_emu.S 2011-11-11 15:19:27.000000000 -0500
18073+++ linux-3.1.1/arch/x86/lib/cmpxchg16b_emu.S 2011-11-16 18:39:07.000000000 -0500
18074@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
18075
18076 popf
18077 mov $1, %al
18078+ pax_force_retaddr
18079 ret
18080
18081 not_same:
18082 popf
18083 xor %al,%al
18084+ pax_force_retaddr
18085 ret
18086
18087 CFI_ENDPROC
18088diff -urNp linux-3.1.1/arch/x86/lib/copy_page_64.S linux-3.1.1/arch/x86/lib/copy_page_64.S
18089--- linux-3.1.1/arch/x86/lib/copy_page_64.S 2011-11-11 15:19:27.000000000 -0500
18090+++ linux-3.1.1/arch/x86/lib/copy_page_64.S 2011-11-16 18:39:07.000000000 -0500
18091@@ -9,6 +9,7 @@ copy_page_c:
18092 CFI_STARTPROC
18093 movl $4096/8,%ecx
18094 rep movsq
18095+ pax_force_retaddr
18096 ret
18097 CFI_ENDPROC
18098 ENDPROC(copy_page_c)
18099@@ -95,6 +96,7 @@ ENTRY(copy_page)
18100 CFI_RESTORE r13
18101 addq $3*8,%rsp
18102 CFI_ADJUST_CFA_OFFSET -3*8
18103+ pax_force_retaddr
18104 ret
18105 .Lcopy_page_end:
18106 CFI_ENDPROC
18107@@ -105,7 +107,7 @@ ENDPROC(copy_page)
18108
18109 #include <asm/cpufeature.h>
18110
18111- .section .altinstr_replacement,"ax"
18112+ .section .altinstr_replacement,"a"
18113 1: .byte 0xeb /* jmp <disp8> */
18114 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
18115 2:
18116diff -urNp linux-3.1.1/arch/x86/lib/copy_user_64.S linux-3.1.1/arch/x86/lib/copy_user_64.S
18117--- linux-3.1.1/arch/x86/lib/copy_user_64.S 2011-11-11 15:19:27.000000000 -0500
18118+++ linux-3.1.1/arch/x86/lib/copy_user_64.S 2011-11-16 18:39:07.000000000 -0500
18119@@ -16,6 +16,7 @@
18120 #include <asm/thread_info.h>
18121 #include <asm/cpufeature.h>
18122 #include <asm/alternative-asm.h>
18123+#include <asm/pgtable.h>
18124
18125 /*
18126 * By placing feature2 after feature1 in altinstructions section, we logically
18127@@ -29,7 +30,7 @@
18128 .byte 0xe9 /* 32bit jump */
18129 .long \orig-1f /* by default jump to orig */
18130 1:
18131- .section .altinstr_replacement,"ax"
18132+ .section .altinstr_replacement,"a"
18133 2: .byte 0xe9 /* near jump with 32bit immediate */
18134 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
18135 3: .byte 0xe9 /* near jump with 32bit immediate */
18136@@ -71,47 +72,20 @@
18137 #endif
18138 .endm
18139
18140-/* Standard copy_to_user with segment limit checking */
18141-ENTRY(_copy_to_user)
18142- CFI_STARTPROC
18143- GET_THREAD_INFO(%rax)
18144- movq %rdi,%rcx
18145- addq %rdx,%rcx
18146- jc bad_to_user
18147- cmpq TI_addr_limit(%rax),%rcx
18148- ja bad_to_user
18149- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
18150- copy_user_generic_unrolled,copy_user_generic_string, \
18151- copy_user_enhanced_fast_string
18152- CFI_ENDPROC
18153-ENDPROC(_copy_to_user)
18154-
18155-/* Standard copy_from_user with segment limit checking */
18156-ENTRY(_copy_from_user)
18157- CFI_STARTPROC
18158- GET_THREAD_INFO(%rax)
18159- movq %rsi,%rcx
18160- addq %rdx,%rcx
18161- jc bad_from_user
18162- cmpq TI_addr_limit(%rax),%rcx
18163- ja bad_from_user
18164- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
18165- copy_user_generic_unrolled,copy_user_generic_string, \
18166- copy_user_enhanced_fast_string
18167- CFI_ENDPROC
18168-ENDPROC(_copy_from_user)
18169-
18170 .section .fixup,"ax"
18171 /* must zero dest */
18172 ENTRY(bad_from_user)
18173 bad_from_user:
18174 CFI_STARTPROC
18175+ testl %edx,%edx
18176+ js bad_to_user
18177 movl %edx,%ecx
18178 xorl %eax,%eax
18179 rep
18180 stosb
18181 bad_to_user:
18182 movl %edx,%eax
18183+ pax_force_retaddr
18184 ret
18185 CFI_ENDPROC
18186 ENDPROC(bad_from_user)
18187@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
18188 decl %ecx
18189 jnz 21b
18190 23: xor %eax,%eax
18191+ pax_force_retaddr
18192 ret
18193
18194 .section .fixup,"ax"
18195@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
18196 3: rep
18197 movsb
18198 4: xorl %eax,%eax
18199+ pax_force_retaddr
18200 ret
18201
18202 .section .fixup,"ax"
18203@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
18204 1: rep
18205 movsb
18206 2: xorl %eax,%eax
18207+ pax_force_retaddr
18208 ret
18209
18210 .section .fixup,"ax"
18211diff -urNp linux-3.1.1/arch/x86/lib/copy_user_nocache_64.S linux-3.1.1/arch/x86/lib/copy_user_nocache_64.S
18212--- linux-3.1.1/arch/x86/lib/copy_user_nocache_64.S 2011-11-11 15:19:27.000000000 -0500
18213+++ linux-3.1.1/arch/x86/lib/copy_user_nocache_64.S 2011-11-16 18:39:07.000000000 -0500
18214@@ -8,12 +8,14 @@
18215
18216 #include <linux/linkage.h>
18217 #include <asm/dwarf2.h>
18218+#include <asm/alternative-asm.h>
18219
18220 #define FIX_ALIGNMENT 1
18221
18222 #include <asm/current.h>
18223 #include <asm/asm-offsets.h>
18224 #include <asm/thread_info.h>
18225+#include <asm/pgtable.h>
18226
18227 .macro ALIGN_DESTINATION
18228 #ifdef FIX_ALIGNMENT
18229@@ -50,6 +52,15 @@
18230 */
18231 ENTRY(__copy_user_nocache)
18232 CFI_STARTPROC
18233+
18234+#ifdef CONFIG_PAX_MEMORY_UDEREF
18235+ mov $PAX_USER_SHADOW_BASE,%rcx
18236+ cmp %rcx,%rsi
18237+ jae 1f
18238+ add %rcx,%rsi
18239+1:
18240+#endif
18241+
18242 cmpl $8,%edx
18243 jb 20f /* less then 8 bytes, go to byte copy loop */
18244 ALIGN_DESTINATION
18245@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
18246 jnz 21b
18247 23: xorl %eax,%eax
18248 sfence
18249+ pax_force_retaddr
18250 ret
18251
18252 .section .fixup,"ax"
18253diff -urNp linux-3.1.1/arch/x86/lib/csum-copy_64.S linux-3.1.1/arch/x86/lib/csum-copy_64.S
18254--- linux-3.1.1/arch/x86/lib/csum-copy_64.S 2011-11-11 15:19:27.000000000 -0500
18255+++ linux-3.1.1/arch/x86/lib/csum-copy_64.S 2011-11-16 18:39:07.000000000 -0500
18256@@ -8,6 +8,7 @@
18257 #include <linux/linkage.h>
18258 #include <asm/dwarf2.h>
18259 #include <asm/errno.h>
18260+#include <asm/alternative-asm.h>
18261
18262 /*
18263 * Checksum copy with exception handling.
18264@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
18265 CFI_RESTORE rbp
18266 addq $7*8, %rsp
18267 CFI_ADJUST_CFA_OFFSET -7*8
18268+ pax_force_retaddr
18269 ret
18270 CFI_RESTORE_STATE
18271
18272diff -urNp linux-3.1.1/arch/x86/lib/csum-wrappers_64.c linux-3.1.1/arch/x86/lib/csum-wrappers_64.c
18273--- linux-3.1.1/arch/x86/lib/csum-wrappers_64.c 2011-11-11 15:19:27.000000000 -0500
18274+++ linux-3.1.1/arch/x86/lib/csum-wrappers_64.c 2011-11-16 18:39:07.000000000 -0500
18275@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void _
18276 len -= 2;
18277 }
18278 }
18279- isum = csum_partial_copy_generic((__force const void *)src,
18280+
18281+#ifdef CONFIG_PAX_MEMORY_UDEREF
18282+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18283+ src += PAX_USER_SHADOW_BASE;
18284+#endif
18285+
18286+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
18287 dst, len, isum, errp, NULL);
18288 if (unlikely(*errp))
18289 goto out_err;
18290@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *sr
18291 }
18292
18293 *errp = 0;
18294- return csum_partial_copy_generic(src, (void __force *)dst,
18295+
18296+#ifdef CONFIG_PAX_MEMORY_UDEREF
18297+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
18298+ dst += PAX_USER_SHADOW_BASE;
18299+#endif
18300+
18301+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
18302 len, isum, NULL, errp);
18303 }
18304 EXPORT_SYMBOL(csum_partial_copy_to_user);
18305diff -urNp linux-3.1.1/arch/x86/lib/getuser.S linux-3.1.1/arch/x86/lib/getuser.S
18306--- linux-3.1.1/arch/x86/lib/getuser.S 2011-11-11 15:19:27.000000000 -0500
18307+++ linux-3.1.1/arch/x86/lib/getuser.S 2011-11-16 18:39:07.000000000 -0500
18308@@ -33,15 +33,38 @@
18309 #include <asm/asm-offsets.h>
18310 #include <asm/thread_info.h>
18311 #include <asm/asm.h>
18312+#include <asm/segment.h>
18313+#include <asm/pgtable.h>
18314+#include <asm/alternative-asm.h>
18315+
18316+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18317+#define __copyuser_seg gs;
18318+#else
18319+#define __copyuser_seg
18320+#endif
18321
18322 .text
18323 ENTRY(__get_user_1)
18324 CFI_STARTPROC
18325+
18326+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18327 GET_THREAD_INFO(%_ASM_DX)
18328 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18329 jae bad_get_user
18330-1: movzb (%_ASM_AX),%edx
18331+
18332+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18333+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18334+ cmp %_ASM_DX,%_ASM_AX
18335+ jae 1234f
18336+ add %_ASM_DX,%_ASM_AX
18337+1234:
18338+#endif
18339+
18340+#endif
18341+
18342+1: __copyuser_seg movzb (%_ASM_AX),%edx
18343 xor %eax,%eax
18344+ pax_force_retaddr
18345 ret
18346 CFI_ENDPROC
18347 ENDPROC(__get_user_1)
18348@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
18349 ENTRY(__get_user_2)
18350 CFI_STARTPROC
18351 add $1,%_ASM_AX
18352+
18353+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18354 jc bad_get_user
18355 GET_THREAD_INFO(%_ASM_DX)
18356 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18357 jae bad_get_user
18358-2: movzwl -1(%_ASM_AX),%edx
18359+
18360+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18361+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18362+ cmp %_ASM_DX,%_ASM_AX
18363+ jae 1234f
18364+ add %_ASM_DX,%_ASM_AX
18365+1234:
18366+#endif
18367+
18368+#endif
18369+
18370+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
18371 xor %eax,%eax
18372+ pax_force_retaddr
18373 ret
18374 CFI_ENDPROC
18375 ENDPROC(__get_user_2)
18376@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
18377 ENTRY(__get_user_4)
18378 CFI_STARTPROC
18379 add $3,%_ASM_AX
18380+
18381+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18382 jc bad_get_user
18383 GET_THREAD_INFO(%_ASM_DX)
18384 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18385 jae bad_get_user
18386-3: mov -3(%_ASM_AX),%edx
18387+
18388+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18389+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18390+ cmp %_ASM_DX,%_ASM_AX
18391+ jae 1234f
18392+ add %_ASM_DX,%_ASM_AX
18393+1234:
18394+#endif
18395+
18396+#endif
18397+
18398+3: __copyuser_seg mov -3(%_ASM_AX),%edx
18399 xor %eax,%eax
18400+ pax_force_retaddr
18401 ret
18402 CFI_ENDPROC
18403 ENDPROC(__get_user_4)
18404@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
18405 GET_THREAD_INFO(%_ASM_DX)
18406 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18407 jae bad_get_user
18408+
18409+#ifdef CONFIG_PAX_MEMORY_UDEREF
18410+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18411+ cmp %_ASM_DX,%_ASM_AX
18412+ jae 1234f
18413+ add %_ASM_DX,%_ASM_AX
18414+1234:
18415+#endif
18416+
18417 4: movq -7(%_ASM_AX),%_ASM_DX
18418 xor %eax,%eax
18419+ pax_force_retaddr
18420 ret
18421 CFI_ENDPROC
18422 ENDPROC(__get_user_8)
18423@@ -91,6 +152,7 @@ bad_get_user:
18424 CFI_STARTPROC
18425 xor %edx,%edx
18426 mov $(-EFAULT),%_ASM_AX
18427+ pax_force_retaddr
18428 ret
18429 CFI_ENDPROC
18430 END(bad_get_user)
18431diff -urNp linux-3.1.1/arch/x86/lib/insn.c linux-3.1.1/arch/x86/lib/insn.c
18432--- linux-3.1.1/arch/x86/lib/insn.c 2011-11-11 15:19:27.000000000 -0500
18433+++ linux-3.1.1/arch/x86/lib/insn.c 2011-11-16 18:39:07.000000000 -0500
18434@@ -21,6 +21,11 @@
18435 #include <linux/string.h>
18436 #include <asm/inat.h>
18437 #include <asm/insn.h>
18438+#ifdef __KERNEL__
18439+#include <asm/pgtable_types.h>
18440+#else
18441+#define ktla_ktva(addr) addr
18442+#endif
18443
18444 #define get_next(t, insn) \
18445 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
18446@@ -40,8 +45,8 @@
18447 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
18448 {
18449 memset(insn, 0, sizeof(*insn));
18450- insn->kaddr = kaddr;
18451- insn->next_byte = kaddr;
18452+ insn->kaddr = ktla_ktva(kaddr);
18453+ insn->next_byte = ktla_ktva(kaddr);
18454 insn->x86_64 = x86_64 ? 1 : 0;
18455 insn->opnd_bytes = 4;
18456 if (x86_64)
18457diff -urNp linux-3.1.1/arch/x86/lib/iomap_copy_64.S linux-3.1.1/arch/x86/lib/iomap_copy_64.S
18458--- linux-3.1.1/arch/x86/lib/iomap_copy_64.S 2011-11-11 15:19:27.000000000 -0500
18459+++ linux-3.1.1/arch/x86/lib/iomap_copy_64.S 2011-11-16 18:39:07.000000000 -0500
18460@@ -17,6 +17,7 @@
18461
18462 #include <linux/linkage.h>
18463 #include <asm/dwarf2.h>
18464+#include <asm/alternative-asm.h>
18465
18466 /*
18467 * override generic version in lib/iomap_copy.c
18468@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
18469 CFI_STARTPROC
18470 movl %edx,%ecx
18471 rep movsd
18472+ pax_force_retaddr
18473 ret
18474 CFI_ENDPROC
18475 ENDPROC(__iowrite32_copy)
18476diff -urNp linux-3.1.1/arch/x86/lib/memcpy_64.S linux-3.1.1/arch/x86/lib/memcpy_64.S
18477--- linux-3.1.1/arch/x86/lib/memcpy_64.S 2011-11-11 15:19:27.000000000 -0500
18478+++ linux-3.1.1/arch/x86/lib/memcpy_64.S 2011-11-16 18:39:07.000000000 -0500
18479@@ -34,6 +34,7 @@
18480 rep movsq
18481 movl %edx, %ecx
18482 rep movsb
18483+ pax_force_retaddr
18484 ret
18485 .Lmemcpy_e:
18486 .previous
18487@@ -51,6 +52,7 @@
18488
18489 movl %edx, %ecx
18490 rep movsb
18491+ pax_force_retaddr
18492 ret
18493 .Lmemcpy_e_e:
18494 .previous
18495@@ -141,6 +143,7 @@ ENTRY(memcpy)
18496 movq %r9, 1*8(%rdi)
18497 movq %r10, -2*8(%rdi, %rdx)
18498 movq %r11, -1*8(%rdi, %rdx)
18499+ pax_force_retaddr
18500 retq
18501 .p2align 4
18502 .Lless_16bytes:
18503@@ -153,6 +156,7 @@ ENTRY(memcpy)
18504 movq -1*8(%rsi, %rdx), %r9
18505 movq %r8, 0*8(%rdi)
18506 movq %r9, -1*8(%rdi, %rdx)
18507+ pax_force_retaddr
18508 retq
18509 .p2align 4
18510 .Lless_8bytes:
18511@@ -166,6 +170,7 @@ ENTRY(memcpy)
18512 movl -4(%rsi, %rdx), %r8d
18513 movl %ecx, (%rdi)
18514 movl %r8d, -4(%rdi, %rdx)
18515+ pax_force_retaddr
18516 retq
18517 .p2align 4
18518 .Lless_3bytes:
18519@@ -183,6 +188,7 @@ ENTRY(memcpy)
18520 jnz .Lloop_1
18521
18522 .Lend:
18523+ pax_force_retaddr
18524 retq
18525 CFI_ENDPROC
18526 ENDPROC(memcpy)
18527diff -urNp linux-3.1.1/arch/x86/lib/memmove_64.S linux-3.1.1/arch/x86/lib/memmove_64.S
18528--- linux-3.1.1/arch/x86/lib/memmove_64.S 2011-11-11 15:19:27.000000000 -0500
18529+++ linux-3.1.1/arch/x86/lib/memmove_64.S 2011-11-16 18:39:07.000000000 -0500
18530@@ -202,6 +202,7 @@ ENTRY(memmove)
18531 movb (%rsi), %r11b
18532 movb %r11b, (%rdi)
18533 13:
18534+ pax_force_retaddr
18535 retq
18536 CFI_ENDPROC
18537
18538@@ -210,6 +211,7 @@ ENTRY(memmove)
18539 /* Forward moving data. */
18540 movq %rdx, %rcx
18541 rep movsb
18542+ pax_force_retaddr
18543 retq
18544 .Lmemmove_end_forward_efs:
18545 .previous
18546diff -urNp linux-3.1.1/arch/x86/lib/memset_64.S linux-3.1.1/arch/x86/lib/memset_64.S
18547--- linux-3.1.1/arch/x86/lib/memset_64.S 2011-11-11 15:19:27.000000000 -0500
18548+++ linux-3.1.1/arch/x86/lib/memset_64.S 2011-11-16 18:39:07.000000000 -0500
18549@@ -31,6 +31,7 @@
18550 movl %r8d,%ecx
18551 rep stosb
18552 movq %r9,%rax
18553+ pax_force_retaddr
18554 ret
18555 .Lmemset_e:
18556 .previous
18557@@ -53,6 +54,7 @@
18558 movl %edx,%ecx
18559 rep stosb
18560 movq %r9,%rax
18561+ pax_force_retaddr
18562 ret
18563 .Lmemset_e_e:
18564 .previous
18565@@ -121,6 +123,7 @@ ENTRY(__memset)
18566
18567 .Lende:
18568 movq %r10,%rax
18569+ pax_force_retaddr
18570 ret
18571
18572 CFI_RESTORE_STATE
18573diff -urNp linux-3.1.1/arch/x86/lib/mmx_32.c linux-3.1.1/arch/x86/lib/mmx_32.c
18574--- linux-3.1.1/arch/x86/lib/mmx_32.c 2011-11-11 15:19:27.000000000 -0500
18575+++ linux-3.1.1/arch/x86/lib/mmx_32.c 2011-11-16 18:39:07.000000000 -0500
18576@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
18577 {
18578 void *p;
18579 int i;
18580+ unsigned long cr0;
18581
18582 if (unlikely(in_interrupt()))
18583 return __memcpy(to, from, len);
18584@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
18585 kernel_fpu_begin();
18586
18587 __asm__ __volatile__ (
18588- "1: prefetch (%0)\n" /* This set is 28 bytes */
18589- " prefetch 64(%0)\n"
18590- " prefetch 128(%0)\n"
18591- " prefetch 192(%0)\n"
18592- " prefetch 256(%0)\n"
18593+ "1: prefetch (%1)\n" /* This set is 28 bytes */
18594+ " prefetch 64(%1)\n"
18595+ " prefetch 128(%1)\n"
18596+ " prefetch 192(%1)\n"
18597+ " prefetch 256(%1)\n"
18598 "2: \n"
18599 ".section .fixup, \"ax\"\n"
18600- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18601+ "3: \n"
18602+
18603+#ifdef CONFIG_PAX_KERNEXEC
18604+ " movl %%cr0, %0\n"
18605+ " movl %0, %%eax\n"
18606+ " andl $0xFFFEFFFF, %%eax\n"
18607+ " movl %%eax, %%cr0\n"
18608+#endif
18609+
18610+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18611+
18612+#ifdef CONFIG_PAX_KERNEXEC
18613+ " movl %0, %%cr0\n"
18614+#endif
18615+
18616 " jmp 2b\n"
18617 ".previous\n"
18618 _ASM_EXTABLE(1b, 3b)
18619- : : "r" (from));
18620+ : "=&r" (cr0) : "r" (from) : "ax");
18621
18622 for ( ; i > 5; i--) {
18623 __asm__ __volatile__ (
18624- "1: prefetch 320(%0)\n"
18625- "2: movq (%0), %%mm0\n"
18626- " movq 8(%0), %%mm1\n"
18627- " movq 16(%0), %%mm2\n"
18628- " movq 24(%0), %%mm3\n"
18629- " movq %%mm0, (%1)\n"
18630- " movq %%mm1, 8(%1)\n"
18631- " movq %%mm2, 16(%1)\n"
18632- " movq %%mm3, 24(%1)\n"
18633- " movq 32(%0), %%mm0\n"
18634- " movq 40(%0), %%mm1\n"
18635- " movq 48(%0), %%mm2\n"
18636- " movq 56(%0), %%mm3\n"
18637- " movq %%mm0, 32(%1)\n"
18638- " movq %%mm1, 40(%1)\n"
18639- " movq %%mm2, 48(%1)\n"
18640- " movq %%mm3, 56(%1)\n"
18641+ "1: prefetch 320(%1)\n"
18642+ "2: movq (%1), %%mm0\n"
18643+ " movq 8(%1), %%mm1\n"
18644+ " movq 16(%1), %%mm2\n"
18645+ " movq 24(%1), %%mm3\n"
18646+ " movq %%mm0, (%2)\n"
18647+ " movq %%mm1, 8(%2)\n"
18648+ " movq %%mm2, 16(%2)\n"
18649+ " movq %%mm3, 24(%2)\n"
18650+ " movq 32(%1), %%mm0\n"
18651+ " movq 40(%1), %%mm1\n"
18652+ " movq 48(%1), %%mm2\n"
18653+ " movq 56(%1), %%mm3\n"
18654+ " movq %%mm0, 32(%2)\n"
18655+ " movq %%mm1, 40(%2)\n"
18656+ " movq %%mm2, 48(%2)\n"
18657+ " movq %%mm3, 56(%2)\n"
18658 ".section .fixup, \"ax\"\n"
18659- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18660+ "3:\n"
18661+
18662+#ifdef CONFIG_PAX_KERNEXEC
18663+ " movl %%cr0, %0\n"
18664+ " movl %0, %%eax\n"
18665+ " andl $0xFFFEFFFF, %%eax\n"
18666+ " movl %%eax, %%cr0\n"
18667+#endif
18668+
18669+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18670+
18671+#ifdef CONFIG_PAX_KERNEXEC
18672+ " movl %0, %%cr0\n"
18673+#endif
18674+
18675 " jmp 2b\n"
18676 ".previous\n"
18677 _ASM_EXTABLE(1b, 3b)
18678- : : "r" (from), "r" (to) : "memory");
18679+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18680
18681 from += 64;
18682 to += 64;
18683@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
18684 static void fast_copy_page(void *to, void *from)
18685 {
18686 int i;
18687+ unsigned long cr0;
18688
18689 kernel_fpu_begin();
18690
18691@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
18692 * but that is for later. -AV
18693 */
18694 __asm__ __volatile__(
18695- "1: prefetch (%0)\n"
18696- " prefetch 64(%0)\n"
18697- " prefetch 128(%0)\n"
18698- " prefetch 192(%0)\n"
18699- " prefetch 256(%0)\n"
18700+ "1: prefetch (%1)\n"
18701+ " prefetch 64(%1)\n"
18702+ " prefetch 128(%1)\n"
18703+ " prefetch 192(%1)\n"
18704+ " prefetch 256(%1)\n"
18705 "2: \n"
18706 ".section .fixup, \"ax\"\n"
18707- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18708+ "3: \n"
18709+
18710+#ifdef CONFIG_PAX_KERNEXEC
18711+ " movl %%cr0, %0\n"
18712+ " movl %0, %%eax\n"
18713+ " andl $0xFFFEFFFF, %%eax\n"
18714+ " movl %%eax, %%cr0\n"
18715+#endif
18716+
18717+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18718+
18719+#ifdef CONFIG_PAX_KERNEXEC
18720+ " movl %0, %%cr0\n"
18721+#endif
18722+
18723 " jmp 2b\n"
18724 ".previous\n"
18725- _ASM_EXTABLE(1b, 3b) : : "r" (from));
18726+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18727
18728 for (i = 0; i < (4096-320)/64; i++) {
18729 __asm__ __volatile__ (
18730- "1: prefetch 320(%0)\n"
18731- "2: movq (%0), %%mm0\n"
18732- " movntq %%mm0, (%1)\n"
18733- " movq 8(%0), %%mm1\n"
18734- " movntq %%mm1, 8(%1)\n"
18735- " movq 16(%0), %%mm2\n"
18736- " movntq %%mm2, 16(%1)\n"
18737- " movq 24(%0), %%mm3\n"
18738- " movntq %%mm3, 24(%1)\n"
18739- " movq 32(%0), %%mm4\n"
18740- " movntq %%mm4, 32(%1)\n"
18741- " movq 40(%0), %%mm5\n"
18742- " movntq %%mm5, 40(%1)\n"
18743- " movq 48(%0), %%mm6\n"
18744- " movntq %%mm6, 48(%1)\n"
18745- " movq 56(%0), %%mm7\n"
18746- " movntq %%mm7, 56(%1)\n"
18747+ "1: prefetch 320(%1)\n"
18748+ "2: movq (%1), %%mm0\n"
18749+ " movntq %%mm0, (%2)\n"
18750+ " movq 8(%1), %%mm1\n"
18751+ " movntq %%mm1, 8(%2)\n"
18752+ " movq 16(%1), %%mm2\n"
18753+ " movntq %%mm2, 16(%2)\n"
18754+ " movq 24(%1), %%mm3\n"
18755+ " movntq %%mm3, 24(%2)\n"
18756+ " movq 32(%1), %%mm4\n"
18757+ " movntq %%mm4, 32(%2)\n"
18758+ " movq 40(%1), %%mm5\n"
18759+ " movntq %%mm5, 40(%2)\n"
18760+ " movq 48(%1), %%mm6\n"
18761+ " movntq %%mm6, 48(%2)\n"
18762+ " movq 56(%1), %%mm7\n"
18763+ " movntq %%mm7, 56(%2)\n"
18764 ".section .fixup, \"ax\"\n"
18765- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18766+ "3:\n"
18767+
18768+#ifdef CONFIG_PAX_KERNEXEC
18769+ " movl %%cr0, %0\n"
18770+ " movl %0, %%eax\n"
18771+ " andl $0xFFFEFFFF, %%eax\n"
18772+ " movl %%eax, %%cr0\n"
18773+#endif
18774+
18775+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18776+
18777+#ifdef CONFIG_PAX_KERNEXEC
18778+ " movl %0, %%cr0\n"
18779+#endif
18780+
18781 " jmp 2b\n"
18782 ".previous\n"
18783- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
18784+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18785
18786 from += 64;
18787 to += 64;
18788@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
18789 static void fast_copy_page(void *to, void *from)
18790 {
18791 int i;
18792+ unsigned long cr0;
18793
18794 kernel_fpu_begin();
18795
18796 __asm__ __volatile__ (
18797- "1: prefetch (%0)\n"
18798- " prefetch 64(%0)\n"
18799- " prefetch 128(%0)\n"
18800- " prefetch 192(%0)\n"
18801- " prefetch 256(%0)\n"
18802+ "1: prefetch (%1)\n"
18803+ " prefetch 64(%1)\n"
18804+ " prefetch 128(%1)\n"
18805+ " prefetch 192(%1)\n"
18806+ " prefetch 256(%1)\n"
18807 "2: \n"
18808 ".section .fixup, \"ax\"\n"
18809- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18810+ "3: \n"
18811+
18812+#ifdef CONFIG_PAX_KERNEXEC
18813+ " movl %%cr0, %0\n"
18814+ " movl %0, %%eax\n"
18815+ " andl $0xFFFEFFFF, %%eax\n"
18816+ " movl %%eax, %%cr0\n"
18817+#endif
18818+
18819+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18820+
18821+#ifdef CONFIG_PAX_KERNEXEC
18822+ " movl %0, %%cr0\n"
18823+#endif
18824+
18825 " jmp 2b\n"
18826 ".previous\n"
18827- _ASM_EXTABLE(1b, 3b) : : "r" (from));
18828+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18829
18830 for (i = 0; i < 4096/64; i++) {
18831 __asm__ __volatile__ (
18832- "1: prefetch 320(%0)\n"
18833- "2: movq (%0), %%mm0\n"
18834- " movq 8(%0), %%mm1\n"
18835- " movq 16(%0), %%mm2\n"
18836- " movq 24(%0), %%mm3\n"
18837- " movq %%mm0, (%1)\n"
18838- " movq %%mm1, 8(%1)\n"
18839- " movq %%mm2, 16(%1)\n"
18840- " movq %%mm3, 24(%1)\n"
18841- " movq 32(%0), %%mm0\n"
18842- " movq 40(%0), %%mm1\n"
18843- " movq 48(%0), %%mm2\n"
18844- " movq 56(%0), %%mm3\n"
18845- " movq %%mm0, 32(%1)\n"
18846- " movq %%mm1, 40(%1)\n"
18847- " movq %%mm2, 48(%1)\n"
18848- " movq %%mm3, 56(%1)\n"
18849+ "1: prefetch 320(%1)\n"
18850+ "2: movq (%1), %%mm0\n"
18851+ " movq 8(%1), %%mm1\n"
18852+ " movq 16(%1), %%mm2\n"
18853+ " movq 24(%1), %%mm3\n"
18854+ " movq %%mm0, (%2)\n"
18855+ " movq %%mm1, 8(%2)\n"
18856+ " movq %%mm2, 16(%2)\n"
18857+ " movq %%mm3, 24(%2)\n"
18858+ " movq 32(%1), %%mm0\n"
18859+ " movq 40(%1), %%mm1\n"
18860+ " movq 48(%1), %%mm2\n"
18861+ " movq 56(%1), %%mm3\n"
18862+ " movq %%mm0, 32(%2)\n"
18863+ " movq %%mm1, 40(%2)\n"
18864+ " movq %%mm2, 48(%2)\n"
18865+ " movq %%mm3, 56(%2)\n"
18866 ".section .fixup, \"ax\"\n"
18867- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18868+ "3:\n"
18869+
18870+#ifdef CONFIG_PAX_KERNEXEC
18871+ " movl %%cr0, %0\n"
18872+ " movl %0, %%eax\n"
18873+ " andl $0xFFFEFFFF, %%eax\n"
18874+ " movl %%eax, %%cr0\n"
18875+#endif
18876+
18877+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18878+
18879+#ifdef CONFIG_PAX_KERNEXEC
18880+ " movl %0, %%cr0\n"
18881+#endif
18882+
18883 " jmp 2b\n"
18884 ".previous\n"
18885 _ASM_EXTABLE(1b, 3b)
18886- : : "r" (from), "r" (to) : "memory");
18887+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18888
18889 from += 64;
18890 to += 64;
18891diff -urNp linux-3.1.1/arch/x86/lib/msr-reg.S linux-3.1.1/arch/x86/lib/msr-reg.S
18892--- linux-3.1.1/arch/x86/lib/msr-reg.S 2011-11-11 15:19:27.000000000 -0500
18893+++ linux-3.1.1/arch/x86/lib/msr-reg.S 2011-11-16 18:39:07.000000000 -0500
18894@@ -3,6 +3,7 @@
18895 #include <asm/dwarf2.h>
18896 #include <asm/asm.h>
18897 #include <asm/msr.h>
18898+#include <asm/alternative-asm.h>
18899
18900 #ifdef CONFIG_X86_64
18901 /*
18902@@ -37,6 +38,7 @@ ENTRY(native_\op\()_safe_regs)
18903 movl %edi, 28(%r10)
18904 popq_cfi %rbp
18905 popq_cfi %rbx
18906+ pax_force_retaddr
18907 ret
18908 3:
18909 CFI_RESTORE_STATE
18910diff -urNp linux-3.1.1/arch/x86/lib/putuser.S linux-3.1.1/arch/x86/lib/putuser.S
18911--- linux-3.1.1/arch/x86/lib/putuser.S 2011-11-11 15:19:27.000000000 -0500
18912+++ linux-3.1.1/arch/x86/lib/putuser.S 2011-11-16 18:39:07.000000000 -0500
18913@@ -15,7 +15,9 @@
18914 #include <asm/thread_info.h>
18915 #include <asm/errno.h>
18916 #include <asm/asm.h>
18917-
18918+#include <asm/segment.h>
18919+#include <asm/pgtable.h>
18920+#include <asm/alternative-asm.h>
18921
18922 /*
18923 * __put_user_X
18924@@ -29,52 +31,119 @@
18925 * as they get called from within inline assembly.
18926 */
18927
18928-#define ENTER CFI_STARTPROC ; \
18929- GET_THREAD_INFO(%_ASM_BX)
18930-#define EXIT ret ; \
18931+#define ENTER CFI_STARTPROC
18932+#define EXIT pax_force_retaddr; ret ; \
18933 CFI_ENDPROC
18934
18935+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18936+#define _DEST %_ASM_CX,%_ASM_BX
18937+#else
18938+#define _DEST %_ASM_CX
18939+#endif
18940+
18941+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18942+#define __copyuser_seg gs;
18943+#else
18944+#define __copyuser_seg
18945+#endif
18946+
18947 .text
18948 ENTRY(__put_user_1)
18949 ENTER
18950+
18951+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18952+ GET_THREAD_INFO(%_ASM_BX)
18953 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
18954 jae bad_put_user
18955-1: movb %al,(%_ASM_CX)
18956+
18957+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18958+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18959+ cmp %_ASM_BX,%_ASM_CX
18960+ jb 1234f
18961+ xor %ebx,%ebx
18962+1234:
18963+#endif
18964+
18965+#endif
18966+
18967+1: __copyuser_seg movb %al,(_DEST)
18968 xor %eax,%eax
18969 EXIT
18970 ENDPROC(__put_user_1)
18971
18972 ENTRY(__put_user_2)
18973 ENTER
18974+
18975+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18976+ GET_THREAD_INFO(%_ASM_BX)
18977 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18978 sub $1,%_ASM_BX
18979 cmp %_ASM_BX,%_ASM_CX
18980 jae bad_put_user
18981-2: movw %ax,(%_ASM_CX)
18982+
18983+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18984+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18985+ cmp %_ASM_BX,%_ASM_CX
18986+ jb 1234f
18987+ xor %ebx,%ebx
18988+1234:
18989+#endif
18990+
18991+#endif
18992+
18993+2: __copyuser_seg movw %ax,(_DEST)
18994 xor %eax,%eax
18995 EXIT
18996 ENDPROC(__put_user_2)
18997
18998 ENTRY(__put_user_4)
18999 ENTER
19000+
19001+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19002+ GET_THREAD_INFO(%_ASM_BX)
19003 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19004 sub $3,%_ASM_BX
19005 cmp %_ASM_BX,%_ASM_CX
19006 jae bad_put_user
19007-3: movl %eax,(%_ASM_CX)
19008+
19009+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19010+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19011+ cmp %_ASM_BX,%_ASM_CX
19012+ jb 1234f
19013+ xor %ebx,%ebx
19014+1234:
19015+#endif
19016+
19017+#endif
19018+
19019+3: __copyuser_seg movl %eax,(_DEST)
19020 xor %eax,%eax
19021 EXIT
19022 ENDPROC(__put_user_4)
19023
19024 ENTRY(__put_user_8)
19025 ENTER
19026+
19027+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19028+ GET_THREAD_INFO(%_ASM_BX)
19029 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19030 sub $7,%_ASM_BX
19031 cmp %_ASM_BX,%_ASM_CX
19032 jae bad_put_user
19033-4: mov %_ASM_AX,(%_ASM_CX)
19034+
19035+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19036+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19037+ cmp %_ASM_BX,%_ASM_CX
19038+ jb 1234f
19039+ xor %ebx,%ebx
19040+1234:
19041+#endif
19042+
19043+#endif
19044+
19045+4: __copyuser_seg mov %_ASM_AX,(_DEST)
19046 #ifdef CONFIG_X86_32
19047-5: movl %edx,4(%_ASM_CX)
19048+5: __copyuser_seg movl %edx,4(_DEST)
19049 #endif
19050 xor %eax,%eax
19051 EXIT
19052diff -urNp linux-3.1.1/arch/x86/lib/rwlock.S linux-3.1.1/arch/x86/lib/rwlock.S
19053--- linux-3.1.1/arch/x86/lib/rwlock.S 2011-11-11 15:19:27.000000000 -0500
19054+++ linux-3.1.1/arch/x86/lib/rwlock.S 2011-11-16 18:39:07.000000000 -0500
19055@@ -23,6 +23,7 @@ ENTRY(__write_lock_failed)
19056 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
19057 jnz 0b
19058 ENDFRAME
19059+ pax_force_retaddr
19060 ret
19061 CFI_ENDPROC
19062 END(__write_lock_failed)
19063@@ -39,6 +40,7 @@ ENTRY(__read_lock_failed)
19064 READ_LOCK_SIZE(dec) (%__lock_ptr)
19065 js 0b
19066 ENDFRAME
19067+ pax_force_retaddr
19068 ret
19069 CFI_ENDPROC
19070 END(__read_lock_failed)
19071diff -urNp linux-3.1.1/arch/x86/lib/rwsem.S linux-3.1.1/arch/x86/lib/rwsem.S
19072--- linux-3.1.1/arch/x86/lib/rwsem.S 2011-11-11 15:19:27.000000000 -0500
19073+++ linux-3.1.1/arch/x86/lib/rwsem.S 2011-11-16 18:39:07.000000000 -0500
19074@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
19075 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
19076 CFI_RESTORE __ASM_REG(dx)
19077 restore_common_regs
19078+ pax_force_retaddr
19079 ret
19080 CFI_ENDPROC
19081 ENDPROC(call_rwsem_down_read_failed)
19082@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
19083 movq %rax,%rdi
19084 call rwsem_down_write_failed
19085 restore_common_regs
19086+ pax_force_retaddr
19087 ret
19088 CFI_ENDPROC
19089 ENDPROC(call_rwsem_down_write_failed)
19090@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
19091 movq %rax,%rdi
19092 call rwsem_wake
19093 restore_common_regs
19094-1: ret
19095+1: pax_force_retaddr
19096+ ret
19097 CFI_ENDPROC
19098 ENDPROC(call_rwsem_wake)
19099
19100@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
19101 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
19102 CFI_RESTORE __ASM_REG(dx)
19103 restore_common_regs
19104+ pax_force_retaddr
19105 ret
19106 CFI_ENDPROC
19107 ENDPROC(call_rwsem_downgrade_wake)
19108diff -urNp linux-3.1.1/arch/x86/lib/thunk_64.S linux-3.1.1/arch/x86/lib/thunk_64.S
19109--- linux-3.1.1/arch/x86/lib/thunk_64.S 2011-11-11 15:19:27.000000000 -0500
19110+++ linux-3.1.1/arch/x86/lib/thunk_64.S 2011-11-16 18:39:07.000000000 -0500
19111@@ -8,6 +8,7 @@
19112 #include <linux/linkage.h>
19113 #include <asm/dwarf2.h>
19114 #include <asm/calling.h>
19115+#include <asm/alternative-asm.h>
19116
19117 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
19118 .macro THUNK name, func, put_ret_addr_in_rdi=0
19119@@ -41,5 +42,6 @@
19120 SAVE_ARGS
19121 restore:
19122 RESTORE_ARGS
19123+ pax_force_retaddr
19124 ret
19125 CFI_ENDPROC
19126diff -urNp linux-3.1.1/arch/x86/lib/usercopy_32.c linux-3.1.1/arch/x86/lib/usercopy_32.c
19127--- linux-3.1.1/arch/x86/lib/usercopy_32.c 2011-11-11 15:19:27.000000000 -0500
19128+++ linux-3.1.1/arch/x86/lib/usercopy_32.c 2011-11-16 18:39:07.000000000 -0500
19129@@ -43,7 +43,7 @@ do { \
19130 __asm__ __volatile__( \
19131 " testl %1,%1\n" \
19132 " jz 2f\n" \
19133- "0: lodsb\n" \
19134+ "0: "__copyuser_seg"lodsb\n" \
19135 " stosb\n" \
19136 " testb %%al,%%al\n" \
19137 " jz 1f\n" \
19138@@ -128,10 +128,12 @@ do { \
19139 int __d0; \
19140 might_fault(); \
19141 __asm__ __volatile__( \
19142+ __COPYUSER_SET_ES \
19143 "0: rep; stosl\n" \
19144 " movl %2,%0\n" \
19145 "1: rep; stosb\n" \
19146 "2:\n" \
19147+ __COPYUSER_RESTORE_ES \
19148 ".section .fixup,\"ax\"\n" \
19149 "3: lea 0(%2,%0,4),%0\n" \
19150 " jmp 2b\n" \
19151@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
19152 might_fault();
19153
19154 __asm__ __volatile__(
19155+ __COPYUSER_SET_ES
19156 " testl %0, %0\n"
19157 " jz 3f\n"
19158 " andl %0,%%ecx\n"
19159@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
19160 " subl %%ecx,%0\n"
19161 " addl %0,%%eax\n"
19162 "1:\n"
19163+ __COPYUSER_RESTORE_ES
19164 ".section .fixup,\"ax\"\n"
19165 "2: xorl %%eax,%%eax\n"
19166 " jmp 1b\n"
19167@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
19168
19169 #ifdef CONFIG_X86_INTEL_USERCOPY
19170 static unsigned long
19171-__copy_user_intel(void __user *to, const void *from, unsigned long size)
19172+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
19173 {
19174 int d0, d1;
19175 __asm__ __volatile__(
19176@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
19177 " .align 2,0x90\n"
19178 "3: movl 0(%4), %%eax\n"
19179 "4: movl 4(%4), %%edx\n"
19180- "5: movl %%eax, 0(%3)\n"
19181- "6: movl %%edx, 4(%3)\n"
19182+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
19183+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
19184 "7: movl 8(%4), %%eax\n"
19185 "8: movl 12(%4),%%edx\n"
19186- "9: movl %%eax, 8(%3)\n"
19187- "10: movl %%edx, 12(%3)\n"
19188+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
19189+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
19190 "11: movl 16(%4), %%eax\n"
19191 "12: movl 20(%4), %%edx\n"
19192- "13: movl %%eax, 16(%3)\n"
19193- "14: movl %%edx, 20(%3)\n"
19194+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
19195+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
19196 "15: movl 24(%4), %%eax\n"
19197 "16: movl 28(%4), %%edx\n"
19198- "17: movl %%eax, 24(%3)\n"
19199- "18: movl %%edx, 28(%3)\n"
19200+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
19201+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
19202 "19: movl 32(%4), %%eax\n"
19203 "20: movl 36(%4), %%edx\n"
19204- "21: movl %%eax, 32(%3)\n"
19205- "22: movl %%edx, 36(%3)\n"
19206+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
19207+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
19208 "23: movl 40(%4), %%eax\n"
19209 "24: movl 44(%4), %%edx\n"
19210- "25: movl %%eax, 40(%3)\n"
19211- "26: movl %%edx, 44(%3)\n"
19212+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
19213+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
19214 "27: movl 48(%4), %%eax\n"
19215 "28: movl 52(%4), %%edx\n"
19216- "29: movl %%eax, 48(%3)\n"
19217- "30: movl %%edx, 52(%3)\n"
19218+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
19219+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
19220 "31: movl 56(%4), %%eax\n"
19221 "32: movl 60(%4), %%edx\n"
19222- "33: movl %%eax, 56(%3)\n"
19223- "34: movl %%edx, 60(%3)\n"
19224+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
19225+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
19226 " addl $-64, %0\n"
19227 " addl $64, %4\n"
19228 " addl $64, %3\n"
19229@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
19230 " shrl $2, %0\n"
19231 " andl $3, %%eax\n"
19232 " cld\n"
19233+ __COPYUSER_SET_ES
19234 "99: rep; movsl\n"
19235 "36: movl %%eax, %0\n"
19236 "37: rep; movsb\n"
19237 "100:\n"
19238+ __COPYUSER_RESTORE_ES
19239+ ".section .fixup,\"ax\"\n"
19240+ "101: lea 0(%%eax,%0,4),%0\n"
19241+ " jmp 100b\n"
19242+ ".previous\n"
19243+ ".section __ex_table,\"a\"\n"
19244+ " .align 4\n"
19245+ " .long 1b,100b\n"
19246+ " .long 2b,100b\n"
19247+ " .long 3b,100b\n"
19248+ " .long 4b,100b\n"
19249+ " .long 5b,100b\n"
19250+ " .long 6b,100b\n"
19251+ " .long 7b,100b\n"
19252+ " .long 8b,100b\n"
19253+ " .long 9b,100b\n"
19254+ " .long 10b,100b\n"
19255+ " .long 11b,100b\n"
19256+ " .long 12b,100b\n"
19257+ " .long 13b,100b\n"
19258+ " .long 14b,100b\n"
19259+ " .long 15b,100b\n"
19260+ " .long 16b,100b\n"
19261+ " .long 17b,100b\n"
19262+ " .long 18b,100b\n"
19263+ " .long 19b,100b\n"
19264+ " .long 20b,100b\n"
19265+ " .long 21b,100b\n"
19266+ " .long 22b,100b\n"
19267+ " .long 23b,100b\n"
19268+ " .long 24b,100b\n"
19269+ " .long 25b,100b\n"
19270+ " .long 26b,100b\n"
19271+ " .long 27b,100b\n"
19272+ " .long 28b,100b\n"
19273+ " .long 29b,100b\n"
19274+ " .long 30b,100b\n"
19275+ " .long 31b,100b\n"
19276+ " .long 32b,100b\n"
19277+ " .long 33b,100b\n"
19278+ " .long 34b,100b\n"
19279+ " .long 35b,100b\n"
19280+ " .long 36b,100b\n"
19281+ " .long 37b,100b\n"
19282+ " .long 99b,101b\n"
19283+ ".previous"
19284+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
19285+ : "1"(to), "2"(from), "0"(size)
19286+ : "eax", "edx", "memory");
19287+ return size;
19288+}
19289+
19290+static unsigned long
19291+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
19292+{
19293+ int d0, d1;
19294+ __asm__ __volatile__(
19295+ " .align 2,0x90\n"
19296+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
19297+ " cmpl $67, %0\n"
19298+ " jbe 3f\n"
19299+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
19300+ " .align 2,0x90\n"
19301+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
19302+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
19303+ "5: movl %%eax, 0(%3)\n"
19304+ "6: movl %%edx, 4(%3)\n"
19305+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
19306+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
19307+ "9: movl %%eax, 8(%3)\n"
19308+ "10: movl %%edx, 12(%3)\n"
19309+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
19310+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
19311+ "13: movl %%eax, 16(%3)\n"
19312+ "14: movl %%edx, 20(%3)\n"
19313+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
19314+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
19315+ "17: movl %%eax, 24(%3)\n"
19316+ "18: movl %%edx, 28(%3)\n"
19317+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
19318+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
19319+ "21: movl %%eax, 32(%3)\n"
19320+ "22: movl %%edx, 36(%3)\n"
19321+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
19322+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
19323+ "25: movl %%eax, 40(%3)\n"
19324+ "26: movl %%edx, 44(%3)\n"
19325+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
19326+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
19327+ "29: movl %%eax, 48(%3)\n"
19328+ "30: movl %%edx, 52(%3)\n"
19329+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
19330+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
19331+ "33: movl %%eax, 56(%3)\n"
19332+ "34: movl %%edx, 60(%3)\n"
19333+ " addl $-64, %0\n"
19334+ " addl $64, %4\n"
19335+ " addl $64, %3\n"
19336+ " cmpl $63, %0\n"
19337+ " ja 1b\n"
19338+ "35: movl %0, %%eax\n"
19339+ " shrl $2, %0\n"
19340+ " andl $3, %%eax\n"
19341+ " cld\n"
19342+ "99: rep; "__copyuser_seg" movsl\n"
19343+ "36: movl %%eax, %0\n"
19344+ "37: rep; "__copyuser_seg" movsb\n"
19345+ "100:\n"
19346 ".section .fixup,\"ax\"\n"
19347 "101: lea 0(%%eax,%0,4),%0\n"
19348 " jmp 100b\n"
19349@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
19350 int d0, d1;
19351 __asm__ __volatile__(
19352 " .align 2,0x90\n"
19353- "0: movl 32(%4), %%eax\n"
19354+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19355 " cmpl $67, %0\n"
19356 " jbe 2f\n"
19357- "1: movl 64(%4), %%eax\n"
19358+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19359 " .align 2,0x90\n"
19360- "2: movl 0(%4), %%eax\n"
19361- "21: movl 4(%4), %%edx\n"
19362+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19363+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19364 " movl %%eax, 0(%3)\n"
19365 " movl %%edx, 4(%3)\n"
19366- "3: movl 8(%4), %%eax\n"
19367- "31: movl 12(%4),%%edx\n"
19368+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19369+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19370 " movl %%eax, 8(%3)\n"
19371 " movl %%edx, 12(%3)\n"
19372- "4: movl 16(%4), %%eax\n"
19373- "41: movl 20(%4), %%edx\n"
19374+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19375+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19376 " movl %%eax, 16(%3)\n"
19377 " movl %%edx, 20(%3)\n"
19378- "10: movl 24(%4), %%eax\n"
19379- "51: movl 28(%4), %%edx\n"
19380+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19381+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19382 " movl %%eax, 24(%3)\n"
19383 " movl %%edx, 28(%3)\n"
19384- "11: movl 32(%4), %%eax\n"
19385- "61: movl 36(%4), %%edx\n"
19386+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19387+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19388 " movl %%eax, 32(%3)\n"
19389 " movl %%edx, 36(%3)\n"
19390- "12: movl 40(%4), %%eax\n"
19391- "71: movl 44(%4), %%edx\n"
19392+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19393+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19394 " movl %%eax, 40(%3)\n"
19395 " movl %%edx, 44(%3)\n"
19396- "13: movl 48(%4), %%eax\n"
19397- "81: movl 52(%4), %%edx\n"
19398+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19399+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19400 " movl %%eax, 48(%3)\n"
19401 " movl %%edx, 52(%3)\n"
19402- "14: movl 56(%4), %%eax\n"
19403- "91: movl 60(%4), %%edx\n"
19404+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19405+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19406 " movl %%eax, 56(%3)\n"
19407 " movl %%edx, 60(%3)\n"
19408 " addl $-64, %0\n"
19409@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
19410 " shrl $2, %0\n"
19411 " andl $3, %%eax\n"
19412 " cld\n"
19413- "6: rep; movsl\n"
19414+ "6: rep; "__copyuser_seg" movsl\n"
19415 " movl %%eax,%0\n"
19416- "7: rep; movsb\n"
19417+ "7: rep; "__copyuser_seg" movsb\n"
19418 "8:\n"
19419 ".section .fixup,\"ax\"\n"
19420 "9: lea 0(%%eax,%0,4),%0\n"
19421@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
19422
19423 __asm__ __volatile__(
19424 " .align 2,0x90\n"
19425- "0: movl 32(%4), %%eax\n"
19426+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19427 " cmpl $67, %0\n"
19428 " jbe 2f\n"
19429- "1: movl 64(%4), %%eax\n"
19430+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19431 " .align 2,0x90\n"
19432- "2: movl 0(%4), %%eax\n"
19433- "21: movl 4(%4), %%edx\n"
19434+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19435+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19436 " movnti %%eax, 0(%3)\n"
19437 " movnti %%edx, 4(%3)\n"
19438- "3: movl 8(%4), %%eax\n"
19439- "31: movl 12(%4),%%edx\n"
19440+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19441+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19442 " movnti %%eax, 8(%3)\n"
19443 " movnti %%edx, 12(%3)\n"
19444- "4: movl 16(%4), %%eax\n"
19445- "41: movl 20(%4), %%edx\n"
19446+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19447+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19448 " movnti %%eax, 16(%3)\n"
19449 " movnti %%edx, 20(%3)\n"
19450- "10: movl 24(%4), %%eax\n"
19451- "51: movl 28(%4), %%edx\n"
19452+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19453+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19454 " movnti %%eax, 24(%3)\n"
19455 " movnti %%edx, 28(%3)\n"
19456- "11: movl 32(%4), %%eax\n"
19457- "61: movl 36(%4), %%edx\n"
19458+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19459+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19460 " movnti %%eax, 32(%3)\n"
19461 " movnti %%edx, 36(%3)\n"
19462- "12: movl 40(%4), %%eax\n"
19463- "71: movl 44(%4), %%edx\n"
19464+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19465+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19466 " movnti %%eax, 40(%3)\n"
19467 " movnti %%edx, 44(%3)\n"
19468- "13: movl 48(%4), %%eax\n"
19469- "81: movl 52(%4), %%edx\n"
19470+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19471+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19472 " movnti %%eax, 48(%3)\n"
19473 " movnti %%edx, 52(%3)\n"
19474- "14: movl 56(%4), %%eax\n"
19475- "91: movl 60(%4), %%edx\n"
19476+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19477+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19478 " movnti %%eax, 56(%3)\n"
19479 " movnti %%edx, 60(%3)\n"
19480 " addl $-64, %0\n"
19481@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
19482 " shrl $2, %0\n"
19483 " andl $3, %%eax\n"
19484 " cld\n"
19485- "6: rep; movsl\n"
19486+ "6: rep; "__copyuser_seg" movsl\n"
19487 " movl %%eax,%0\n"
19488- "7: rep; movsb\n"
19489+ "7: rep; "__copyuser_seg" movsb\n"
19490 "8:\n"
19491 ".section .fixup,\"ax\"\n"
19492 "9: lea 0(%%eax,%0,4),%0\n"
19493@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
19494
19495 __asm__ __volatile__(
19496 " .align 2,0x90\n"
19497- "0: movl 32(%4), %%eax\n"
19498+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19499 " cmpl $67, %0\n"
19500 " jbe 2f\n"
19501- "1: movl 64(%4), %%eax\n"
19502+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19503 " .align 2,0x90\n"
19504- "2: movl 0(%4), %%eax\n"
19505- "21: movl 4(%4), %%edx\n"
19506+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19507+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19508 " movnti %%eax, 0(%3)\n"
19509 " movnti %%edx, 4(%3)\n"
19510- "3: movl 8(%4), %%eax\n"
19511- "31: movl 12(%4),%%edx\n"
19512+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19513+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19514 " movnti %%eax, 8(%3)\n"
19515 " movnti %%edx, 12(%3)\n"
19516- "4: movl 16(%4), %%eax\n"
19517- "41: movl 20(%4), %%edx\n"
19518+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19519+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19520 " movnti %%eax, 16(%3)\n"
19521 " movnti %%edx, 20(%3)\n"
19522- "10: movl 24(%4), %%eax\n"
19523- "51: movl 28(%4), %%edx\n"
19524+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19525+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19526 " movnti %%eax, 24(%3)\n"
19527 " movnti %%edx, 28(%3)\n"
19528- "11: movl 32(%4), %%eax\n"
19529- "61: movl 36(%4), %%edx\n"
19530+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19531+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19532 " movnti %%eax, 32(%3)\n"
19533 " movnti %%edx, 36(%3)\n"
19534- "12: movl 40(%4), %%eax\n"
19535- "71: movl 44(%4), %%edx\n"
19536+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19537+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19538 " movnti %%eax, 40(%3)\n"
19539 " movnti %%edx, 44(%3)\n"
19540- "13: movl 48(%4), %%eax\n"
19541- "81: movl 52(%4), %%edx\n"
19542+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19543+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19544 " movnti %%eax, 48(%3)\n"
19545 " movnti %%edx, 52(%3)\n"
19546- "14: movl 56(%4), %%eax\n"
19547- "91: movl 60(%4), %%edx\n"
19548+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19549+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19550 " movnti %%eax, 56(%3)\n"
19551 " movnti %%edx, 60(%3)\n"
19552 " addl $-64, %0\n"
19553@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
19554 " shrl $2, %0\n"
19555 " andl $3, %%eax\n"
19556 " cld\n"
19557- "6: rep; movsl\n"
19558+ "6: rep; "__copyuser_seg" movsl\n"
19559 " movl %%eax,%0\n"
19560- "7: rep; movsb\n"
19561+ "7: rep; "__copyuser_seg" movsb\n"
19562 "8:\n"
19563 ".section .fixup,\"ax\"\n"
19564 "9: lea 0(%%eax,%0,4),%0\n"
19565@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
19566 */
19567 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
19568 unsigned long size);
19569-unsigned long __copy_user_intel(void __user *to, const void *from,
19570+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
19571+ unsigned long size);
19572+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
19573 unsigned long size);
19574 unsigned long __copy_user_zeroing_intel_nocache(void *to,
19575 const void __user *from, unsigned long size);
19576 #endif /* CONFIG_X86_INTEL_USERCOPY */
19577
19578 /* Generic arbitrary sized copy. */
19579-#define __copy_user(to, from, size) \
19580+#define __copy_user(to, from, size, prefix, set, restore) \
19581 do { \
19582 int __d0, __d1, __d2; \
19583 __asm__ __volatile__( \
19584+ set \
19585 " cmp $7,%0\n" \
19586 " jbe 1f\n" \
19587 " movl %1,%0\n" \
19588 " negl %0\n" \
19589 " andl $7,%0\n" \
19590 " subl %0,%3\n" \
19591- "4: rep; movsb\n" \
19592+ "4: rep; "prefix"movsb\n" \
19593 " movl %3,%0\n" \
19594 " shrl $2,%0\n" \
19595 " andl $3,%3\n" \
19596 " .align 2,0x90\n" \
19597- "0: rep; movsl\n" \
19598+ "0: rep; "prefix"movsl\n" \
19599 " movl %3,%0\n" \
19600- "1: rep; movsb\n" \
19601+ "1: rep; "prefix"movsb\n" \
19602 "2:\n" \
19603+ restore \
19604 ".section .fixup,\"ax\"\n" \
19605 "5: addl %3,%0\n" \
19606 " jmp 2b\n" \
19607@@ -682,14 +799,14 @@ do { \
19608 " negl %0\n" \
19609 " andl $7,%0\n" \
19610 " subl %0,%3\n" \
19611- "4: rep; movsb\n" \
19612+ "4: rep; "__copyuser_seg"movsb\n" \
19613 " movl %3,%0\n" \
19614 " shrl $2,%0\n" \
19615 " andl $3,%3\n" \
19616 " .align 2,0x90\n" \
19617- "0: rep; movsl\n" \
19618+ "0: rep; "__copyuser_seg"movsl\n" \
19619 " movl %3,%0\n" \
19620- "1: rep; movsb\n" \
19621+ "1: rep; "__copyuser_seg"movsb\n" \
19622 "2:\n" \
19623 ".section .fixup,\"ax\"\n" \
19624 "5: addl %3,%0\n" \
19625@@ -775,9 +892,9 @@ survive:
19626 }
19627 #endif
19628 if (movsl_is_ok(to, from, n))
19629- __copy_user(to, from, n);
19630+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
19631 else
19632- n = __copy_user_intel(to, from, n);
19633+ n = __generic_copy_to_user_intel(to, from, n);
19634 return n;
19635 }
19636 EXPORT_SYMBOL(__copy_to_user_ll);
19637@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
19638 unsigned long n)
19639 {
19640 if (movsl_is_ok(to, from, n))
19641- __copy_user(to, from, n);
19642+ __copy_user(to, from, n, __copyuser_seg, "", "");
19643 else
19644- n = __copy_user_intel((void __user *)to,
19645- (const void *)from, n);
19646+ n = __generic_copy_from_user_intel(to, from, n);
19647 return n;
19648 }
19649 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
19650@@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
19651 if (n > 64 && cpu_has_xmm2)
19652 n = __copy_user_intel_nocache(to, from, n);
19653 else
19654- __copy_user(to, from, n);
19655+ __copy_user(to, from, n, __copyuser_seg, "", "");
19656 #else
19657- __copy_user(to, from, n);
19658+ __copy_user(to, from, n, __copyuser_seg, "", "");
19659 #endif
19660 return n;
19661 }
19662 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
19663
19664-/**
19665- * copy_to_user: - Copy a block of data into user space.
19666- * @to: Destination address, in user space.
19667- * @from: Source address, in kernel space.
19668- * @n: Number of bytes to copy.
19669- *
19670- * Context: User context only. This function may sleep.
19671- *
19672- * Copy data from kernel space to user space.
19673- *
19674- * Returns number of bytes that could not be copied.
19675- * On success, this will be zero.
19676- */
19677-unsigned long
19678-copy_to_user(void __user *to, const void *from, unsigned long n)
19679+void copy_from_user_overflow(void)
19680 {
19681- if (access_ok(VERIFY_WRITE, to, n))
19682- n = __copy_to_user(to, from, n);
19683- return n;
19684+ WARN(1, "Buffer overflow detected!\n");
19685 }
19686-EXPORT_SYMBOL(copy_to_user);
19687+EXPORT_SYMBOL(copy_from_user_overflow);
19688
19689-/**
19690- * copy_from_user: - Copy a block of data from user space.
19691- * @to: Destination address, in kernel space.
19692- * @from: Source address, in user space.
19693- * @n: Number of bytes to copy.
19694- *
19695- * Context: User context only. This function may sleep.
19696- *
19697- * Copy data from user space to kernel space.
19698- *
19699- * Returns number of bytes that could not be copied.
19700- * On success, this will be zero.
19701- *
19702- * If some data could not be copied, this function will pad the copied
19703- * data to the requested size using zero bytes.
19704- */
19705-unsigned long
19706-_copy_from_user(void *to, const void __user *from, unsigned long n)
19707+void copy_to_user_overflow(void)
19708 {
19709- if (access_ok(VERIFY_READ, from, n))
19710- n = __copy_from_user(to, from, n);
19711- else
19712- memset(to, 0, n);
19713- return n;
19714+ WARN(1, "Buffer overflow detected!\n");
19715 }
19716-EXPORT_SYMBOL(_copy_from_user);
19717+EXPORT_SYMBOL(copy_to_user_overflow);
19718
19719-void copy_from_user_overflow(void)
19720+#ifdef CONFIG_PAX_MEMORY_UDEREF
19721+void __set_fs(mm_segment_t x)
19722 {
19723- WARN(1, "Buffer overflow detected!\n");
19724+ switch (x.seg) {
19725+ case 0:
19726+ loadsegment(gs, 0);
19727+ break;
19728+ case TASK_SIZE_MAX:
19729+ loadsegment(gs, __USER_DS);
19730+ break;
19731+ case -1UL:
19732+ loadsegment(gs, __KERNEL_DS);
19733+ break;
19734+ default:
19735+ BUG();
19736+ }
19737+ return;
19738 }
19739-EXPORT_SYMBOL(copy_from_user_overflow);
19740+EXPORT_SYMBOL(__set_fs);
19741+
19742+void set_fs(mm_segment_t x)
19743+{
19744+ current_thread_info()->addr_limit = x;
19745+ __set_fs(x);
19746+}
19747+EXPORT_SYMBOL(set_fs);
19748+#endif
19749diff -urNp linux-3.1.1/arch/x86/lib/usercopy_64.c linux-3.1.1/arch/x86/lib/usercopy_64.c
19750--- linux-3.1.1/arch/x86/lib/usercopy_64.c 2011-11-11 15:19:27.000000000 -0500
19751+++ linux-3.1.1/arch/x86/lib/usercopy_64.c 2011-11-16 18:39:07.000000000 -0500
19752@@ -42,6 +42,12 @@ long
19753 __strncpy_from_user(char *dst, const char __user *src, long count)
19754 {
19755 long res;
19756+
19757+#ifdef CONFIG_PAX_MEMORY_UDEREF
19758+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19759+ src += PAX_USER_SHADOW_BASE;
19760+#endif
19761+
19762 __do_strncpy_from_user(dst, src, count, res);
19763 return res;
19764 }
19765@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
19766 {
19767 long __d0;
19768 might_fault();
19769+
19770+#ifdef CONFIG_PAX_MEMORY_UDEREF
19771+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
19772+ addr += PAX_USER_SHADOW_BASE;
19773+#endif
19774+
19775 /* no memory constraint because it doesn't change any memory gcc knows
19776 about */
19777 asm volatile(
19778@@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
19779
19780 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
19781 {
19782- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19783- return copy_user_generic((__force void *)to, (__force void *)from, len);
19784- }
19785- return len;
19786+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19787+
19788+#ifdef CONFIG_PAX_MEMORY_UDEREF
19789+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
19790+ to += PAX_USER_SHADOW_BASE;
19791+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
19792+ from += PAX_USER_SHADOW_BASE;
19793+#endif
19794+
19795+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
19796+ }
19797+ return len;
19798 }
19799 EXPORT_SYMBOL(copy_in_user);
19800
19801@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
19802 * it is not necessary to optimize tail handling.
19803 */
19804 unsigned long
19805-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
19806+copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest)
19807 {
19808 char c;
19809 unsigned zero_len;
19810diff -urNp linux-3.1.1/arch/x86/Makefile linux-3.1.1/arch/x86/Makefile
19811--- linux-3.1.1/arch/x86/Makefile 2011-11-11 15:19:27.000000000 -0500
19812+++ linux-3.1.1/arch/x86/Makefile 2011-11-16 18:40:08.000000000 -0500
19813@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
19814 else
19815 BITS := 64
19816 UTS_MACHINE := x86_64
19817+ biarch := $(call cc-option,-m64)
19818 CHECKFLAGS += -D__x86_64__ -m64
19819
19820 KBUILD_AFLAGS += -m64
19821@@ -195,3 +196,12 @@ define archhelp
19822 echo ' FDARGS="..." arguments for the booted kernel'
19823 echo ' FDINITRD=file initrd for the booted kernel'
19824 endef
19825+
19826+define OLD_LD
19827+
19828+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
19829+*** Please upgrade your binutils to 2.18 or newer
19830+endef
19831+
19832+archprepare:
19833+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
19834diff -urNp linux-3.1.1/arch/x86/mm/extable.c linux-3.1.1/arch/x86/mm/extable.c
19835--- linux-3.1.1/arch/x86/mm/extable.c 2011-11-11 15:19:27.000000000 -0500
19836+++ linux-3.1.1/arch/x86/mm/extable.c 2011-11-16 18:39:07.000000000 -0500
19837@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs
19838 const struct exception_table_entry *fixup;
19839
19840 #ifdef CONFIG_PNPBIOS
19841- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
19842+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
19843 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
19844 extern u32 pnp_bios_is_utter_crap;
19845 pnp_bios_is_utter_crap = 1;
19846diff -urNp linux-3.1.1/arch/x86/mm/fault.c linux-3.1.1/arch/x86/mm/fault.c
19847--- linux-3.1.1/arch/x86/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
19848+++ linux-3.1.1/arch/x86/mm/fault.c 2011-11-16 20:43:50.000000000 -0500
19849@@ -13,11 +13,18 @@
19850 #include <linux/perf_event.h> /* perf_sw_event */
19851 #include <linux/hugetlb.h> /* hstate_index_to_shift */
19852 #include <linux/prefetch.h> /* prefetchw */
19853+#include <linux/unistd.h>
19854+#include <linux/compiler.h>
19855
19856 #include <asm/traps.h> /* dotraplinkage, ... */
19857 #include <asm/pgalloc.h> /* pgd_*(), ... */
19858 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
19859 #include <asm/vsyscall.h>
19860+#include <asm/tlbflush.h>
19861+
19862+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19863+#include <asm/stacktrace.h>
19864+#endif
19865
19866 /*
19867 * Page fault error code bits:
19868@@ -55,7 +62,7 @@ static inline int __kprobes notify_page_
19869 int ret = 0;
19870
19871 /* kprobe_running() needs smp_processor_id() */
19872- if (kprobes_built_in() && !user_mode_vm(regs)) {
19873+ if (kprobes_built_in() && !user_mode(regs)) {
19874 preempt_disable();
19875 if (kprobe_running() && kprobe_fault_handler(regs, 14))
19876 ret = 1;
19877@@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *re
19878 return !instr_lo || (instr_lo>>1) == 1;
19879 case 0x00:
19880 /* Prefetch instruction is 0x0F0D or 0x0F18 */
19881- if (probe_kernel_address(instr, opcode))
19882+ if (user_mode(regs)) {
19883+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
19884+ return 0;
19885+ } else if (probe_kernel_address(instr, opcode))
19886 return 0;
19887
19888 *prefetch = (instr_lo == 0xF) &&
19889@@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsign
19890 while (instr < max_instr) {
19891 unsigned char opcode;
19892
19893- if (probe_kernel_address(instr, opcode))
19894+ if (user_mode(regs)) {
19895+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
19896+ break;
19897+ } else if (probe_kernel_address(instr, opcode))
19898 break;
19899
19900 instr++;
19901@@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int s
19902 force_sig_info(si_signo, &info, tsk);
19903 }
19904
19905+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19906+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
19907+#endif
19908+
19909+#ifdef CONFIG_PAX_EMUTRAMP
19910+static int pax_handle_fetch_fault(struct pt_regs *regs);
19911+#endif
19912+
19913+#ifdef CONFIG_PAX_PAGEEXEC
19914+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
19915+{
19916+ pgd_t *pgd;
19917+ pud_t *pud;
19918+ pmd_t *pmd;
19919+
19920+ pgd = pgd_offset(mm, address);
19921+ if (!pgd_present(*pgd))
19922+ return NULL;
19923+ pud = pud_offset(pgd, address);
19924+ if (!pud_present(*pud))
19925+ return NULL;
19926+ pmd = pmd_offset(pud, address);
19927+ if (!pmd_present(*pmd))
19928+ return NULL;
19929+ return pmd;
19930+}
19931+#endif
19932+
19933 DEFINE_SPINLOCK(pgd_lock);
19934 LIST_HEAD(pgd_list);
19935
19936@@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
19937 for (address = VMALLOC_START & PMD_MASK;
19938 address >= TASK_SIZE && address < FIXADDR_TOP;
19939 address += PMD_SIZE) {
19940+
19941+#ifdef CONFIG_PAX_PER_CPU_PGD
19942+ unsigned long cpu;
19943+#else
19944 struct page *page;
19945+#endif
19946
19947 spin_lock(&pgd_lock);
19948+
19949+#ifdef CONFIG_PAX_PER_CPU_PGD
19950+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19951+ pgd_t *pgd = get_cpu_pgd(cpu);
19952+ pmd_t *ret;
19953+#else
19954 list_for_each_entry(page, &pgd_list, lru) {
19955+ pgd_t *pgd = page_address(page);
19956 spinlock_t *pgt_lock;
19957 pmd_t *ret;
19958
19959@@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
19960 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
19961
19962 spin_lock(pgt_lock);
19963- ret = vmalloc_sync_one(page_address(page), address);
19964+#endif
19965+
19966+ ret = vmalloc_sync_one(pgd, address);
19967+
19968+#ifndef CONFIG_PAX_PER_CPU_PGD
19969 spin_unlock(pgt_lock);
19970+#endif
19971
19972 if (!ret)
19973 break;
19974@@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fa
19975 * an interrupt in the middle of a task switch..
19976 */
19977 pgd_paddr = read_cr3();
19978+
19979+#ifdef CONFIG_PAX_PER_CPU_PGD
19980+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
19981+#endif
19982+
19983 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
19984 if (!pmd_k)
19985 return -1;
19986@@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fa
19987 * happen within a race in page table update. In the later
19988 * case just flush:
19989 */
19990+
19991+#ifdef CONFIG_PAX_PER_CPU_PGD
19992+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
19993+ pgd = pgd_offset_cpu(smp_processor_id(), address);
19994+#else
19995 pgd = pgd_offset(current->active_mm, address);
19996+#endif
19997+
19998 pgd_ref = pgd_offset_k(address);
19999 if (pgd_none(*pgd_ref))
20000 return -1;
20001@@ -534,7 +604,7 @@ static int is_errata93(struct pt_regs *r
20002 static int is_errata100(struct pt_regs *regs, unsigned long address)
20003 {
20004 #ifdef CONFIG_X86_64
20005- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
20006+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
20007 return 1;
20008 #endif
20009 return 0;
20010@@ -561,7 +631,7 @@ static int is_f00f_bug(struct pt_regs *r
20011 }
20012
20013 static const char nx_warning[] = KERN_CRIT
20014-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
20015+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
20016
20017 static void
20018 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
20019@@ -570,14 +640,25 @@ show_fault_oops(struct pt_regs *regs, un
20020 if (!oops_may_print())
20021 return;
20022
20023- if (error_code & PF_INSTR) {
20024+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
20025 unsigned int level;
20026
20027 pte_t *pte = lookup_address(address, &level);
20028
20029 if (pte && pte_present(*pte) && !pte_exec(*pte))
20030- printk(nx_warning, current_uid());
20031+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
20032+ }
20033+
20034+#ifdef CONFIG_PAX_KERNEXEC
20035+ if (init_mm.start_code <= address && address < init_mm.end_code) {
20036+ if (current->signal->curr_ip)
20037+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20038+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
20039+ else
20040+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20041+ current->comm, task_pid_nr(current), current_uid(), current_euid());
20042 }
20043+#endif
20044
20045 printk(KERN_ALERT "BUG: unable to handle kernel ");
20046 if (address < PAGE_SIZE)
20047@@ -733,6 +814,21 @@ __bad_area_nosemaphore(struct pt_regs *r
20048 }
20049 #endif
20050
20051+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20052+ if (pax_is_fetch_fault(regs, error_code, address)) {
20053+
20054+#ifdef CONFIG_PAX_EMUTRAMP
20055+ switch (pax_handle_fetch_fault(regs)) {
20056+ case 2:
20057+ return;
20058+ }
20059+#endif
20060+
20061+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
20062+ do_group_exit(SIGKILL);
20063+ }
20064+#endif
20065+
20066 if (unlikely(show_unhandled_signals))
20067 show_signal_msg(regs, error_code, address, tsk);
20068
20069@@ -829,7 +925,7 @@ do_sigbus(struct pt_regs *regs, unsigned
20070 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
20071 printk(KERN_ERR
20072 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
20073- tsk->comm, tsk->pid, address);
20074+ tsk->comm, task_pid_nr(tsk), address);
20075 code = BUS_MCEERR_AR;
20076 }
20077 #endif
20078@@ -884,6 +980,99 @@ static int spurious_fault_check(unsigned
20079 return 1;
20080 }
20081
20082+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20083+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
20084+{
20085+ pte_t *pte;
20086+ pmd_t *pmd;
20087+ spinlock_t *ptl;
20088+ unsigned char pte_mask;
20089+
20090+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
20091+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
20092+ return 0;
20093+
20094+ /* PaX: it's our fault, let's handle it if we can */
20095+
20096+ /* PaX: take a look at read faults before acquiring any locks */
20097+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
20098+ /* instruction fetch attempt from a protected page in user mode */
20099+ up_read(&mm->mmap_sem);
20100+
20101+#ifdef CONFIG_PAX_EMUTRAMP
20102+ switch (pax_handle_fetch_fault(regs)) {
20103+ case 2:
20104+ return 1;
20105+ }
20106+#endif
20107+
20108+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
20109+ do_group_exit(SIGKILL);
20110+ }
20111+
20112+ pmd = pax_get_pmd(mm, address);
20113+ if (unlikely(!pmd))
20114+ return 0;
20115+
20116+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
20117+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
20118+ pte_unmap_unlock(pte, ptl);
20119+ return 0;
20120+ }
20121+
20122+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
20123+ /* write attempt to a protected page in user mode */
20124+ pte_unmap_unlock(pte, ptl);
20125+ return 0;
20126+ }
20127+
20128+#ifdef CONFIG_SMP
20129+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
20130+#else
20131+ if (likely(address > get_limit(regs->cs)))
20132+#endif
20133+ {
20134+ set_pte(pte, pte_mkread(*pte));
20135+ __flush_tlb_one(address);
20136+ pte_unmap_unlock(pte, ptl);
20137+ up_read(&mm->mmap_sem);
20138+ return 1;
20139+ }
20140+
20141+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
20142+
20143+ /*
20144+ * PaX: fill DTLB with user rights and retry
20145+ */
20146+ __asm__ __volatile__ (
20147+ "orb %2,(%1)\n"
20148+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
20149+/*
20150+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
20151+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
20152+ * page fault when examined during a TLB load attempt. this is true not only
20153+ * for PTEs holding a non-present entry but also present entries that will
20154+ * raise a page fault (such as those set up by PaX, or the copy-on-write
20155+ * mechanism). in effect it means that we do *not* need to flush the TLBs
20156+ * for our target pages since their PTEs are simply not in the TLBs at all.
20157+
20158+ * the best thing in omitting it is that we gain around 15-20% speed in the
20159+ * fast path of the page fault handler and can get rid of tracing since we
20160+ * can no longer flush unintended entries.
20161+ */
20162+ "invlpg (%0)\n"
20163+#endif
20164+ __copyuser_seg"testb $0,(%0)\n"
20165+ "xorb %3,(%1)\n"
20166+ :
20167+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
20168+ : "memory", "cc");
20169+ pte_unmap_unlock(pte, ptl);
20170+ up_read(&mm->mmap_sem);
20171+ return 1;
20172+}
20173+#endif
20174+
20175 /*
20176 * Handle a spurious fault caused by a stale TLB entry.
20177 *
20178@@ -956,6 +1145,9 @@ int show_unhandled_signals = 1;
20179 static inline int
20180 access_error(unsigned long error_code, struct vm_area_struct *vma)
20181 {
20182+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
20183+ return 1;
20184+
20185 if (error_code & PF_WRITE) {
20186 /* write, present and write, not present: */
20187 if (unlikely(!(vma->vm_flags & VM_WRITE)))
20188@@ -989,19 +1181,33 @@ do_page_fault(struct pt_regs *regs, unsi
20189 {
20190 struct vm_area_struct *vma;
20191 struct task_struct *tsk;
20192- unsigned long address;
20193 struct mm_struct *mm;
20194 int fault;
20195 int write = error_code & PF_WRITE;
20196 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
20197 (write ? FAULT_FLAG_WRITE : 0);
20198
20199+ /* Get the faulting address: */
20200+ unsigned long address = read_cr2();
20201+
20202+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20203+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
20204+ if (!search_exception_tables(regs->ip)) {
20205+ bad_area_nosemaphore(regs, error_code, address);
20206+ return;
20207+ }
20208+ if (address < PAX_USER_SHADOW_BASE) {
20209+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
20210+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
20211+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
20212+ } else
20213+ address -= PAX_USER_SHADOW_BASE;
20214+ }
20215+#endif
20216+
20217 tsk = current;
20218 mm = tsk->mm;
20219
20220- /* Get the faulting address: */
20221- address = read_cr2();
20222-
20223 /*
20224 * Detect and handle instructions that would cause a page fault for
20225 * both a tracked kernel page and a userspace page.
20226@@ -1061,7 +1267,7 @@ do_page_fault(struct pt_regs *regs, unsi
20227 * User-mode registers count as a user access even for any
20228 * potential system fault or CPU buglet:
20229 */
20230- if (user_mode_vm(regs)) {
20231+ if (user_mode(regs)) {
20232 local_irq_enable();
20233 error_code |= PF_USER;
20234 } else {
20235@@ -1116,6 +1322,11 @@ retry:
20236 might_sleep();
20237 }
20238
20239+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20240+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
20241+ return;
20242+#endif
20243+
20244 vma = find_vma(mm, address);
20245 if (unlikely(!vma)) {
20246 bad_area(regs, error_code, address);
20247@@ -1127,18 +1338,24 @@ retry:
20248 bad_area(regs, error_code, address);
20249 return;
20250 }
20251- if (error_code & PF_USER) {
20252- /*
20253- * Accessing the stack below %sp is always a bug.
20254- * The large cushion allows instructions like enter
20255- * and pusha to work. ("enter $65535, $31" pushes
20256- * 32 pointers and then decrements %sp by 65535.)
20257- */
20258- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
20259- bad_area(regs, error_code, address);
20260- return;
20261- }
20262+ /*
20263+ * Accessing the stack below %sp is always a bug.
20264+ * The large cushion allows instructions like enter
20265+ * and pusha to work. ("enter $65535, $31" pushes
20266+ * 32 pointers and then decrements %sp by 65535.)
20267+ */
20268+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
20269+ bad_area(regs, error_code, address);
20270+ return;
20271 }
20272+
20273+#ifdef CONFIG_PAX_SEGMEXEC
20274+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
20275+ bad_area(regs, error_code, address);
20276+ return;
20277+ }
20278+#endif
20279+
20280 if (unlikely(expand_stack(vma, address))) {
20281 bad_area(regs, error_code, address);
20282 return;
20283@@ -1193,3 +1410,240 @@ good_area:
20284
20285 up_read(&mm->mmap_sem);
20286 }
20287+
20288+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20289+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
20290+{
20291+ struct mm_struct *mm = current->mm;
20292+ unsigned long ip = regs->ip;
20293+
20294+ if (v8086_mode(regs))
20295+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
20296+
20297+#ifdef CONFIG_PAX_PAGEEXEC
20298+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
20299+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
20300+ return true;
20301+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
20302+ return true;
20303+ return false;
20304+ }
20305+#endif
20306+
20307+#ifdef CONFIG_PAX_SEGMEXEC
20308+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
20309+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
20310+ return true;
20311+ return false;
20312+ }
20313+#endif
20314+
20315+ return false;
20316+}
20317+#endif
20318+
20319+#ifdef CONFIG_PAX_EMUTRAMP
20320+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
20321+{
20322+ int err;
20323+
20324+ do { /* PaX: gcc trampoline emulation #1 */
20325+ unsigned char mov1, mov2;
20326+ unsigned short jmp;
20327+ unsigned int addr1, addr2;
20328+
20329+#ifdef CONFIG_X86_64
20330+ if ((regs->ip + 11) >> 32)
20331+ break;
20332+#endif
20333+
20334+ err = get_user(mov1, (unsigned char __user *)regs->ip);
20335+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20336+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
20337+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20338+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
20339+
20340+ if (err)
20341+ break;
20342+
20343+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
20344+ regs->cx = addr1;
20345+ regs->ax = addr2;
20346+ regs->ip = addr2;
20347+ return 2;
20348+ }
20349+ } while (0);
20350+
20351+ do { /* PaX: gcc trampoline emulation #2 */
20352+ unsigned char mov, jmp;
20353+ unsigned int addr1, addr2;
20354+
20355+#ifdef CONFIG_X86_64
20356+ if ((regs->ip + 9) >> 32)
20357+ break;
20358+#endif
20359+
20360+ err = get_user(mov, (unsigned char __user *)regs->ip);
20361+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20362+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
20363+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20364+
20365+ if (err)
20366+ break;
20367+
20368+ if (mov == 0xB9 && jmp == 0xE9) {
20369+ regs->cx = addr1;
20370+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
20371+ return 2;
20372+ }
20373+ } while (0);
20374+
20375+ return 1; /* PaX in action */
20376+}
20377+
20378+#ifdef CONFIG_X86_64
20379+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
20380+{
20381+ int err;
20382+
20383+ do { /* PaX: gcc trampoline emulation #1 */
20384+ unsigned short mov1, mov2, jmp1;
20385+ unsigned char jmp2;
20386+ unsigned int addr1;
20387+ unsigned long addr2;
20388+
20389+ err = get_user(mov1, (unsigned short __user *)regs->ip);
20390+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
20391+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
20392+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
20393+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
20394+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
20395+
20396+ if (err)
20397+ break;
20398+
20399+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
20400+ regs->r11 = addr1;
20401+ regs->r10 = addr2;
20402+ regs->ip = addr1;
20403+ return 2;
20404+ }
20405+ } while (0);
20406+
20407+ do { /* PaX: gcc trampoline emulation #2 */
20408+ unsigned short mov1, mov2, jmp1;
20409+ unsigned char jmp2;
20410+ unsigned long addr1, addr2;
20411+
20412+ err = get_user(mov1, (unsigned short __user *)regs->ip);
20413+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
20414+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
20415+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
20416+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
20417+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
20418+
20419+ if (err)
20420+ break;
20421+
20422+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
20423+ regs->r11 = addr1;
20424+ regs->r10 = addr2;
20425+ regs->ip = addr1;
20426+ return 2;
20427+ }
20428+ } while (0);
20429+
20430+ return 1; /* PaX in action */
20431+}
20432+#endif
20433+
20434+/*
20435+ * PaX: decide what to do with offenders (regs->ip = fault address)
20436+ *
20437+ * returns 1 when task should be killed
20438+ * 2 when gcc trampoline was detected
20439+ */
20440+static int pax_handle_fetch_fault(struct pt_regs *regs)
20441+{
20442+ if (v8086_mode(regs))
20443+ return 1;
20444+
20445+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
20446+ return 1;
20447+
20448+#ifdef CONFIG_X86_32
20449+ return pax_handle_fetch_fault_32(regs);
20450+#else
20451+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
20452+ return pax_handle_fetch_fault_32(regs);
20453+ else
20454+ return pax_handle_fetch_fault_64(regs);
20455+#endif
20456+}
20457+#endif
20458+
20459+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20460+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
20461+{
20462+ long i;
20463+
20464+ printk(KERN_ERR "PAX: bytes at PC: ");
20465+ for (i = 0; i < 20; i++) {
20466+ unsigned char c;
20467+ if (get_user(c, (unsigned char __force_user *)pc+i))
20468+ printk(KERN_CONT "?? ");
20469+ else
20470+ printk(KERN_CONT "%02x ", c);
20471+ }
20472+ printk("\n");
20473+
20474+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
20475+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
20476+ unsigned long c;
20477+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
20478+#ifdef CONFIG_X86_32
20479+ printk(KERN_CONT "???????? ");
20480+#else
20481+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
20482+ printk(KERN_CONT "???????? ???????? ");
20483+ else
20484+ printk(KERN_CONT "???????????????? ");
20485+#endif
20486+ } else {
20487+#ifdef CONFIG_X86_64
20488+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
20489+ printk(KERN_CONT "%08x ", (unsigned int)c);
20490+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
20491+ } else
20492+#endif
20493+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
20494+ }
20495+ }
20496+ printk("\n");
20497+}
20498+#endif
20499+
20500+/**
20501+ * probe_kernel_write(): safely attempt to write to a location
20502+ * @dst: address to write to
20503+ * @src: pointer to the data that shall be written
20504+ * @size: size of the data chunk
20505+ *
20506+ * Safely write to address @dst from the buffer at @src. If a kernel fault
20507+ * happens, handle that and return -EFAULT.
20508+ */
20509+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
20510+{
20511+ long ret;
20512+ mm_segment_t old_fs = get_fs();
20513+
20514+ set_fs(KERNEL_DS);
20515+ pagefault_disable();
20516+ pax_open_kernel();
20517+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
20518+ pax_close_kernel();
20519+ pagefault_enable();
20520+ set_fs(old_fs);
20521+
20522+ return ret ? -EFAULT : 0;
20523+}
20524diff -urNp linux-3.1.1/arch/x86/mm/gup.c linux-3.1.1/arch/x86/mm/gup.c
20525--- linux-3.1.1/arch/x86/mm/gup.c 2011-11-11 15:19:27.000000000 -0500
20526+++ linux-3.1.1/arch/x86/mm/gup.c 2011-11-16 18:39:07.000000000 -0500
20527@@ -253,7 +253,7 @@ int __get_user_pages_fast(unsigned long
20528 addr = start;
20529 len = (unsigned long) nr_pages << PAGE_SHIFT;
20530 end = start + len;
20531- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
20532+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
20533 (void __user *)start, len)))
20534 return 0;
20535
20536diff -urNp linux-3.1.1/arch/x86/mm/highmem_32.c linux-3.1.1/arch/x86/mm/highmem_32.c
20537--- linux-3.1.1/arch/x86/mm/highmem_32.c 2011-11-11 15:19:27.000000000 -0500
20538+++ linux-3.1.1/arch/x86/mm/highmem_32.c 2011-11-16 18:39:07.000000000 -0500
20539@@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
20540 idx = type + KM_TYPE_NR*smp_processor_id();
20541 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
20542 BUG_ON(!pte_none(*(kmap_pte-idx)));
20543+
20544+ pax_open_kernel();
20545 set_pte(kmap_pte-idx, mk_pte(page, prot));
20546+ pax_close_kernel();
20547
20548 return (void *)vaddr;
20549 }
20550diff -urNp linux-3.1.1/arch/x86/mm/hugetlbpage.c linux-3.1.1/arch/x86/mm/hugetlbpage.c
20551--- linux-3.1.1/arch/x86/mm/hugetlbpage.c 2011-11-11 15:19:27.000000000 -0500
20552+++ linux-3.1.1/arch/x86/mm/hugetlbpage.c 2011-11-16 18:39:07.000000000 -0500
20553@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
20554 struct hstate *h = hstate_file(file);
20555 struct mm_struct *mm = current->mm;
20556 struct vm_area_struct *vma;
20557- unsigned long start_addr;
20558+ unsigned long start_addr, pax_task_size = TASK_SIZE;
20559+
20560+#ifdef CONFIG_PAX_SEGMEXEC
20561+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20562+ pax_task_size = SEGMEXEC_TASK_SIZE;
20563+#endif
20564+
20565+ pax_task_size -= PAGE_SIZE;
20566
20567 if (len > mm->cached_hole_size) {
20568- start_addr = mm->free_area_cache;
20569+ start_addr = mm->free_area_cache;
20570 } else {
20571- start_addr = TASK_UNMAPPED_BASE;
20572- mm->cached_hole_size = 0;
20573+ start_addr = mm->mmap_base;
20574+ mm->cached_hole_size = 0;
20575 }
20576
20577 full_search:
20578@@ -280,26 +287,27 @@ full_search:
20579
20580 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
20581 /* At this point: (!vma || addr < vma->vm_end). */
20582- if (TASK_SIZE - len < addr) {
20583+ if (pax_task_size - len < addr) {
20584 /*
20585 * Start a new search - just in case we missed
20586 * some holes.
20587 */
20588- if (start_addr != TASK_UNMAPPED_BASE) {
20589- start_addr = TASK_UNMAPPED_BASE;
20590+ if (start_addr != mm->mmap_base) {
20591+ start_addr = mm->mmap_base;
20592 mm->cached_hole_size = 0;
20593 goto full_search;
20594 }
20595 return -ENOMEM;
20596 }
20597- if (!vma || addr + len <= vma->vm_start) {
20598- mm->free_area_cache = addr + len;
20599- return addr;
20600- }
20601+ if (check_heap_stack_gap(vma, addr, len))
20602+ break;
20603 if (addr + mm->cached_hole_size < vma->vm_start)
20604 mm->cached_hole_size = vma->vm_start - addr;
20605 addr = ALIGN(vma->vm_end, huge_page_size(h));
20606 }
20607+
20608+ mm->free_area_cache = addr + len;
20609+ return addr;
20610 }
20611
20612 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
20613@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
20614 {
20615 struct hstate *h = hstate_file(file);
20616 struct mm_struct *mm = current->mm;
20617- struct vm_area_struct *vma, *prev_vma;
20618- unsigned long base = mm->mmap_base, addr = addr0;
20619+ struct vm_area_struct *vma;
20620+ unsigned long base = mm->mmap_base, addr;
20621 unsigned long largest_hole = mm->cached_hole_size;
20622- int first_time = 1;
20623
20624 /* don't allow allocations above current base */
20625 if (mm->free_area_cache > base)
20626@@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
20627 largest_hole = 0;
20628 mm->free_area_cache = base;
20629 }
20630-try_again:
20631+
20632 /* make sure it can fit in the remaining address space */
20633 if (mm->free_area_cache < len)
20634 goto fail;
20635
20636 /* either no address requested or can't fit in requested address hole */
20637- addr = (mm->free_area_cache - len) & huge_page_mask(h);
20638+ addr = (mm->free_area_cache - len);
20639 do {
20640+ addr &= huge_page_mask(h);
20641+ vma = find_vma(mm, addr);
20642 /*
20643 * Lookup failure means no vma is above this address,
20644 * i.e. return with success:
20645- */
20646- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
20647- return addr;
20648-
20649- /*
20650 * new region fits between prev_vma->vm_end and
20651 * vma->vm_start, use it:
20652 */
20653- if (addr + len <= vma->vm_start &&
20654- (!prev_vma || (addr >= prev_vma->vm_end))) {
20655+ if (check_heap_stack_gap(vma, addr, len)) {
20656 /* remember the address as a hint for next time */
20657- mm->cached_hole_size = largest_hole;
20658- return (mm->free_area_cache = addr);
20659- } else {
20660- /* pull free_area_cache down to the first hole */
20661- if (mm->free_area_cache == vma->vm_end) {
20662- mm->free_area_cache = vma->vm_start;
20663- mm->cached_hole_size = largest_hole;
20664- }
20665+ mm->cached_hole_size = largest_hole;
20666+ return (mm->free_area_cache = addr);
20667+ }
20668+ /* pull free_area_cache down to the first hole */
20669+ if (mm->free_area_cache == vma->vm_end) {
20670+ mm->free_area_cache = vma->vm_start;
20671+ mm->cached_hole_size = largest_hole;
20672 }
20673
20674 /* remember the largest hole we saw so far */
20675 if (addr + largest_hole < vma->vm_start)
20676- largest_hole = vma->vm_start - addr;
20677+ largest_hole = vma->vm_start - addr;
20678
20679 /* try just below the current vma->vm_start */
20680- addr = (vma->vm_start - len) & huge_page_mask(h);
20681- } while (len <= vma->vm_start);
20682+ addr = skip_heap_stack_gap(vma, len);
20683+ } while (!IS_ERR_VALUE(addr));
20684
20685 fail:
20686 /*
20687- * if hint left us with no space for the requested
20688- * mapping then try again:
20689- */
20690- if (first_time) {
20691- mm->free_area_cache = base;
20692- largest_hole = 0;
20693- first_time = 0;
20694- goto try_again;
20695- }
20696- /*
20697 * A failed mmap() very likely causes application failure,
20698 * so fall back to the bottom-up function here. This scenario
20699 * can happen with large stack limits and large mmap()
20700 * allocations.
20701 */
20702- mm->free_area_cache = TASK_UNMAPPED_BASE;
20703+
20704+#ifdef CONFIG_PAX_SEGMEXEC
20705+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20706+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
20707+ else
20708+#endif
20709+
20710+ mm->mmap_base = TASK_UNMAPPED_BASE;
20711+
20712+#ifdef CONFIG_PAX_RANDMMAP
20713+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20714+ mm->mmap_base += mm->delta_mmap;
20715+#endif
20716+
20717+ mm->free_area_cache = mm->mmap_base;
20718 mm->cached_hole_size = ~0UL;
20719 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
20720 len, pgoff, flags);
20721@@ -386,6 +392,7 @@ fail:
20722 /*
20723 * Restore the topdown base:
20724 */
20725+ mm->mmap_base = base;
20726 mm->free_area_cache = base;
20727 mm->cached_hole_size = ~0UL;
20728
20729@@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
20730 struct hstate *h = hstate_file(file);
20731 struct mm_struct *mm = current->mm;
20732 struct vm_area_struct *vma;
20733+ unsigned long pax_task_size = TASK_SIZE;
20734
20735 if (len & ~huge_page_mask(h))
20736 return -EINVAL;
20737- if (len > TASK_SIZE)
20738+
20739+#ifdef CONFIG_PAX_SEGMEXEC
20740+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20741+ pax_task_size = SEGMEXEC_TASK_SIZE;
20742+#endif
20743+
20744+ pax_task_size -= PAGE_SIZE;
20745+
20746+ if (len > pax_task_size)
20747 return -ENOMEM;
20748
20749 if (flags & MAP_FIXED) {
20750@@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
20751 if (addr) {
20752 addr = ALIGN(addr, huge_page_size(h));
20753 vma = find_vma(mm, addr);
20754- if (TASK_SIZE - len >= addr &&
20755- (!vma || addr + len <= vma->vm_start))
20756+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
20757 return addr;
20758 }
20759 if (mm->get_unmapped_area == arch_get_unmapped_area)
20760diff -urNp linux-3.1.1/arch/x86/mm/init_32.c linux-3.1.1/arch/x86/mm/init_32.c
20761--- linux-3.1.1/arch/x86/mm/init_32.c 2011-11-11 15:19:27.000000000 -0500
20762+++ linux-3.1.1/arch/x86/mm/init_32.c 2011-11-16 18:39:07.000000000 -0500
20763@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
20764 }
20765
20766 /*
20767- * Creates a middle page table and puts a pointer to it in the
20768- * given global directory entry. This only returns the gd entry
20769- * in non-PAE compilation mode, since the middle layer is folded.
20770- */
20771-static pmd_t * __init one_md_table_init(pgd_t *pgd)
20772-{
20773- pud_t *pud;
20774- pmd_t *pmd_table;
20775-
20776-#ifdef CONFIG_X86_PAE
20777- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
20778- if (after_bootmem)
20779- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
20780- else
20781- pmd_table = (pmd_t *)alloc_low_page();
20782- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
20783- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
20784- pud = pud_offset(pgd, 0);
20785- BUG_ON(pmd_table != pmd_offset(pud, 0));
20786-
20787- return pmd_table;
20788- }
20789-#endif
20790- pud = pud_offset(pgd, 0);
20791- pmd_table = pmd_offset(pud, 0);
20792-
20793- return pmd_table;
20794-}
20795-
20796-/*
20797 * Create a page table and place a pointer to it in a middle page
20798 * directory entry:
20799 */
20800@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
20801 page_table = (pte_t *)alloc_low_page();
20802
20803 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
20804+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20805+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
20806+#else
20807 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
20808+#endif
20809 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
20810 }
20811
20812 return pte_offset_kernel(pmd, 0);
20813 }
20814
20815+static pmd_t * __init one_md_table_init(pgd_t *pgd)
20816+{
20817+ pud_t *pud;
20818+ pmd_t *pmd_table;
20819+
20820+ pud = pud_offset(pgd, 0);
20821+ pmd_table = pmd_offset(pud, 0);
20822+
20823+ return pmd_table;
20824+}
20825+
20826 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
20827 {
20828 int pgd_idx = pgd_index(vaddr);
20829@@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
20830 int pgd_idx, pmd_idx;
20831 unsigned long vaddr;
20832 pgd_t *pgd;
20833+ pud_t *pud;
20834 pmd_t *pmd;
20835 pte_t *pte = NULL;
20836
20837@@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
20838 pgd = pgd_base + pgd_idx;
20839
20840 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
20841- pmd = one_md_table_init(pgd);
20842- pmd = pmd + pmd_index(vaddr);
20843+ pud = pud_offset(pgd, vaddr);
20844+ pmd = pmd_offset(pud, vaddr);
20845+
20846+#ifdef CONFIG_X86_PAE
20847+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20848+#endif
20849+
20850 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
20851 pmd++, pmd_idx++) {
20852 pte = page_table_kmap_check(one_page_table_init(pmd),
20853@@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
20854 }
20855 }
20856
20857-static inline int is_kernel_text(unsigned long addr)
20858+static inline int is_kernel_text(unsigned long start, unsigned long end)
20859 {
20860- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
20861- return 1;
20862- return 0;
20863+ if ((start > ktla_ktva((unsigned long)_etext) ||
20864+ end <= ktla_ktva((unsigned long)_stext)) &&
20865+ (start > ktla_ktva((unsigned long)_einittext) ||
20866+ end <= ktla_ktva((unsigned long)_sinittext)) &&
20867+
20868+#ifdef CONFIG_ACPI_SLEEP
20869+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
20870+#endif
20871+
20872+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
20873+ return 0;
20874+ return 1;
20875 }
20876
20877 /*
20878@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
20879 unsigned long last_map_addr = end;
20880 unsigned long start_pfn, end_pfn;
20881 pgd_t *pgd_base = swapper_pg_dir;
20882- int pgd_idx, pmd_idx, pte_ofs;
20883+ unsigned int pgd_idx, pmd_idx, pte_ofs;
20884 unsigned long pfn;
20885 pgd_t *pgd;
20886+ pud_t *pud;
20887 pmd_t *pmd;
20888 pte_t *pte;
20889 unsigned pages_2m, pages_4k;
20890@@ -281,8 +282,13 @@ repeat:
20891 pfn = start_pfn;
20892 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20893 pgd = pgd_base + pgd_idx;
20894- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
20895- pmd = one_md_table_init(pgd);
20896+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
20897+ pud = pud_offset(pgd, 0);
20898+ pmd = pmd_offset(pud, 0);
20899+
20900+#ifdef CONFIG_X86_PAE
20901+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20902+#endif
20903
20904 if (pfn >= end_pfn)
20905 continue;
20906@@ -294,14 +300,13 @@ repeat:
20907 #endif
20908 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
20909 pmd++, pmd_idx++) {
20910- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
20911+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
20912
20913 /*
20914 * Map with big pages if possible, otherwise
20915 * create normal page tables:
20916 */
20917 if (use_pse) {
20918- unsigned int addr2;
20919 pgprot_t prot = PAGE_KERNEL_LARGE;
20920 /*
20921 * first pass will use the same initial
20922@@ -311,11 +316,7 @@ repeat:
20923 __pgprot(PTE_IDENT_ATTR |
20924 _PAGE_PSE);
20925
20926- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
20927- PAGE_OFFSET + PAGE_SIZE-1;
20928-
20929- if (is_kernel_text(addr) ||
20930- is_kernel_text(addr2))
20931+ if (is_kernel_text(address, address + PMD_SIZE))
20932 prot = PAGE_KERNEL_LARGE_EXEC;
20933
20934 pages_2m++;
20935@@ -332,7 +333,7 @@ repeat:
20936 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20937 pte += pte_ofs;
20938 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
20939- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
20940+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
20941 pgprot_t prot = PAGE_KERNEL;
20942 /*
20943 * first pass will use the same initial
20944@@ -340,7 +341,7 @@ repeat:
20945 */
20946 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
20947
20948- if (is_kernel_text(addr))
20949+ if (is_kernel_text(address, address + PAGE_SIZE))
20950 prot = PAGE_KERNEL_EXEC;
20951
20952 pages_4k++;
20953@@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
20954
20955 pud = pud_offset(pgd, va);
20956 pmd = pmd_offset(pud, va);
20957- if (!pmd_present(*pmd))
20958+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
20959 break;
20960
20961 pte = pte_offset_kernel(pmd, va);
20962@@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
20963
20964 static void __init pagetable_init(void)
20965 {
20966- pgd_t *pgd_base = swapper_pg_dir;
20967-
20968- permanent_kmaps_init(pgd_base);
20969+ permanent_kmaps_init(swapper_pg_dir);
20970 }
20971
20972-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
20973+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
20974 EXPORT_SYMBOL_GPL(__supported_pte_mask);
20975
20976 /* user-defined highmem size */
20977@@ -757,6 +756,12 @@ void __init mem_init(void)
20978
20979 pci_iommu_alloc();
20980
20981+#ifdef CONFIG_PAX_PER_CPU_PGD
20982+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
20983+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20984+ KERNEL_PGD_PTRS);
20985+#endif
20986+
20987 #ifdef CONFIG_FLATMEM
20988 BUG_ON(!mem_map);
20989 #endif
20990@@ -774,7 +779,7 @@ void __init mem_init(void)
20991 set_highmem_pages_init();
20992
20993 codesize = (unsigned long) &_etext - (unsigned long) &_text;
20994- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
20995+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
20996 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
20997
20998 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
20999@@ -815,10 +820,10 @@ void __init mem_init(void)
21000 ((unsigned long)&__init_end -
21001 (unsigned long)&__init_begin) >> 10,
21002
21003- (unsigned long)&_etext, (unsigned long)&_edata,
21004- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
21005+ (unsigned long)&_sdata, (unsigned long)&_edata,
21006+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
21007
21008- (unsigned long)&_text, (unsigned long)&_etext,
21009+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
21010 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
21011
21012 /*
21013@@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
21014 if (!kernel_set_to_readonly)
21015 return;
21016
21017+ start = ktla_ktva(start);
21018 pr_debug("Set kernel text: %lx - %lx for read write\n",
21019 start, start+size);
21020
21021@@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
21022 if (!kernel_set_to_readonly)
21023 return;
21024
21025+ start = ktla_ktva(start);
21026 pr_debug("Set kernel text: %lx - %lx for read only\n",
21027 start, start+size);
21028
21029@@ -938,6 +945,7 @@ void mark_rodata_ro(void)
21030 unsigned long start = PFN_ALIGN(_text);
21031 unsigned long size = PFN_ALIGN(_etext) - start;
21032
21033+ start = ktla_ktva(start);
21034 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
21035 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
21036 size >> 10);
21037diff -urNp linux-3.1.1/arch/x86/mm/init_64.c linux-3.1.1/arch/x86/mm/init_64.c
21038--- linux-3.1.1/arch/x86/mm/init_64.c 2011-11-11 15:19:27.000000000 -0500
21039+++ linux-3.1.1/arch/x86/mm/init_64.c 2011-11-16 18:39:07.000000000 -0500
21040@@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpa
21041 * around without checking the pgd every time.
21042 */
21043
21044-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
21045+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
21046 EXPORT_SYMBOL_GPL(__supported_pte_mask);
21047
21048 int force_personality32;
21049@@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long star
21050
21051 for (address = start; address <= end; address += PGDIR_SIZE) {
21052 const pgd_t *pgd_ref = pgd_offset_k(address);
21053+
21054+#ifdef CONFIG_PAX_PER_CPU_PGD
21055+ unsigned long cpu;
21056+#else
21057 struct page *page;
21058+#endif
21059
21060 if (pgd_none(*pgd_ref))
21061 continue;
21062
21063 spin_lock(&pgd_lock);
21064+
21065+#ifdef CONFIG_PAX_PER_CPU_PGD
21066+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21067+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
21068+#else
21069 list_for_each_entry(page, &pgd_list, lru) {
21070 pgd_t *pgd;
21071 spinlock_t *pgt_lock;
21072@@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long star
21073 /* the pgt_lock only for Xen */
21074 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
21075 spin_lock(pgt_lock);
21076+#endif
21077
21078 if (pgd_none(*pgd))
21079 set_pgd(pgd, *pgd_ref);
21080@@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long star
21081 BUG_ON(pgd_page_vaddr(*pgd)
21082 != pgd_page_vaddr(*pgd_ref));
21083
21084+#ifndef CONFIG_PAX_PER_CPU_PGD
21085 spin_unlock(pgt_lock);
21086+#endif
21087+
21088 }
21089 spin_unlock(&pgd_lock);
21090 }
21091@@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
21092 pmd = fill_pmd(pud, vaddr);
21093 pte = fill_pte(pmd, vaddr);
21094
21095+ pax_open_kernel();
21096 set_pte(pte, new_pte);
21097+ pax_close_kernel();
21098
21099 /*
21100 * It's enough to flush this one mapping.
21101@@ -262,14 +278,12 @@ static void __init __init_extra_mapping(
21102 pgd = pgd_offset_k((unsigned long)__va(phys));
21103 if (pgd_none(*pgd)) {
21104 pud = (pud_t *) spp_getpage();
21105- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
21106- _PAGE_USER));
21107+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
21108 }
21109 pud = pud_offset(pgd, (unsigned long)__va(phys));
21110 if (pud_none(*pud)) {
21111 pmd = (pmd_t *) spp_getpage();
21112- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
21113- _PAGE_USER));
21114+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
21115 }
21116 pmd = pmd_offset(pud, phys);
21117 BUG_ON(!pmd_none(*pmd));
21118@@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsign
21119 if (pfn >= pgt_buf_top)
21120 panic("alloc_low_page: ran out of memory");
21121
21122- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
21123+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
21124 clear_page(adr);
21125 *phys = pfn * PAGE_SIZE;
21126 return adr;
21127@@ -346,7 +360,7 @@ static __ref void *map_low_page(void *vi
21128
21129 phys = __pa(virt);
21130 left = phys & (PAGE_SIZE - 1);
21131- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
21132+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
21133 adr = (void *)(((unsigned long)adr) | left);
21134
21135 return adr;
21136@@ -693,6 +707,12 @@ void __init mem_init(void)
21137
21138 pci_iommu_alloc();
21139
21140+#ifdef CONFIG_PAX_PER_CPU_PGD
21141+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21142+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21143+ KERNEL_PGD_PTRS);
21144+#endif
21145+
21146 /* clear_bss() already clear the empty_zero_page */
21147
21148 reservedpages = 0;
21149@@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
21150 static struct vm_area_struct gate_vma = {
21151 .vm_start = VSYSCALL_START,
21152 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
21153- .vm_page_prot = PAGE_READONLY_EXEC,
21154- .vm_flags = VM_READ | VM_EXEC
21155+ .vm_page_prot = PAGE_READONLY,
21156+ .vm_flags = VM_READ
21157 };
21158
21159 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
21160@@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long add
21161
21162 const char *arch_vma_name(struct vm_area_struct *vma)
21163 {
21164- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21165+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21166 return "[vdso]";
21167 if (vma == &gate_vma)
21168 return "[vsyscall]";
21169diff -urNp linux-3.1.1/arch/x86/mm/init.c linux-3.1.1/arch/x86/mm/init.c
21170--- linux-3.1.1/arch/x86/mm/init.c 2011-11-11 15:19:27.000000000 -0500
21171+++ linux-3.1.1/arch/x86/mm/init.c 2011-11-16 18:40:08.000000000 -0500
21172@@ -31,7 +31,7 @@ int direct_gbpages
21173 static void __init find_early_table_space(unsigned long end, int use_pse,
21174 int use_gbpages)
21175 {
21176- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
21177+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
21178 phys_addr_t base;
21179
21180 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
21181@@ -312,12 +312,34 @@ unsigned long __init_refok init_memory_m
21182 */
21183 int devmem_is_allowed(unsigned long pagenr)
21184 {
21185- if (pagenr <= 256)
21186+#ifdef CONFIG_GRKERNSEC_KMEM
21187+ /* allow BDA */
21188+ if (!pagenr)
21189+ return 1;
21190+ /* allow EBDA */
21191+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
21192+ return 1;
21193+#else
21194+ if (!pagenr)
21195+ return 1;
21196+#ifdef CONFIG_VM86
21197+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
21198+ return 1;
21199+#endif
21200+#endif
21201+
21202+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
21203 return 1;
21204+#ifdef CONFIG_GRKERNSEC_KMEM
21205+ /* throw out everything else below 1MB */
21206+ if (pagenr <= 256)
21207+ return 0;
21208+#endif
21209 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
21210 return 0;
21211 if (!page_is_ram(pagenr))
21212 return 1;
21213+
21214 return 0;
21215 }
21216
21217@@ -372,6 +394,86 @@ void free_init_pages(char *what, unsigne
21218
21219 void free_initmem(void)
21220 {
21221+
21222+#ifdef CONFIG_PAX_KERNEXEC
21223+#ifdef CONFIG_X86_32
21224+ /* PaX: limit KERNEL_CS to actual size */
21225+ unsigned long addr, limit;
21226+ struct desc_struct d;
21227+ int cpu;
21228+
21229+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
21230+ limit = (limit - 1UL) >> PAGE_SHIFT;
21231+
21232+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
21233+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
21234+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
21235+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
21236+ }
21237+
21238+ /* PaX: make KERNEL_CS read-only */
21239+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
21240+ if (!paravirt_enabled())
21241+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
21242+/*
21243+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
21244+ pgd = pgd_offset_k(addr);
21245+ pud = pud_offset(pgd, addr);
21246+ pmd = pmd_offset(pud, addr);
21247+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21248+ }
21249+*/
21250+#ifdef CONFIG_X86_PAE
21251+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
21252+/*
21253+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
21254+ pgd = pgd_offset_k(addr);
21255+ pud = pud_offset(pgd, addr);
21256+ pmd = pmd_offset(pud, addr);
21257+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21258+ }
21259+*/
21260+#endif
21261+
21262+#ifdef CONFIG_MODULES
21263+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
21264+#endif
21265+
21266+#else
21267+ pgd_t *pgd;
21268+ pud_t *pud;
21269+ pmd_t *pmd;
21270+ unsigned long addr, end;
21271+
21272+ /* PaX: make kernel code/rodata read-only, rest non-executable */
21273+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
21274+ pgd = pgd_offset_k(addr);
21275+ pud = pud_offset(pgd, addr);
21276+ pmd = pmd_offset(pud, addr);
21277+ if (!pmd_present(*pmd))
21278+ continue;
21279+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
21280+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21281+ else
21282+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21283+ }
21284+
21285+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
21286+ end = addr + KERNEL_IMAGE_SIZE;
21287+ for (; addr < end; addr += PMD_SIZE) {
21288+ pgd = pgd_offset_k(addr);
21289+ pud = pud_offset(pgd, addr);
21290+ pmd = pmd_offset(pud, addr);
21291+ if (!pmd_present(*pmd))
21292+ continue;
21293+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
21294+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21295+ }
21296+#endif
21297+
21298+ flush_tlb_all();
21299+#endif
21300+
21301 free_init_pages("unused kernel memory",
21302 (unsigned long)(&__init_begin),
21303 (unsigned long)(&__init_end));
21304diff -urNp linux-3.1.1/arch/x86/mm/iomap_32.c linux-3.1.1/arch/x86/mm/iomap_32.c
21305--- linux-3.1.1/arch/x86/mm/iomap_32.c 2011-11-11 15:19:27.000000000 -0500
21306+++ linux-3.1.1/arch/x86/mm/iomap_32.c 2011-11-16 18:39:07.000000000 -0500
21307@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
21308 type = kmap_atomic_idx_push();
21309 idx = type + KM_TYPE_NR * smp_processor_id();
21310 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21311+
21312+ pax_open_kernel();
21313 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
21314+ pax_close_kernel();
21315+
21316 arch_flush_lazy_mmu_mode();
21317
21318 return (void *)vaddr;
21319diff -urNp linux-3.1.1/arch/x86/mm/ioremap.c linux-3.1.1/arch/x86/mm/ioremap.c
21320--- linux-3.1.1/arch/x86/mm/ioremap.c 2011-11-11 15:19:27.000000000 -0500
21321+++ linux-3.1.1/arch/x86/mm/ioremap.c 2011-11-16 18:39:07.000000000 -0500
21322@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(re
21323 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
21324 int is_ram = page_is_ram(pfn);
21325
21326- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
21327+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
21328 return NULL;
21329 WARN_ON_ONCE(is_ram);
21330 }
21331@@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
21332 early_param("early_ioremap_debug", early_ioremap_debug_setup);
21333
21334 static __initdata int after_paging_init;
21335-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
21336+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
21337
21338 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
21339 {
21340@@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
21341 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
21342
21343 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
21344- memset(bm_pte, 0, sizeof(bm_pte));
21345- pmd_populate_kernel(&init_mm, pmd, bm_pte);
21346+ pmd_populate_user(&init_mm, pmd, bm_pte);
21347
21348 /*
21349 * The boot-ioremap range spans multiple pmds, for which
21350diff -urNp linux-3.1.1/arch/x86/mm/kmemcheck/kmemcheck.c linux-3.1.1/arch/x86/mm/kmemcheck/kmemcheck.c
21351--- linux-3.1.1/arch/x86/mm/kmemcheck/kmemcheck.c 2011-11-11 15:19:27.000000000 -0500
21352+++ linux-3.1.1/arch/x86/mm/kmemcheck/kmemcheck.c 2011-11-16 18:39:07.000000000 -0500
21353@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
21354 * memory (e.g. tracked pages)? For now, we need this to avoid
21355 * invoking kmemcheck for PnP BIOS calls.
21356 */
21357- if (regs->flags & X86_VM_MASK)
21358+ if (v8086_mode(regs))
21359 return false;
21360- if (regs->cs != __KERNEL_CS)
21361+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
21362 return false;
21363
21364 pte = kmemcheck_pte_lookup(address);
21365diff -urNp linux-3.1.1/arch/x86/mm/mmap.c linux-3.1.1/arch/x86/mm/mmap.c
21366--- linux-3.1.1/arch/x86/mm/mmap.c 2011-11-11 15:19:27.000000000 -0500
21367+++ linux-3.1.1/arch/x86/mm/mmap.c 2011-11-16 18:39:07.000000000 -0500
21368@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
21369 * Leave an at least ~128 MB hole with possible stack randomization.
21370 */
21371 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
21372-#define MAX_GAP (TASK_SIZE/6*5)
21373+#define MAX_GAP (pax_task_size/6*5)
21374
21375 /*
21376 * True on X86_32 or when emulating IA32 on X86_64
21377@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
21378 return rnd << PAGE_SHIFT;
21379 }
21380
21381-static unsigned long mmap_base(void)
21382+static unsigned long mmap_base(struct mm_struct *mm)
21383 {
21384 unsigned long gap = rlimit(RLIMIT_STACK);
21385+ unsigned long pax_task_size = TASK_SIZE;
21386+
21387+#ifdef CONFIG_PAX_SEGMEXEC
21388+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21389+ pax_task_size = SEGMEXEC_TASK_SIZE;
21390+#endif
21391
21392 if (gap < MIN_GAP)
21393 gap = MIN_GAP;
21394 else if (gap > MAX_GAP)
21395 gap = MAX_GAP;
21396
21397- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
21398+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
21399 }
21400
21401 /*
21402 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
21403 * does, but not when emulating X86_32
21404 */
21405-static unsigned long mmap_legacy_base(void)
21406+static unsigned long mmap_legacy_base(struct mm_struct *mm)
21407 {
21408- if (mmap_is_ia32())
21409+ if (mmap_is_ia32()) {
21410+
21411+#ifdef CONFIG_PAX_SEGMEXEC
21412+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21413+ return SEGMEXEC_TASK_UNMAPPED_BASE;
21414+ else
21415+#endif
21416+
21417 return TASK_UNMAPPED_BASE;
21418- else
21419+ } else
21420 return TASK_UNMAPPED_BASE + mmap_rnd();
21421 }
21422
21423@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
21424 void arch_pick_mmap_layout(struct mm_struct *mm)
21425 {
21426 if (mmap_is_legacy()) {
21427- mm->mmap_base = mmap_legacy_base();
21428+ mm->mmap_base = mmap_legacy_base(mm);
21429+
21430+#ifdef CONFIG_PAX_RANDMMAP
21431+ if (mm->pax_flags & MF_PAX_RANDMMAP)
21432+ mm->mmap_base += mm->delta_mmap;
21433+#endif
21434+
21435 mm->get_unmapped_area = arch_get_unmapped_area;
21436 mm->unmap_area = arch_unmap_area;
21437 } else {
21438- mm->mmap_base = mmap_base();
21439+ mm->mmap_base = mmap_base(mm);
21440+
21441+#ifdef CONFIG_PAX_RANDMMAP
21442+ if (mm->pax_flags & MF_PAX_RANDMMAP)
21443+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
21444+#endif
21445+
21446 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
21447 mm->unmap_area = arch_unmap_area_topdown;
21448 }
21449diff -urNp linux-3.1.1/arch/x86/mm/mmio-mod.c linux-3.1.1/arch/x86/mm/mmio-mod.c
21450--- linux-3.1.1/arch/x86/mm/mmio-mod.c 2011-11-11 15:19:27.000000000 -0500
21451+++ linux-3.1.1/arch/x86/mm/mmio-mod.c 2011-11-16 18:39:07.000000000 -0500
21452@@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
21453 break;
21454 default:
21455 {
21456- unsigned char *ip = (unsigned char *)instptr;
21457+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
21458 my_trace->opcode = MMIO_UNKNOWN_OP;
21459 my_trace->width = 0;
21460 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
21461@@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p,
21462 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
21463 void __iomem *addr)
21464 {
21465- static atomic_t next_id;
21466+ static atomic_unchecked_t next_id;
21467 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
21468 /* These are page-unaligned. */
21469 struct mmiotrace_map map = {
21470@@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
21471 .private = trace
21472 },
21473 .phys = offset,
21474- .id = atomic_inc_return(&next_id)
21475+ .id = atomic_inc_return_unchecked(&next_id)
21476 };
21477 map.map_id = trace->id;
21478
21479diff -urNp linux-3.1.1/arch/x86/mm/pageattr.c linux-3.1.1/arch/x86/mm/pageattr.c
21480--- linux-3.1.1/arch/x86/mm/pageattr.c 2011-11-11 15:19:27.000000000 -0500
21481+++ linux-3.1.1/arch/x86/mm/pageattr.c 2011-11-16 18:39:07.000000000 -0500
21482@@ -261,7 +261,7 @@ static inline pgprot_t static_protection
21483 */
21484 #ifdef CONFIG_PCI_BIOS
21485 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
21486- pgprot_val(forbidden) |= _PAGE_NX;
21487+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21488 #endif
21489
21490 /*
21491@@ -269,9 +269,10 @@ static inline pgprot_t static_protection
21492 * Does not cover __inittext since that is gone later on. On
21493 * 64bit we do not enforce !NX on the low mapping
21494 */
21495- if (within(address, (unsigned long)_text, (unsigned long)_etext))
21496- pgprot_val(forbidden) |= _PAGE_NX;
21497+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
21498+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21499
21500+#ifdef CONFIG_DEBUG_RODATA
21501 /*
21502 * The .rodata section needs to be read-only. Using the pfn
21503 * catches all aliases.
21504@@ -279,6 +280,7 @@ static inline pgprot_t static_protection
21505 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
21506 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
21507 pgprot_val(forbidden) |= _PAGE_RW;
21508+#endif
21509
21510 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
21511 /*
21512@@ -317,6 +319,13 @@ static inline pgprot_t static_protection
21513 }
21514 #endif
21515
21516+#ifdef CONFIG_PAX_KERNEXEC
21517+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
21518+ pgprot_val(forbidden) |= _PAGE_RW;
21519+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21520+ }
21521+#endif
21522+
21523 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
21524
21525 return prot;
21526@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
21527 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
21528 {
21529 /* change init_mm */
21530+ pax_open_kernel();
21531 set_pte_atomic(kpte, pte);
21532+
21533 #ifdef CONFIG_X86_32
21534 if (!SHARED_KERNEL_PMD) {
21535+
21536+#ifdef CONFIG_PAX_PER_CPU_PGD
21537+ unsigned long cpu;
21538+#else
21539 struct page *page;
21540+#endif
21541
21542+#ifdef CONFIG_PAX_PER_CPU_PGD
21543+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21544+ pgd_t *pgd = get_cpu_pgd(cpu);
21545+#else
21546 list_for_each_entry(page, &pgd_list, lru) {
21547- pgd_t *pgd;
21548+ pgd_t *pgd = (pgd_t *)page_address(page);
21549+#endif
21550+
21551 pud_t *pud;
21552 pmd_t *pmd;
21553
21554- pgd = (pgd_t *)page_address(page) + pgd_index(address);
21555+ pgd += pgd_index(address);
21556 pud = pud_offset(pgd, address);
21557 pmd = pmd_offset(pud, address);
21558 set_pte_atomic((pte_t *)pmd, pte);
21559 }
21560 }
21561 #endif
21562+ pax_close_kernel();
21563 }
21564
21565 static int
21566diff -urNp linux-3.1.1/arch/x86/mm/pageattr-test.c linux-3.1.1/arch/x86/mm/pageattr-test.c
21567--- linux-3.1.1/arch/x86/mm/pageattr-test.c 2011-11-11 15:19:27.000000000 -0500
21568+++ linux-3.1.1/arch/x86/mm/pageattr-test.c 2011-11-16 18:39:07.000000000 -0500
21569@@ -36,7 +36,7 @@ enum {
21570
21571 static int pte_testbit(pte_t pte)
21572 {
21573- return pte_flags(pte) & _PAGE_UNUSED1;
21574+ return pte_flags(pte) & _PAGE_CPA_TEST;
21575 }
21576
21577 struct split_state {
21578diff -urNp linux-3.1.1/arch/x86/mm/pat.c linux-3.1.1/arch/x86/mm/pat.c
21579--- linux-3.1.1/arch/x86/mm/pat.c 2011-11-11 15:19:27.000000000 -0500
21580+++ linux-3.1.1/arch/x86/mm/pat.c 2011-11-16 18:39:07.000000000 -0500
21581@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
21582
21583 if (!entry) {
21584 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
21585- current->comm, current->pid, start, end);
21586+ current->comm, task_pid_nr(current), start, end);
21587 return -EINVAL;
21588 }
21589
21590@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
21591 while (cursor < to) {
21592 if (!devmem_is_allowed(pfn)) {
21593 printk(KERN_INFO
21594- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
21595- current->comm, from, to);
21596+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
21597+ current->comm, from, to, cursor);
21598 return 0;
21599 }
21600 cursor += PAGE_SIZE;
21601@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
21602 printk(KERN_INFO
21603 "%s:%d ioremap_change_attr failed %s "
21604 "for %Lx-%Lx\n",
21605- current->comm, current->pid,
21606+ current->comm, task_pid_nr(current),
21607 cattr_name(flags),
21608 base, (unsigned long long)(base + size));
21609 return -EINVAL;
21610@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
21611 if (want_flags != flags) {
21612 printk(KERN_WARNING
21613 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
21614- current->comm, current->pid,
21615+ current->comm, task_pid_nr(current),
21616 cattr_name(want_flags),
21617 (unsigned long long)paddr,
21618 (unsigned long long)(paddr + size),
21619@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
21620 free_memtype(paddr, paddr + size);
21621 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
21622 " for %Lx-%Lx, got %s\n",
21623- current->comm, current->pid,
21624+ current->comm, task_pid_nr(current),
21625 cattr_name(want_flags),
21626 (unsigned long long)paddr,
21627 (unsigned long long)(paddr + size),
21628diff -urNp linux-3.1.1/arch/x86/mm/pf_in.c linux-3.1.1/arch/x86/mm/pf_in.c
21629--- linux-3.1.1/arch/x86/mm/pf_in.c 2011-11-11 15:19:27.000000000 -0500
21630+++ linux-3.1.1/arch/x86/mm/pf_in.c 2011-11-16 18:39:07.000000000 -0500
21631@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
21632 int i;
21633 enum reason_type rv = OTHERS;
21634
21635- p = (unsigned char *)ins_addr;
21636+ p = (unsigned char *)ktla_ktva(ins_addr);
21637 p += skip_prefix(p, &prf);
21638 p += get_opcode(p, &opcode);
21639
21640@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
21641 struct prefix_bits prf;
21642 int i;
21643
21644- p = (unsigned char *)ins_addr;
21645+ p = (unsigned char *)ktla_ktva(ins_addr);
21646 p += skip_prefix(p, &prf);
21647 p += get_opcode(p, &opcode);
21648
21649@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
21650 struct prefix_bits prf;
21651 int i;
21652
21653- p = (unsigned char *)ins_addr;
21654+ p = (unsigned char *)ktla_ktva(ins_addr);
21655 p += skip_prefix(p, &prf);
21656 p += get_opcode(p, &opcode);
21657
21658@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned l
21659 struct prefix_bits prf;
21660 int i;
21661
21662- p = (unsigned char *)ins_addr;
21663+ p = (unsigned char *)ktla_ktva(ins_addr);
21664 p += skip_prefix(p, &prf);
21665 p += get_opcode(p, &opcode);
21666 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
21667@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned l
21668 struct prefix_bits prf;
21669 int i;
21670
21671- p = (unsigned char *)ins_addr;
21672+ p = (unsigned char *)ktla_ktva(ins_addr);
21673 p += skip_prefix(p, &prf);
21674 p += get_opcode(p, &opcode);
21675 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
21676diff -urNp linux-3.1.1/arch/x86/mm/pgtable_32.c linux-3.1.1/arch/x86/mm/pgtable_32.c
21677--- linux-3.1.1/arch/x86/mm/pgtable_32.c 2011-11-11 15:19:27.000000000 -0500
21678+++ linux-3.1.1/arch/x86/mm/pgtable_32.c 2011-11-16 18:39:07.000000000 -0500
21679@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
21680 return;
21681 }
21682 pte = pte_offset_kernel(pmd, vaddr);
21683+
21684+ pax_open_kernel();
21685 if (pte_val(pteval))
21686 set_pte_at(&init_mm, vaddr, pte, pteval);
21687 else
21688 pte_clear(&init_mm, vaddr, pte);
21689+ pax_close_kernel();
21690
21691 /*
21692 * It's enough to flush this one mapping.
21693diff -urNp linux-3.1.1/arch/x86/mm/pgtable.c linux-3.1.1/arch/x86/mm/pgtable.c
21694--- linux-3.1.1/arch/x86/mm/pgtable.c 2011-11-11 15:19:27.000000000 -0500
21695+++ linux-3.1.1/arch/x86/mm/pgtable.c 2011-11-16 18:39:07.000000000 -0500
21696@@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
21697 list_del(&page->lru);
21698 }
21699
21700-#define UNSHARED_PTRS_PER_PGD \
21701- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21702+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21703+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
21704
21705+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21706+{
21707+ while (count--)
21708+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
21709+}
21710+#endif
21711+
21712+#ifdef CONFIG_PAX_PER_CPU_PGD
21713+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21714+{
21715+ while (count--)
21716+
21717+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21718+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
21719+#else
21720+ *dst++ = *src++;
21721+#endif
21722
21723+}
21724+#endif
21725+
21726+#ifdef CONFIG_X86_64
21727+#define pxd_t pud_t
21728+#define pyd_t pgd_t
21729+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
21730+#define pxd_free(mm, pud) pud_free((mm), (pud))
21731+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
21732+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
21733+#define PYD_SIZE PGDIR_SIZE
21734+#else
21735+#define pxd_t pmd_t
21736+#define pyd_t pud_t
21737+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
21738+#define pxd_free(mm, pud) pmd_free((mm), (pud))
21739+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
21740+#define pyd_offset(mm ,address) pud_offset((mm), (address))
21741+#define PYD_SIZE PUD_SIZE
21742+#endif
21743+
21744+#ifdef CONFIG_PAX_PER_CPU_PGD
21745+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
21746+static inline void pgd_dtor(pgd_t *pgd) {}
21747+#else
21748 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
21749 {
21750 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
21751@@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
21752 pgd_list_del(pgd);
21753 spin_unlock(&pgd_lock);
21754 }
21755+#endif
21756
21757 /*
21758 * List of all pgd's needed for non-PAE so it can invalidate entries
21759@@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
21760 * -- wli
21761 */
21762
21763-#ifdef CONFIG_X86_PAE
21764+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21765 /*
21766 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
21767 * updating the top-level pagetable entries to guarantee the
21768@@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
21769 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
21770 * and initialize the kernel pmds here.
21771 */
21772-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
21773+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21774
21775 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
21776 {
21777@@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm,
21778 */
21779 flush_tlb_mm(mm);
21780 }
21781+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
21782+#define PREALLOCATED_PXDS USER_PGD_PTRS
21783 #else /* !CONFIG_X86_PAE */
21784
21785 /* No need to prepopulate any pagetable entries in non-PAE modes. */
21786-#define PREALLOCATED_PMDS 0
21787+#define PREALLOCATED_PXDS 0
21788
21789 #endif /* CONFIG_X86_PAE */
21790
21791-static void free_pmds(pmd_t *pmds[])
21792+static void free_pxds(pxd_t *pxds[])
21793 {
21794 int i;
21795
21796- for(i = 0; i < PREALLOCATED_PMDS; i++)
21797- if (pmds[i])
21798- free_page((unsigned long)pmds[i]);
21799+ for(i = 0; i < PREALLOCATED_PXDS; i++)
21800+ if (pxds[i])
21801+ free_page((unsigned long)pxds[i]);
21802 }
21803
21804-static int preallocate_pmds(pmd_t *pmds[])
21805+static int preallocate_pxds(pxd_t *pxds[])
21806 {
21807 int i;
21808 bool failed = false;
21809
21810- for(i = 0; i < PREALLOCATED_PMDS; i++) {
21811- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
21812- if (pmd == NULL)
21813+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
21814+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
21815+ if (pxd == NULL)
21816 failed = true;
21817- pmds[i] = pmd;
21818+ pxds[i] = pxd;
21819 }
21820
21821 if (failed) {
21822- free_pmds(pmds);
21823+ free_pxds(pxds);
21824 return -ENOMEM;
21825 }
21826
21827@@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
21828 * preallocate which never got a corresponding vma will need to be
21829 * freed manually.
21830 */
21831-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
21832+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
21833 {
21834 int i;
21835
21836- for(i = 0; i < PREALLOCATED_PMDS; i++) {
21837+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
21838 pgd_t pgd = pgdp[i];
21839
21840 if (pgd_val(pgd) != 0) {
21841- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
21842+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
21843
21844- pgdp[i] = native_make_pgd(0);
21845+ set_pgd(pgdp + i, native_make_pgd(0));
21846
21847- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
21848- pmd_free(mm, pmd);
21849+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
21850+ pxd_free(mm, pxd);
21851 }
21852 }
21853 }
21854
21855-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
21856+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
21857 {
21858- pud_t *pud;
21859+ pyd_t *pyd;
21860 unsigned long addr;
21861 int i;
21862
21863- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
21864+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
21865 return;
21866
21867- pud = pud_offset(pgd, 0);
21868+#ifdef CONFIG_X86_64
21869+ pyd = pyd_offset(mm, 0L);
21870+#else
21871+ pyd = pyd_offset(pgd, 0L);
21872+#endif
21873
21874- for (addr = i = 0; i < PREALLOCATED_PMDS;
21875- i++, pud++, addr += PUD_SIZE) {
21876- pmd_t *pmd = pmds[i];
21877+ for (addr = i = 0; i < PREALLOCATED_PXDS;
21878+ i++, pyd++, addr += PYD_SIZE) {
21879+ pxd_t *pxd = pxds[i];
21880
21881 if (i >= KERNEL_PGD_BOUNDARY)
21882- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21883- sizeof(pmd_t) * PTRS_PER_PMD);
21884+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21885+ sizeof(pxd_t) * PTRS_PER_PMD);
21886
21887- pud_populate(mm, pud, pmd);
21888+ pyd_populate(mm, pyd, pxd);
21889 }
21890 }
21891
21892 pgd_t *pgd_alloc(struct mm_struct *mm)
21893 {
21894 pgd_t *pgd;
21895- pmd_t *pmds[PREALLOCATED_PMDS];
21896+ pxd_t *pxds[PREALLOCATED_PXDS];
21897
21898 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
21899
21900@@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21901
21902 mm->pgd = pgd;
21903
21904- if (preallocate_pmds(pmds) != 0)
21905+ if (preallocate_pxds(pxds) != 0)
21906 goto out_free_pgd;
21907
21908 if (paravirt_pgd_alloc(mm) != 0)
21909- goto out_free_pmds;
21910+ goto out_free_pxds;
21911
21912 /*
21913 * Make sure that pre-populating the pmds is atomic with
21914@@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21915 spin_lock(&pgd_lock);
21916
21917 pgd_ctor(mm, pgd);
21918- pgd_prepopulate_pmd(mm, pgd, pmds);
21919+ pgd_prepopulate_pxd(mm, pgd, pxds);
21920
21921 spin_unlock(&pgd_lock);
21922
21923 return pgd;
21924
21925-out_free_pmds:
21926- free_pmds(pmds);
21927+out_free_pxds:
21928+ free_pxds(pxds);
21929 out_free_pgd:
21930 free_page((unsigned long)pgd);
21931 out:
21932@@ -295,7 +344,7 @@ out:
21933
21934 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
21935 {
21936- pgd_mop_up_pmds(mm, pgd);
21937+ pgd_mop_up_pxds(mm, pgd);
21938 pgd_dtor(pgd);
21939 paravirt_pgd_free(mm, pgd);
21940 free_page((unsigned long)pgd);
21941diff -urNp linux-3.1.1/arch/x86/mm/setup_nx.c linux-3.1.1/arch/x86/mm/setup_nx.c
21942--- linux-3.1.1/arch/x86/mm/setup_nx.c 2011-11-11 15:19:27.000000000 -0500
21943+++ linux-3.1.1/arch/x86/mm/setup_nx.c 2011-11-16 18:39:07.000000000 -0500
21944@@ -5,8 +5,10 @@
21945 #include <asm/pgtable.h>
21946 #include <asm/proto.h>
21947
21948+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21949 static int disable_nx __cpuinitdata;
21950
21951+#ifndef CONFIG_PAX_PAGEEXEC
21952 /*
21953 * noexec = on|off
21954 *
21955@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
21956 return 0;
21957 }
21958 early_param("noexec", noexec_setup);
21959+#endif
21960+
21961+#endif
21962
21963 void __cpuinit x86_configure_nx(void)
21964 {
21965+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21966 if (cpu_has_nx && !disable_nx)
21967 __supported_pte_mask |= _PAGE_NX;
21968 else
21969+#endif
21970 __supported_pte_mask &= ~_PAGE_NX;
21971 }
21972
21973diff -urNp linux-3.1.1/arch/x86/mm/tlb.c linux-3.1.1/arch/x86/mm/tlb.c
21974--- linux-3.1.1/arch/x86/mm/tlb.c 2011-11-11 15:19:27.000000000 -0500
21975+++ linux-3.1.1/arch/x86/mm/tlb.c 2011-11-16 18:39:07.000000000 -0500
21976@@ -65,7 +65,11 @@ void leave_mm(int cpu)
21977 BUG();
21978 cpumask_clear_cpu(cpu,
21979 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
21980+
21981+#ifndef CONFIG_PAX_PER_CPU_PGD
21982 load_cr3(swapper_pg_dir);
21983+#endif
21984+
21985 }
21986 EXPORT_SYMBOL_GPL(leave_mm);
21987
21988diff -urNp linux-3.1.1/arch/x86/net/bpf_jit_comp.c linux-3.1.1/arch/x86/net/bpf_jit_comp.c
21989--- linux-3.1.1/arch/x86/net/bpf_jit_comp.c 2011-11-11 15:19:27.000000000 -0500
21990+++ linux-3.1.1/arch/x86/net/bpf_jit_comp.c 2011-11-16 18:39:07.000000000 -0500
21991@@ -589,7 +589,9 @@ cond_branch: f_offset = addrs[i + filt
21992 module_free(NULL, image);
21993 return;
21994 }
21995+ pax_open_kernel();
21996 memcpy(image + proglen, temp, ilen);
21997+ pax_close_kernel();
21998 }
21999 proglen += ilen;
22000 addrs[i] = proglen;
22001@@ -609,7 +611,7 @@ cond_branch: f_offset = addrs[i + filt
22002 break;
22003 }
22004 if (proglen == oldproglen) {
22005- image = module_alloc(max_t(unsigned int,
22006+ image = module_alloc_exec(max_t(unsigned int,
22007 proglen,
22008 sizeof(struct work_struct)));
22009 if (!image)
22010diff -urNp linux-3.1.1/arch/x86/net/bpf_jit.S linux-3.1.1/arch/x86/net/bpf_jit.S
22011--- linux-3.1.1/arch/x86/net/bpf_jit.S 2011-11-11 15:19:27.000000000 -0500
22012+++ linux-3.1.1/arch/x86/net/bpf_jit.S 2011-11-16 18:39:07.000000000 -0500
22013@@ -9,6 +9,7 @@
22014 */
22015 #include <linux/linkage.h>
22016 #include <asm/dwarf2.h>
22017+#include <asm/alternative-asm.h>
22018
22019 /*
22020 * Calling convention :
22021@@ -35,6 +36,7 @@ sk_load_word:
22022 jle bpf_slow_path_word
22023 mov (SKBDATA,%rsi),%eax
22024 bswap %eax /* ntohl() */
22025+ pax_force_retaddr
22026 ret
22027
22028
22029@@ -53,6 +55,7 @@ sk_load_half:
22030 jle bpf_slow_path_half
22031 movzwl (SKBDATA,%rsi),%eax
22032 rol $8,%ax # ntohs()
22033+ pax_force_retaddr
22034 ret
22035
22036 sk_load_byte_ind:
22037@@ -66,6 +69,7 @@ sk_load_byte:
22038 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
22039 jle bpf_slow_path_byte
22040 movzbl (SKBDATA,%rsi),%eax
22041+ pax_force_retaddr
22042 ret
22043
22044 /**
22045@@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
22046 movzbl (SKBDATA,%rsi),%ebx
22047 and $15,%bl
22048 shl $2,%bl
22049+ pax_force_retaddr
22050 ret
22051 CFI_ENDPROC
22052 ENDPROC(sk_load_byte_msh)
22053@@ -91,6 +96,7 @@ bpf_error:
22054 xor %eax,%eax
22055 mov -8(%rbp),%rbx
22056 leaveq
22057+ pax_force_retaddr
22058 ret
22059
22060 /* rsi contains offset and can be scratched */
22061@@ -113,6 +119,7 @@ bpf_slow_path_word:
22062 js bpf_error
22063 mov -12(%rbp),%eax
22064 bswap %eax
22065+ pax_force_retaddr
22066 ret
22067
22068 bpf_slow_path_half:
22069@@ -121,12 +128,14 @@ bpf_slow_path_half:
22070 mov -12(%rbp),%ax
22071 rol $8,%ax
22072 movzwl %ax,%eax
22073+ pax_force_retaddr
22074 ret
22075
22076 bpf_slow_path_byte:
22077 bpf_slow_path_common(1)
22078 js bpf_error
22079 movzbl -12(%rbp),%eax
22080+ pax_force_retaddr
22081 ret
22082
22083 bpf_slow_path_byte_msh:
22084@@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
22085 and $15,%al
22086 shl $2,%al
22087 xchg %eax,%ebx
22088+ pax_force_retaddr
22089 ret
22090diff -urNp linux-3.1.1/arch/x86/oprofile/backtrace.c linux-3.1.1/arch/x86/oprofile/backtrace.c
22091--- linux-3.1.1/arch/x86/oprofile/backtrace.c 2011-11-11 15:19:27.000000000 -0500
22092+++ linux-3.1.1/arch/x86/oprofile/backtrace.c 2011-11-16 18:39:07.000000000 -0500
22093@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_fram
22094 struct stack_frame_ia32 *fp;
22095 unsigned long bytes;
22096
22097- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
22098+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
22099 if (bytes != sizeof(bufhead))
22100 return NULL;
22101
22102- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
22103+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
22104
22105 oprofile_add_trace(bufhead[0].return_address);
22106
22107@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_bac
22108 struct stack_frame bufhead[2];
22109 unsigned long bytes;
22110
22111- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
22112+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
22113 if (bytes != sizeof(bufhead))
22114 return NULL;
22115
22116@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const reg
22117 {
22118 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
22119
22120- if (!user_mode_vm(regs)) {
22121+ if (!user_mode(regs)) {
22122 unsigned long stack = kernel_stack_pointer(regs);
22123 if (depth)
22124 dump_trace(NULL, regs, (unsigned long *)stack, 0,
22125diff -urNp linux-3.1.1/arch/x86/pci/mrst.c linux-3.1.1/arch/x86/pci/mrst.c
22126--- linux-3.1.1/arch/x86/pci/mrst.c 2011-11-11 15:19:27.000000000 -0500
22127+++ linux-3.1.1/arch/x86/pci/mrst.c 2011-11-16 18:39:07.000000000 -0500
22128@@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
22129 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
22130 pci_mmcfg_late_init();
22131 pcibios_enable_irq = mrst_pci_irq_enable;
22132- pci_root_ops = pci_mrst_ops;
22133+ pax_open_kernel();
22134+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
22135+ pax_close_kernel();
22136 /* Continue with standard init */
22137 return 1;
22138 }
22139diff -urNp linux-3.1.1/arch/x86/pci/pcbios.c linux-3.1.1/arch/x86/pci/pcbios.c
22140--- linux-3.1.1/arch/x86/pci/pcbios.c 2011-11-11 15:19:27.000000000 -0500
22141+++ linux-3.1.1/arch/x86/pci/pcbios.c 2011-11-16 18:39:07.000000000 -0500
22142@@ -79,50 +79,93 @@ union bios32 {
22143 static struct {
22144 unsigned long address;
22145 unsigned short segment;
22146-} bios32_indirect = { 0, __KERNEL_CS };
22147+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
22148
22149 /*
22150 * Returns the entry point for the given service, NULL on error
22151 */
22152
22153-static unsigned long bios32_service(unsigned long service)
22154+static unsigned long __devinit bios32_service(unsigned long service)
22155 {
22156 unsigned char return_code; /* %al */
22157 unsigned long address; /* %ebx */
22158 unsigned long length; /* %ecx */
22159 unsigned long entry; /* %edx */
22160 unsigned long flags;
22161+ struct desc_struct d, *gdt;
22162
22163 local_irq_save(flags);
22164- __asm__("lcall *(%%edi); cld"
22165+
22166+ gdt = get_cpu_gdt_table(smp_processor_id());
22167+
22168+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
22169+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22170+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
22171+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22172+
22173+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
22174 : "=a" (return_code),
22175 "=b" (address),
22176 "=c" (length),
22177 "=d" (entry)
22178 : "0" (service),
22179 "1" (0),
22180- "D" (&bios32_indirect));
22181+ "D" (&bios32_indirect),
22182+ "r"(__PCIBIOS_DS)
22183+ : "memory");
22184+
22185+ pax_open_kernel();
22186+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
22187+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
22188+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
22189+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
22190+ pax_close_kernel();
22191+
22192 local_irq_restore(flags);
22193
22194 switch (return_code) {
22195- case 0:
22196- return address + entry;
22197- case 0x80: /* Not present */
22198- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22199- return 0;
22200- default: /* Shouldn't happen */
22201- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22202- service, return_code);
22203+ case 0: {
22204+ int cpu;
22205+ unsigned char flags;
22206+
22207+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
22208+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
22209+ printk(KERN_WARNING "bios32_service: not valid\n");
22210 return 0;
22211+ }
22212+ address = address + PAGE_OFFSET;
22213+ length += 16UL; /* some BIOSs underreport this... */
22214+ flags = 4;
22215+ if (length >= 64*1024*1024) {
22216+ length >>= PAGE_SHIFT;
22217+ flags |= 8;
22218+ }
22219+
22220+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
22221+ gdt = get_cpu_gdt_table(cpu);
22222+ pack_descriptor(&d, address, length, 0x9b, flags);
22223+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22224+ pack_descriptor(&d, address, length, 0x93, flags);
22225+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22226+ }
22227+ return entry;
22228+ }
22229+ case 0x80: /* Not present */
22230+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22231+ return 0;
22232+ default: /* Shouldn't happen */
22233+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22234+ service, return_code);
22235+ return 0;
22236 }
22237 }
22238
22239 static struct {
22240 unsigned long address;
22241 unsigned short segment;
22242-} pci_indirect = { 0, __KERNEL_CS };
22243+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
22244
22245-static int pci_bios_present;
22246+static int pci_bios_present __read_only;
22247
22248 static int __devinit check_pcibios(void)
22249 {
22250@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
22251 unsigned long flags, pcibios_entry;
22252
22253 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
22254- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
22255+ pci_indirect.address = pcibios_entry;
22256
22257 local_irq_save(flags);
22258- __asm__(
22259- "lcall *(%%edi); cld\n\t"
22260+ __asm__("movw %w6, %%ds\n\t"
22261+ "lcall *%%ss:(%%edi); cld\n\t"
22262+ "push %%ss\n\t"
22263+ "pop %%ds\n\t"
22264 "jc 1f\n\t"
22265 "xor %%ah, %%ah\n"
22266 "1:"
22267@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
22268 "=b" (ebx),
22269 "=c" (ecx)
22270 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
22271- "D" (&pci_indirect)
22272+ "D" (&pci_indirect),
22273+ "r" (__PCIBIOS_DS)
22274 : "memory");
22275 local_irq_restore(flags);
22276
22277@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int se
22278
22279 switch (len) {
22280 case 1:
22281- __asm__("lcall *(%%esi); cld\n\t"
22282+ __asm__("movw %w6, %%ds\n\t"
22283+ "lcall *%%ss:(%%esi); cld\n\t"
22284+ "push %%ss\n\t"
22285+ "pop %%ds\n\t"
22286 "jc 1f\n\t"
22287 "xor %%ah, %%ah\n"
22288 "1:"
22289@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int se
22290 : "1" (PCIBIOS_READ_CONFIG_BYTE),
22291 "b" (bx),
22292 "D" ((long)reg),
22293- "S" (&pci_indirect));
22294+ "S" (&pci_indirect),
22295+ "r" (__PCIBIOS_DS));
22296 /*
22297 * Zero-extend the result beyond 8 bits, do not trust the
22298 * BIOS having done it:
22299@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int se
22300 *value &= 0xff;
22301 break;
22302 case 2:
22303- __asm__("lcall *(%%esi); cld\n\t"
22304+ __asm__("movw %w6, %%ds\n\t"
22305+ "lcall *%%ss:(%%esi); cld\n\t"
22306+ "push %%ss\n\t"
22307+ "pop %%ds\n\t"
22308 "jc 1f\n\t"
22309 "xor %%ah, %%ah\n"
22310 "1:"
22311@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int se
22312 : "1" (PCIBIOS_READ_CONFIG_WORD),
22313 "b" (bx),
22314 "D" ((long)reg),
22315- "S" (&pci_indirect));
22316+ "S" (&pci_indirect),
22317+ "r" (__PCIBIOS_DS));
22318 /*
22319 * Zero-extend the result beyond 16 bits, do not trust the
22320 * BIOS having done it:
22321@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int se
22322 *value &= 0xffff;
22323 break;
22324 case 4:
22325- __asm__("lcall *(%%esi); cld\n\t"
22326+ __asm__("movw %w6, %%ds\n\t"
22327+ "lcall *%%ss:(%%esi); cld\n\t"
22328+ "push %%ss\n\t"
22329+ "pop %%ds\n\t"
22330 "jc 1f\n\t"
22331 "xor %%ah, %%ah\n"
22332 "1:"
22333@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int se
22334 : "1" (PCIBIOS_READ_CONFIG_DWORD),
22335 "b" (bx),
22336 "D" ((long)reg),
22337- "S" (&pci_indirect));
22338+ "S" (&pci_indirect),
22339+ "r" (__PCIBIOS_DS));
22340 break;
22341 }
22342
22343@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int s
22344
22345 switch (len) {
22346 case 1:
22347- __asm__("lcall *(%%esi); cld\n\t"
22348+ __asm__("movw %w6, %%ds\n\t"
22349+ "lcall *%%ss:(%%esi); cld\n\t"
22350+ "push %%ss\n\t"
22351+ "pop %%ds\n\t"
22352 "jc 1f\n\t"
22353 "xor %%ah, %%ah\n"
22354 "1:"
22355@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int s
22356 "c" (value),
22357 "b" (bx),
22358 "D" ((long)reg),
22359- "S" (&pci_indirect));
22360+ "S" (&pci_indirect),
22361+ "r" (__PCIBIOS_DS));
22362 break;
22363 case 2:
22364- __asm__("lcall *(%%esi); cld\n\t"
22365+ __asm__("movw %w6, %%ds\n\t"
22366+ "lcall *%%ss:(%%esi); cld\n\t"
22367+ "push %%ss\n\t"
22368+ "pop %%ds\n\t"
22369 "jc 1f\n\t"
22370 "xor %%ah, %%ah\n"
22371 "1:"
22372@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int s
22373 "c" (value),
22374 "b" (bx),
22375 "D" ((long)reg),
22376- "S" (&pci_indirect));
22377+ "S" (&pci_indirect),
22378+ "r" (__PCIBIOS_DS));
22379 break;
22380 case 4:
22381- __asm__("lcall *(%%esi); cld\n\t"
22382+ __asm__("movw %w6, %%ds\n\t"
22383+ "lcall *%%ss:(%%esi); cld\n\t"
22384+ "push %%ss\n\t"
22385+ "pop %%ds\n\t"
22386 "jc 1f\n\t"
22387 "xor %%ah, %%ah\n"
22388 "1:"
22389@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int s
22390 "c" (value),
22391 "b" (bx),
22392 "D" ((long)reg),
22393- "S" (&pci_indirect));
22394+ "S" (&pci_indirect),
22395+ "r" (__PCIBIOS_DS));
22396 break;
22397 }
22398
22399@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_i
22400
22401 DBG("PCI: Fetching IRQ routing table... ");
22402 __asm__("push %%es\n\t"
22403+ "movw %w8, %%ds\n\t"
22404 "push %%ds\n\t"
22405 "pop %%es\n\t"
22406- "lcall *(%%esi); cld\n\t"
22407+ "lcall *%%ss:(%%esi); cld\n\t"
22408 "pop %%es\n\t"
22409+ "push %%ss\n\t"
22410+ "pop %%ds\n"
22411 "jc 1f\n\t"
22412 "xor %%ah, %%ah\n"
22413 "1:"
22414@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_i
22415 "1" (0),
22416 "D" ((long) &opt),
22417 "S" (&pci_indirect),
22418- "m" (opt)
22419+ "m" (opt),
22420+ "r" (__PCIBIOS_DS)
22421 : "memory");
22422 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
22423 if (ret & 0xff00)
22424@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_d
22425 {
22426 int ret;
22427
22428- __asm__("lcall *(%%esi); cld\n\t"
22429+ __asm__("movw %w5, %%ds\n\t"
22430+ "lcall *%%ss:(%%esi); cld\n\t"
22431+ "push %%ss\n\t"
22432+ "pop %%ds\n"
22433 "jc 1f\n\t"
22434 "xor %%ah, %%ah\n"
22435 "1:"
22436@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_d
22437 : "0" (PCIBIOS_SET_PCI_HW_INT),
22438 "b" ((dev->bus->number << 8) | dev->devfn),
22439 "c" ((irq << 8) | (pin + 10)),
22440- "S" (&pci_indirect));
22441+ "S" (&pci_indirect),
22442+ "r" (__PCIBIOS_DS));
22443 return !(ret & 0xff00);
22444 }
22445 EXPORT_SYMBOL(pcibios_set_irq_routing);
22446diff -urNp linux-3.1.1/arch/x86/platform/efi/efi_32.c linux-3.1.1/arch/x86/platform/efi/efi_32.c
22447--- linux-3.1.1/arch/x86/platform/efi/efi_32.c 2011-11-11 15:19:27.000000000 -0500
22448+++ linux-3.1.1/arch/x86/platform/efi/efi_32.c 2011-11-16 18:39:07.000000000 -0500
22449@@ -38,70 +38,56 @@
22450 */
22451
22452 static unsigned long efi_rt_eflags;
22453-static pgd_t efi_bak_pg_dir_pointer[2];
22454+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
22455
22456-void efi_call_phys_prelog(void)
22457+void __init efi_call_phys_prelog(void)
22458 {
22459- unsigned long cr4;
22460- unsigned long temp;
22461 struct desc_ptr gdt_descr;
22462
22463- local_irq_save(efi_rt_eflags);
22464+#ifdef CONFIG_PAX_KERNEXEC
22465+ struct desc_struct d;
22466+#endif
22467
22468- /*
22469- * If I don't have PAE, I should just duplicate two entries in page
22470- * directory. If I have PAE, I just need to duplicate one entry in
22471- * page directory.
22472- */
22473- cr4 = read_cr4_safe();
22474+ local_irq_save(efi_rt_eflags);
22475
22476- if (cr4 & X86_CR4_PAE) {
22477- efi_bak_pg_dir_pointer[0].pgd =
22478- swapper_pg_dir[pgd_index(0)].pgd;
22479- swapper_pg_dir[0].pgd =
22480- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
22481- } else {
22482- efi_bak_pg_dir_pointer[0].pgd =
22483- swapper_pg_dir[pgd_index(0)].pgd;
22484- efi_bak_pg_dir_pointer[1].pgd =
22485- swapper_pg_dir[pgd_index(0x400000)].pgd;
22486- swapper_pg_dir[pgd_index(0)].pgd =
22487- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
22488- temp = PAGE_OFFSET + 0x400000;
22489- swapper_pg_dir[pgd_index(0x400000)].pgd =
22490- swapper_pg_dir[pgd_index(temp)].pgd;
22491- }
22492+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
22493+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22494+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
22495
22496 /*
22497 * After the lock is released, the original page table is restored.
22498 */
22499 __flush_tlb_all();
22500
22501+#ifdef CONFIG_PAX_KERNEXEC
22502+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
22503+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
22504+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
22505+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
22506+#endif
22507+
22508 gdt_descr.address = __pa(get_cpu_gdt_table(0));
22509 gdt_descr.size = GDT_SIZE - 1;
22510 load_gdt(&gdt_descr);
22511 }
22512
22513-void efi_call_phys_epilog(void)
22514+void __init efi_call_phys_epilog(void)
22515 {
22516- unsigned long cr4;
22517 struct desc_ptr gdt_descr;
22518
22519+#ifdef CONFIG_PAX_KERNEXEC
22520+ struct desc_struct d;
22521+
22522+ memset(&d, 0, sizeof d);
22523+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
22524+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
22525+#endif
22526+
22527 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
22528 gdt_descr.size = GDT_SIZE - 1;
22529 load_gdt(&gdt_descr);
22530
22531- cr4 = read_cr4_safe();
22532-
22533- if (cr4 & X86_CR4_PAE) {
22534- swapper_pg_dir[pgd_index(0)].pgd =
22535- efi_bak_pg_dir_pointer[0].pgd;
22536- } else {
22537- swapper_pg_dir[pgd_index(0)].pgd =
22538- efi_bak_pg_dir_pointer[0].pgd;
22539- swapper_pg_dir[pgd_index(0x400000)].pgd =
22540- efi_bak_pg_dir_pointer[1].pgd;
22541- }
22542+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
22543
22544 /*
22545 * After the lock is released, the original page table is restored.
22546diff -urNp linux-3.1.1/arch/x86/platform/efi/efi_stub_32.S linux-3.1.1/arch/x86/platform/efi/efi_stub_32.S
22547--- linux-3.1.1/arch/x86/platform/efi/efi_stub_32.S 2011-11-11 15:19:27.000000000 -0500
22548+++ linux-3.1.1/arch/x86/platform/efi/efi_stub_32.S 2011-11-16 18:39:07.000000000 -0500
22549@@ -6,7 +6,9 @@
22550 */
22551
22552 #include <linux/linkage.h>
22553+#include <linux/init.h>
22554 #include <asm/page_types.h>
22555+#include <asm/segment.h>
22556
22557 /*
22558 * efi_call_phys(void *, ...) is a function with variable parameters.
22559@@ -20,7 +22,7 @@
22560 * service functions will comply with gcc calling convention, too.
22561 */
22562
22563-.text
22564+__INIT
22565 ENTRY(efi_call_phys)
22566 /*
22567 * 0. The function can only be called in Linux kernel. So CS has been
22568@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
22569 * The mapping of lower virtual memory has been created in prelog and
22570 * epilog.
22571 */
22572- movl $1f, %edx
22573- subl $__PAGE_OFFSET, %edx
22574- jmp *%edx
22575+ movl $(__KERNEXEC_EFI_DS), %edx
22576+ mov %edx, %ds
22577+ mov %edx, %es
22578+ mov %edx, %ss
22579+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
22580 1:
22581
22582 /*
22583@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
22584 * parameter 2, ..., param n. To make things easy, we save the return
22585 * address of efi_call_phys in a global variable.
22586 */
22587- popl %edx
22588- movl %edx, saved_return_addr
22589- /* get the function pointer into ECX*/
22590- popl %ecx
22591- movl %ecx, efi_rt_function_ptr
22592- movl $2f, %edx
22593- subl $__PAGE_OFFSET, %edx
22594- pushl %edx
22595+ popl (saved_return_addr)
22596+ popl (efi_rt_function_ptr)
22597
22598 /*
22599 * 3. Clear PG bit in %CR0.
22600@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
22601 /*
22602 * 5. Call the physical function.
22603 */
22604- jmp *%ecx
22605+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
22606
22607-2:
22608 /*
22609 * 6. After EFI runtime service returns, control will return to
22610 * following instruction. We'd better readjust stack pointer first.
22611@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
22612 movl %cr0, %edx
22613 orl $0x80000000, %edx
22614 movl %edx, %cr0
22615- jmp 1f
22616-1:
22617+
22618 /*
22619 * 8. Now restore the virtual mode from flat mode by
22620 * adding EIP with PAGE_OFFSET.
22621 */
22622- movl $1f, %edx
22623- jmp *%edx
22624+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
22625 1:
22626+ movl $(__KERNEL_DS), %edx
22627+ mov %edx, %ds
22628+ mov %edx, %es
22629+ mov %edx, %ss
22630
22631 /*
22632 * 9. Balance the stack. And because EAX contain the return value,
22633 * we'd better not clobber it.
22634 */
22635- leal efi_rt_function_ptr, %edx
22636- movl (%edx), %ecx
22637- pushl %ecx
22638+ pushl (efi_rt_function_ptr)
22639
22640 /*
22641- * 10. Push the saved return address onto the stack and return.
22642+ * 10. Return to the saved return address.
22643 */
22644- leal saved_return_addr, %edx
22645- movl (%edx), %ecx
22646- pushl %ecx
22647- ret
22648+ jmpl *(saved_return_addr)
22649 ENDPROC(efi_call_phys)
22650 .previous
22651
22652-.data
22653+__INITDATA
22654 saved_return_addr:
22655 .long 0
22656 efi_rt_function_ptr:
22657diff -urNp linux-3.1.1/arch/x86/platform/efi/efi_stub_64.S linux-3.1.1/arch/x86/platform/efi/efi_stub_64.S
22658--- linux-3.1.1/arch/x86/platform/efi/efi_stub_64.S 2011-11-11 15:19:27.000000000 -0500
22659+++ linux-3.1.1/arch/x86/platform/efi/efi_stub_64.S 2011-11-16 18:39:07.000000000 -0500
22660@@ -7,6 +7,7 @@
22661 */
22662
22663 #include <linux/linkage.h>
22664+#include <asm/alternative-asm.h>
22665
22666 #define SAVE_XMM \
22667 mov %rsp, %rax; \
22668@@ -40,6 +41,7 @@ ENTRY(efi_call0)
22669 call *%rdi
22670 addq $32, %rsp
22671 RESTORE_XMM
22672+ pax_force_retaddr
22673 ret
22674 ENDPROC(efi_call0)
22675
22676@@ -50,6 +52,7 @@ ENTRY(efi_call1)
22677 call *%rdi
22678 addq $32, %rsp
22679 RESTORE_XMM
22680+ pax_force_retaddr
22681 ret
22682 ENDPROC(efi_call1)
22683
22684@@ -60,6 +63,7 @@ ENTRY(efi_call2)
22685 call *%rdi
22686 addq $32, %rsp
22687 RESTORE_XMM
22688+ pax_force_retaddr
22689 ret
22690 ENDPROC(efi_call2)
22691
22692@@ -71,6 +75,7 @@ ENTRY(efi_call3)
22693 call *%rdi
22694 addq $32, %rsp
22695 RESTORE_XMM
22696+ pax_force_retaddr
22697 ret
22698 ENDPROC(efi_call3)
22699
22700@@ -83,6 +88,7 @@ ENTRY(efi_call4)
22701 call *%rdi
22702 addq $32, %rsp
22703 RESTORE_XMM
22704+ pax_force_retaddr
22705 ret
22706 ENDPROC(efi_call4)
22707
22708@@ -96,6 +102,7 @@ ENTRY(efi_call5)
22709 call *%rdi
22710 addq $48, %rsp
22711 RESTORE_XMM
22712+ pax_force_retaddr
22713 ret
22714 ENDPROC(efi_call5)
22715
22716@@ -112,5 +119,6 @@ ENTRY(efi_call6)
22717 call *%rdi
22718 addq $48, %rsp
22719 RESTORE_XMM
22720+ pax_force_retaddr
22721 ret
22722 ENDPROC(efi_call6)
22723diff -urNp linux-3.1.1/arch/x86/platform/mrst/mrst.c linux-3.1.1/arch/x86/platform/mrst/mrst.c
22724--- linux-3.1.1/arch/x86/platform/mrst/mrst.c 2011-11-11 15:19:27.000000000 -0500
22725+++ linux-3.1.1/arch/x86/platform/mrst/mrst.c 2011-11-16 18:39:07.000000000 -0500
22726@@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
22727 }
22728
22729 /* Reboot and power off are handled by the SCU on a MID device */
22730-static void mrst_power_off(void)
22731+static __noreturn void mrst_power_off(void)
22732 {
22733 intel_scu_ipc_simple_command(0xf1, 1);
22734+ BUG();
22735 }
22736
22737-static void mrst_reboot(void)
22738+static __noreturn void mrst_reboot(void)
22739 {
22740 intel_scu_ipc_simple_command(0xf1, 0);
22741+ BUG();
22742 }
22743
22744 /*
22745diff -urNp linux-3.1.1/arch/x86/platform/uv/tlb_uv.c linux-3.1.1/arch/x86/platform/uv/tlb_uv.c
22746--- linux-3.1.1/arch/x86/platform/uv/tlb_uv.c 2011-11-11 15:19:27.000000000 -0500
22747+++ linux-3.1.1/arch/x86/platform/uv/tlb_uv.c 2011-11-16 19:39:11.000000000 -0500
22748@@ -377,6 +377,8 @@ static void reset_with_ipi(struct pnmask
22749 struct bau_control *smaster = bcp->socket_master;
22750 struct reset_args reset_args;
22751
22752+ pax_track_stack();
22753+
22754 reset_args.sender = sender;
22755 cpus_clear(*mask);
22756 /* find a single cpu for each uvhub in this distribution mask */
22757diff -urNp linux-3.1.1/arch/x86/power/cpu.c linux-3.1.1/arch/x86/power/cpu.c
22758--- linux-3.1.1/arch/x86/power/cpu.c 2011-11-11 15:19:27.000000000 -0500
22759+++ linux-3.1.1/arch/x86/power/cpu.c 2011-11-16 18:39:07.000000000 -0500
22760@@ -130,7 +130,7 @@ static void do_fpu_end(void)
22761 static void fix_processor_context(void)
22762 {
22763 int cpu = smp_processor_id();
22764- struct tss_struct *t = &per_cpu(init_tss, cpu);
22765+ struct tss_struct *t = init_tss + cpu;
22766
22767 set_tss_desc(cpu, t); /*
22768 * This just modifies memory; should not be
22769@@ -140,7 +140,9 @@ static void fix_processor_context(void)
22770 */
22771
22772 #ifdef CONFIG_X86_64
22773+ pax_open_kernel();
22774 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
22775+ pax_close_kernel();
22776
22777 syscall_init(); /* This sets MSR_*STAR and related */
22778 #endif
22779diff -urNp linux-3.1.1/arch/x86/vdso/Makefile linux-3.1.1/arch/x86/vdso/Makefile
22780--- linux-3.1.1/arch/x86/vdso/Makefile 2011-11-11 15:19:27.000000000 -0500
22781+++ linux-3.1.1/arch/x86/vdso/Makefile 2011-11-16 18:39:07.000000000 -0500
22782@@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
22783 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
22784 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
22785
22786-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22787+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22788 GCOV_PROFILE := n
22789
22790 #
22791diff -urNp linux-3.1.1/arch/x86/vdso/vdso32-setup.c linux-3.1.1/arch/x86/vdso/vdso32-setup.c
22792--- linux-3.1.1/arch/x86/vdso/vdso32-setup.c 2011-11-11 15:19:27.000000000 -0500
22793+++ linux-3.1.1/arch/x86/vdso/vdso32-setup.c 2011-11-16 18:39:07.000000000 -0500
22794@@ -25,6 +25,7 @@
22795 #include <asm/tlbflush.h>
22796 #include <asm/vdso.h>
22797 #include <asm/proto.h>
22798+#include <asm/mman.h>
22799
22800 enum {
22801 VDSO_DISABLED = 0,
22802@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
22803 void enable_sep_cpu(void)
22804 {
22805 int cpu = get_cpu();
22806- struct tss_struct *tss = &per_cpu(init_tss, cpu);
22807+ struct tss_struct *tss = init_tss + cpu;
22808
22809 if (!boot_cpu_has(X86_FEATURE_SEP)) {
22810 put_cpu();
22811@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
22812 gate_vma.vm_start = FIXADDR_USER_START;
22813 gate_vma.vm_end = FIXADDR_USER_END;
22814 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
22815- gate_vma.vm_page_prot = __P101;
22816+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
22817 /*
22818 * Make sure the vDSO gets into every core dump.
22819 * Dumping its contents makes post-mortem fully interpretable later
22820@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
22821 if (compat)
22822 addr = VDSO_HIGH_BASE;
22823 else {
22824- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
22825+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
22826 if (IS_ERR_VALUE(addr)) {
22827 ret = addr;
22828 goto up_fail;
22829 }
22830 }
22831
22832- current->mm->context.vdso = (void *)addr;
22833+ current->mm->context.vdso = addr;
22834
22835 if (compat_uses_vma || !compat) {
22836 /*
22837@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
22838 }
22839
22840 current_thread_info()->sysenter_return =
22841- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22842+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22843
22844 up_fail:
22845 if (ret)
22846- current->mm->context.vdso = NULL;
22847+ current->mm->context.vdso = 0;
22848
22849 up_write(&mm->mmap_sem);
22850
22851@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
22852
22853 const char *arch_vma_name(struct vm_area_struct *vma)
22854 {
22855- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22856+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22857 return "[vdso]";
22858+
22859+#ifdef CONFIG_PAX_SEGMEXEC
22860+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
22861+ return "[vdso]";
22862+#endif
22863+
22864 return NULL;
22865 }
22866
22867@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
22868 * Check to see if the corresponding task was created in compat vdso
22869 * mode.
22870 */
22871- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
22872+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
22873 return &gate_vma;
22874 return NULL;
22875 }
22876diff -urNp linux-3.1.1/arch/x86/vdso/vma.c linux-3.1.1/arch/x86/vdso/vma.c
22877--- linux-3.1.1/arch/x86/vdso/vma.c 2011-11-11 15:19:27.000000000 -0500
22878+++ linux-3.1.1/arch/x86/vdso/vma.c 2011-11-16 18:39:07.000000000 -0500
22879@@ -16,8 +16,6 @@
22880 #include <asm/vdso.h>
22881 #include <asm/page.h>
22882
22883-unsigned int __read_mostly vdso_enabled = 1;
22884-
22885 extern char vdso_start[], vdso_end[];
22886 extern unsigned short vdso_sync_cpuid;
22887
22888@@ -97,13 +95,15 @@ static unsigned long vdso_addr(unsigned
22889 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
22890 {
22891 struct mm_struct *mm = current->mm;
22892- unsigned long addr;
22893+ unsigned long addr = 0;
22894 int ret;
22895
22896- if (!vdso_enabled)
22897- return 0;
22898-
22899 down_write(&mm->mmap_sem);
22900+
22901+#ifdef CONFIG_PAX_RANDMMAP
22902+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
22903+#endif
22904+
22905 addr = vdso_addr(mm->start_stack, vdso_size);
22906 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
22907 if (IS_ERR_VALUE(addr)) {
22908@@ -111,26 +111,18 @@ int arch_setup_additional_pages(struct l
22909 goto up_fail;
22910 }
22911
22912- current->mm->context.vdso = (void *)addr;
22913+ mm->context.vdso = addr;
22914
22915 ret = install_special_mapping(mm, addr, vdso_size,
22916 VM_READ|VM_EXEC|
22917 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
22918 VM_ALWAYSDUMP,
22919 vdso_pages);
22920- if (ret) {
22921- current->mm->context.vdso = NULL;
22922- goto up_fail;
22923- }
22924+
22925+ if (ret)
22926+ mm->context.vdso = 0;
22927
22928 up_fail:
22929 up_write(&mm->mmap_sem);
22930 return ret;
22931 }
22932-
22933-static __init int vdso_setup(char *s)
22934-{
22935- vdso_enabled = simple_strtoul(s, NULL, 0);
22936- return 0;
22937-}
22938-__setup("vdso=", vdso_setup);
22939diff -urNp linux-3.1.1/arch/x86/xen/enlighten.c linux-3.1.1/arch/x86/xen/enlighten.c
22940--- linux-3.1.1/arch/x86/xen/enlighten.c 2011-11-11 15:19:27.000000000 -0500
22941+++ linux-3.1.1/arch/x86/xen/enlighten.c 2011-11-16 18:39:07.000000000 -0500
22942@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
22943
22944 struct shared_info xen_dummy_shared_info;
22945
22946-void *xen_initial_gdt;
22947-
22948 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
22949 __read_mostly int xen_have_vector_callback;
22950 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
22951@@ -1028,7 +1026,7 @@ static const struct pv_apic_ops xen_apic
22952 #endif
22953 };
22954
22955-static void xen_reboot(int reason)
22956+static __noreturn void xen_reboot(int reason)
22957 {
22958 struct sched_shutdown r = { .reason = reason };
22959
22960@@ -1036,17 +1034,17 @@ static void xen_reboot(int reason)
22961 BUG();
22962 }
22963
22964-static void xen_restart(char *msg)
22965+static __noreturn void xen_restart(char *msg)
22966 {
22967 xen_reboot(SHUTDOWN_reboot);
22968 }
22969
22970-static void xen_emergency_restart(void)
22971+static __noreturn void xen_emergency_restart(void)
22972 {
22973 xen_reboot(SHUTDOWN_reboot);
22974 }
22975
22976-static void xen_machine_halt(void)
22977+static __noreturn void xen_machine_halt(void)
22978 {
22979 xen_reboot(SHUTDOWN_poweroff);
22980 }
22981@@ -1152,7 +1150,17 @@ asmlinkage void __init xen_start_kernel(
22982 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
22983
22984 /* Work out if we support NX */
22985- x86_configure_nx();
22986+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22987+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
22988+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
22989+ unsigned l, h;
22990+
22991+ __supported_pte_mask |= _PAGE_NX;
22992+ rdmsr(MSR_EFER, l, h);
22993+ l |= EFER_NX;
22994+ wrmsr(MSR_EFER, l, h);
22995+ }
22996+#endif
22997
22998 xen_setup_features();
22999
23000@@ -1183,13 +1191,6 @@ asmlinkage void __init xen_start_kernel(
23001
23002 machine_ops = xen_machine_ops;
23003
23004- /*
23005- * The only reliable way to retain the initial address of the
23006- * percpu gdt_page is to remember it here, so we can go and
23007- * mark it RW later, when the initial percpu area is freed.
23008- */
23009- xen_initial_gdt = &per_cpu(gdt_page, 0);
23010-
23011 xen_smp_init();
23012
23013 #ifdef CONFIG_ACPI_NUMA
23014diff -urNp linux-3.1.1/arch/x86/xen/mmu.c linux-3.1.1/arch/x86/xen/mmu.c
23015--- linux-3.1.1/arch/x86/xen/mmu.c 2011-11-11 15:19:27.000000000 -0500
23016+++ linux-3.1.1/arch/x86/xen/mmu.c 2011-11-16 18:39:07.000000000 -0500
23017@@ -1768,6 +1768,8 @@ pgd_t * __init xen_setup_kernel_pagetabl
23018 convert_pfn_mfn(init_level4_pgt);
23019 convert_pfn_mfn(level3_ident_pgt);
23020 convert_pfn_mfn(level3_kernel_pgt);
23021+ convert_pfn_mfn(level3_vmalloc_pgt);
23022+ convert_pfn_mfn(level3_vmemmap_pgt);
23023
23024 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
23025 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
23026@@ -1786,7 +1788,10 @@ pgd_t * __init xen_setup_kernel_pagetabl
23027 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
23028 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
23029 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
23030+ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
23031+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
23032 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
23033+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
23034 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
23035 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
23036
23037@@ -2000,6 +2005,7 @@ static void __init xen_post_allocator_in
23038 pv_mmu_ops.set_pud = xen_set_pud;
23039 #if PAGETABLE_LEVELS == 4
23040 pv_mmu_ops.set_pgd = xen_set_pgd;
23041+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
23042 #endif
23043
23044 /* This will work as long as patching hasn't happened yet
23045@@ -2081,6 +2087,7 @@ static const struct pv_mmu_ops xen_mmu_o
23046 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
23047 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
23048 .set_pgd = xen_set_pgd_hyper,
23049+ .set_pgd_batched = xen_set_pgd_hyper,
23050
23051 .alloc_pud = xen_alloc_pmd_init,
23052 .release_pud = xen_release_pmd_init,
23053diff -urNp linux-3.1.1/arch/x86/xen/smp.c linux-3.1.1/arch/x86/xen/smp.c
23054--- linux-3.1.1/arch/x86/xen/smp.c 2011-11-11 15:19:27.000000000 -0500
23055+++ linux-3.1.1/arch/x86/xen/smp.c 2011-11-16 18:39:07.000000000 -0500
23056@@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_
23057 {
23058 BUG_ON(smp_processor_id() != 0);
23059 native_smp_prepare_boot_cpu();
23060-
23061- /* We've switched to the "real" per-cpu gdt, so make sure the
23062- old memory can be recycled */
23063- make_lowmem_page_readwrite(xen_initial_gdt);
23064-
23065 xen_filter_cpu_maps();
23066 xen_setup_vcpu_info_placement();
23067 }
23068@@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu,
23069 gdt = get_cpu_gdt_table(cpu);
23070
23071 ctxt->flags = VGCF_IN_KERNEL;
23072- ctxt->user_regs.ds = __USER_DS;
23073- ctxt->user_regs.es = __USER_DS;
23074+ ctxt->user_regs.ds = __KERNEL_DS;
23075+ ctxt->user_regs.es = __KERNEL_DS;
23076 ctxt->user_regs.ss = __KERNEL_DS;
23077 #ifdef CONFIG_X86_32
23078 ctxt->user_regs.fs = __KERNEL_PERCPU;
23079- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
23080+ savesegment(gs, ctxt->user_regs.gs);
23081 #else
23082 ctxt->gs_base_kernel = per_cpu_offset(cpu);
23083 #endif
23084@@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned
23085 int rc;
23086
23087 per_cpu(current_task, cpu) = idle;
23088+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
23089 #ifdef CONFIG_X86_32
23090 irq_ctx_init(cpu);
23091 #else
23092 clear_tsk_thread_flag(idle, TIF_FORK);
23093- per_cpu(kernel_stack, cpu) =
23094- (unsigned long)task_stack_page(idle) -
23095- KERNEL_STACK_OFFSET + THREAD_SIZE;
23096+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
23097 #endif
23098 xen_setup_runstate_info(cpu);
23099 xen_setup_timer(cpu);
23100diff -urNp linux-3.1.1/arch/x86/xen/xen-asm_32.S linux-3.1.1/arch/x86/xen/xen-asm_32.S
23101--- linux-3.1.1/arch/x86/xen/xen-asm_32.S 2011-11-11 15:19:27.000000000 -0500
23102+++ linux-3.1.1/arch/x86/xen/xen-asm_32.S 2011-11-16 18:39:07.000000000 -0500
23103@@ -83,14 +83,14 @@ ENTRY(xen_iret)
23104 ESP_OFFSET=4 # bytes pushed onto stack
23105
23106 /*
23107- * Store vcpu_info pointer for easy access. Do it this way to
23108- * avoid having to reload %fs
23109+ * Store vcpu_info pointer for easy access.
23110 */
23111 #ifdef CONFIG_SMP
23112- GET_THREAD_INFO(%eax)
23113- movl TI_cpu(%eax), %eax
23114- movl __per_cpu_offset(,%eax,4), %eax
23115- mov xen_vcpu(%eax), %eax
23116+ push %fs
23117+ mov $(__KERNEL_PERCPU), %eax
23118+ mov %eax, %fs
23119+ mov PER_CPU_VAR(xen_vcpu), %eax
23120+ pop %fs
23121 #else
23122 movl xen_vcpu, %eax
23123 #endif
23124diff -urNp linux-3.1.1/arch/x86/xen/xen-head.S linux-3.1.1/arch/x86/xen/xen-head.S
23125--- linux-3.1.1/arch/x86/xen/xen-head.S 2011-11-11 15:19:27.000000000 -0500
23126+++ linux-3.1.1/arch/x86/xen/xen-head.S 2011-11-16 18:39:07.000000000 -0500
23127@@ -19,6 +19,17 @@ ENTRY(startup_xen)
23128 #ifdef CONFIG_X86_32
23129 mov %esi,xen_start_info
23130 mov $init_thread_union+THREAD_SIZE,%esp
23131+#ifdef CONFIG_SMP
23132+ movl $cpu_gdt_table,%edi
23133+ movl $__per_cpu_load,%eax
23134+ movw %ax,__KERNEL_PERCPU + 2(%edi)
23135+ rorl $16,%eax
23136+ movb %al,__KERNEL_PERCPU + 4(%edi)
23137+ movb %ah,__KERNEL_PERCPU + 7(%edi)
23138+ movl $__per_cpu_end - 1,%eax
23139+ subl $__per_cpu_start,%eax
23140+ movw %ax,__KERNEL_PERCPU + 0(%edi)
23141+#endif
23142 #else
23143 mov %rsi,xen_start_info
23144 mov $init_thread_union+THREAD_SIZE,%rsp
23145diff -urNp linux-3.1.1/arch/x86/xen/xen-ops.h linux-3.1.1/arch/x86/xen/xen-ops.h
23146--- linux-3.1.1/arch/x86/xen/xen-ops.h 2011-11-11 15:19:27.000000000 -0500
23147+++ linux-3.1.1/arch/x86/xen/xen-ops.h 2011-11-16 18:39:07.000000000 -0500
23148@@ -10,8 +10,6 @@
23149 extern const char xen_hypervisor_callback[];
23150 extern const char xen_failsafe_callback[];
23151
23152-extern void *xen_initial_gdt;
23153-
23154 struct trap_info;
23155 void xen_copy_trap_info(struct trap_info *traps);
23156
23157diff -urNp linux-3.1.1/block/blk-iopoll.c linux-3.1.1/block/blk-iopoll.c
23158--- linux-3.1.1/block/blk-iopoll.c 2011-11-11 15:19:27.000000000 -0500
23159+++ linux-3.1.1/block/blk-iopoll.c 2011-11-16 18:39:07.000000000 -0500
23160@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
23161 }
23162 EXPORT_SYMBOL(blk_iopoll_complete);
23163
23164-static void blk_iopoll_softirq(struct softirq_action *h)
23165+static void blk_iopoll_softirq(void)
23166 {
23167 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
23168 int rearm = 0, budget = blk_iopoll_budget;
23169diff -urNp linux-3.1.1/block/blk-map.c linux-3.1.1/block/blk-map.c
23170--- linux-3.1.1/block/blk-map.c 2011-11-11 15:19:27.000000000 -0500
23171+++ linux-3.1.1/block/blk-map.c 2011-11-16 18:39:07.000000000 -0500
23172@@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue
23173 if (!len || !kbuf)
23174 return -EINVAL;
23175
23176- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
23177+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
23178 if (do_copy)
23179 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
23180 else
23181diff -urNp linux-3.1.1/block/blk-softirq.c linux-3.1.1/block/blk-softirq.c
23182--- linux-3.1.1/block/blk-softirq.c 2011-11-11 15:19:27.000000000 -0500
23183+++ linux-3.1.1/block/blk-softirq.c 2011-11-16 18:39:07.000000000 -0500
23184@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
23185 * Softirq action handler - move entries to local list and loop over them
23186 * while passing them to the queue registered handler.
23187 */
23188-static void blk_done_softirq(struct softirq_action *h)
23189+static void blk_done_softirq(void)
23190 {
23191 struct list_head *cpu_list, local_list;
23192
23193diff -urNp linux-3.1.1/block/bsg.c linux-3.1.1/block/bsg.c
23194--- linux-3.1.1/block/bsg.c 2011-11-11 15:19:27.000000000 -0500
23195+++ linux-3.1.1/block/bsg.c 2011-11-16 18:39:07.000000000 -0500
23196@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
23197 struct sg_io_v4 *hdr, struct bsg_device *bd,
23198 fmode_t has_write_perm)
23199 {
23200+ unsigned char tmpcmd[sizeof(rq->__cmd)];
23201+ unsigned char *cmdptr;
23202+
23203 if (hdr->request_len > BLK_MAX_CDB) {
23204 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
23205 if (!rq->cmd)
23206 return -ENOMEM;
23207- }
23208+ cmdptr = rq->cmd;
23209+ } else
23210+ cmdptr = tmpcmd;
23211
23212- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
23213+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
23214 hdr->request_len))
23215 return -EFAULT;
23216
23217+ if (cmdptr != rq->cmd)
23218+ memcpy(rq->cmd, cmdptr, hdr->request_len);
23219+
23220 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
23221 if (blk_verify_command(rq->cmd, has_write_perm))
23222 return -EPERM;
23223diff -urNp linux-3.1.1/block/compat_ioctl.c linux-3.1.1/block/compat_ioctl.c
23224--- linux-3.1.1/block/compat_ioctl.c 2011-11-11 15:19:27.000000000 -0500
23225+++ linux-3.1.1/block/compat_ioctl.c 2011-11-16 18:39:07.000000000 -0500
23226@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_
23227 err |= __get_user(f->spec1, &uf->spec1);
23228 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
23229 err |= __get_user(name, &uf->name);
23230- f->name = compat_ptr(name);
23231+ f->name = (void __force_kernel *)compat_ptr(name);
23232 if (err) {
23233 err = -EFAULT;
23234 goto out;
23235diff -urNp linux-3.1.1/block/scsi_ioctl.c linux-3.1.1/block/scsi_ioctl.c
23236--- linux-3.1.1/block/scsi_ioctl.c 2011-11-11 15:19:27.000000000 -0500
23237+++ linux-3.1.1/block/scsi_ioctl.c 2011-11-16 18:39:07.000000000 -0500
23238@@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
23239 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
23240 struct sg_io_hdr *hdr, fmode_t mode)
23241 {
23242- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
23243+ unsigned char tmpcmd[sizeof(rq->__cmd)];
23244+ unsigned char *cmdptr;
23245+
23246+ if (rq->cmd != rq->__cmd)
23247+ cmdptr = rq->cmd;
23248+ else
23249+ cmdptr = tmpcmd;
23250+
23251+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
23252 return -EFAULT;
23253+
23254+ if (cmdptr != rq->cmd)
23255+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
23256+
23257 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
23258 return -EPERM;
23259
23260@@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
23261 int err;
23262 unsigned int in_len, out_len, bytes, opcode, cmdlen;
23263 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
23264+ unsigned char tmpcmd[sizeof(rq->__cmd)];
23265+ unsigned char *cmdptr;
23266
23267 if (!sic)
23268 return -EINVAL;
23269@@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
23270 */
23271 err = -EFAULT;
23272 rq->cmd_len = cmdlen;
23273- if (copy_from_user(rq->cmd, sic->data, cmdlen))
23274+
23275+ if (rq->cmd != rq->__cmd)
23276+ cmdptr = rq->cmd;
23277+ else
23278+ cmdptr = tmpcmd;
23279+
23280+ if (copy_from_user(cmdptr, sic->data, cmdlen))
23281 goto error;
23282
23283+ if (rq->cmd != cmdptr)
23284+ memcpy(rq->cmd, cmdptr, cmdlen);
23285+
23286 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
23287 goto error;
23288
23289diff -urNp linux-3.1.1/crypto/cryptd.c linux-3.1.1/crypto/cryptd.c
23290--- linux-3.1.1/crypto/cryptd.c 2011-11-11 15:19:27.000000000 -0500
23291+++ linux-3.1.1/crypto/cryptd.c 2011-11-16 18:39:07.000000000 -0500
23292@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
23293
23294 struct cryptd_blkcipher_request_ctx {
23295 crypto_completion_t complete;
23296-};
23297+} __no_const;
23298
23299 struct cryptd_hash_ctx {
23300 struct crypto_shash *child;
23301@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
23302
23303 struct cryptd_aead_request_ctx {
23304 crypto_completion_t complete;
23305-};
23306+} __no_const;
23307
23308 static void cryptd_queue_worker(struct work_struct *work);
23309
23310diff -urNp linux-3.1.1/crypto/serpent.c linux-3.1.1/crypto/serpent.c
23311--- linux-3.1.1/crypto/serpent.c 2011-11-11 15:19:27.000000000 -0500
23312+++ linux-3.1.1/crypto/serpent.c 2011-11-16 18:40:10.000000000 -0500
23313@@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
23314 u32 r0,r1,r2,r3,r4;
23315 int i;
23316
23317+ pax_track_stack();
23318+
23319 /* Copy key, add padding */
23320
23321 for (i = 0; i < keylen; ++i)
23322diff -urNp linux-3.1.1/Documentation/dontdiff linux-3.1.1/Documentation/dontdiff
23323--- linux-3.1.1/Documentation/dontdiff 2011-11-11 15:19:27.000000000 -0500
23324+++ linux-3.1.1/Documentation/dontdiff 2011-11-16 18:39:07.000000000 -0500
23325@@ -5,6 +5,7 @@
23326 *.cis
23327 *.cpio
23328 *.csp
23329+*.dbg
23330 *.dsp
23331 *.dvi
23332 *.elf
23333@@ -48,9 +49,11 @@
23334 *.tab.h
23335 *.tex
23336 *.ver
23337+*.vim
23338 *.xml
23339 *.xz
23340 *_MODULES
23341+*_reg_safe.h
23342 *_vga16.c
23343 *~
23344 \#*#
23345@@ -70,6 +73,7 @@ Kerntypes
23346 Module.markers
23347 Module.symvers
23348 PENDING
23349+PERF*
23350 SCCS
23351 System.map*
23352 TAGS
23353@@ -93,19 +97,24 @@ bounds.h
23354 bsetup
23355 btfixupprep
23356 build
23357+builtin-policy.h
23358 bvmlinux
23359 bzImage*
23360 capability_names.h
23361 capflags.c
23362 classlist.h*
23363+clut_vga16.c
23364+common-cmds.h
23365 comp*.log
23366 compile.h*
23367 conf
23368 config
23369 config-*
23370 config_data.h*
23371+config.c
23372 config.mak
23373 config.mak.autogen
23374+config.tmp
23375 conmakehash
23376 consolemap_deftbl.c*
23377 cpustr.h
23378@@ -119,6 +128,7 @@ dslm
23379 elf2ecoff
23380 elfconfig.h*
23381 evergreen_reg_safe.h
23382+exception_policy.conf
23383 fixdep
23384 flask.h
23385 fore200e_mkfirm
23386@@ -126,12 +136,14 @@ fore200e_pca_fw.c*
23387 gconf
23388 gconf.glade.h
23389 gen-devlist
23390+gen-kdb_cmds.c
23391 gen_crc32table
23392 gen_init_cpio
23393 generated
23394 genheaders
23395 genksyms
23396 *_gray256.c
23397+hash
23398 hpet_example
23399 hugepage-mmap
23400 hugepage-shm
23401@@ -146,7 +158,7 @@ int32.c
23402 int4.c
23403 int8.c
23404 kallsyms
23405-kconfig
23406+kern_constants.h
23407 keywords.c
23408 ksym.c*
23409 ksym.h*
23410@@ -154,7 +166,6 @@ kxgettext
23411 lkc_defs.h
23412 lex.c
23413 lex.*.c
23414-linux
23415 logo_*.c
23416 logo_*_clut224.c
23417 logo_*_mono.c
23418@@ -166,7 +177,6 @@ machtypes.h
23419 map
23420 map_hugetlb
23421 maui_boot.h
23422-media
23423 mconf
23424 miboot*
23425 mk_elfconfig
23426@@ -174,6 +184,7 @@ mkboot
23427 mkbugboot
23428 mkcpustr
23429 mkdep
23430+mkpiggy
23431 mkprep
23432 mkregtable
23433 mktables
23434@@ -209,6 +220,7 @@ r300_reg_safe.h
23435 r420_reg_safe.h
23436 r600_reg_safe.h
23437 recordmcount
23438+regdb.c
23439 relocs
23440 rlim_names.h
23441 rn50_reg_safe.h
23442@@ -219,6 +231,7 @@ setup
23443 setup.bin
23444 setup.elf
23445 sImage
23446+slabinfo
23447 sm_tbl*
23448 split-include
23449 syscalltab.h
23450@@ -229,6 +242,7 @@ tftpboot.img
23451 timeconst.h
23452 times.h*
23453 trix_boot.h
23454+user_constants.h
23455 utsrelease.h*
23456 vdso-syms.lds
23457 vdso.lds
23458@@ -246,7 +260,9 @@ vmlinux
23459 vmlinux-*
23460 vmlinux.aout
23461 vmlinux.bin.all
23462+vmlinux.bin.bz2
23463 vmlinux.lds
23464+vmlinux.relocs
23465 vmlinuz
23466 voffset.h
23467 vsyscall.lds
23468@@ -254,9 +270,11 @@ vsyscall_32.lds
23469 wanxlfw.inc
23470 uImage
23471 unifdef
23472+utsrelease.h
23473 wakeup.bin
23474 wakeup.elf
23475 wakeup.lds
23476 zImage*
23477 zconf.hash.c
23478+zconf.lex.c
23479 zoffset.h
23480diff -urNp linux-3.1.1/Documentation/kernel-parameters.txt linux-3.1.1/Documentation/kernel-parameters.txt
23481--- linux-3.1.1/Documentation/kernel-parameters.txt 2011-11-11 15:19:27.000000000 -0500
23482+++ linux-3.1.1/Documentation/kernel-parameters.txt 2011-11-16 18:39:07.000000000 -0500
23483@@ -1898,6 +1898,13 @@ bytes respectively. Such letter suffixes
23484 the specified number of seconds. This is to be used if
23485 your oopses keep scrolling off the screen.
23486
23487+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
23488+ virtualization environments that don't cope well with the
23489+ expand down segment used by UDEREF on X86-32 or the frequent
23490+ page table updates on X86-64.
23491+
23492+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
23493+
23494 pcbit= [HW,ISDN]
23495
23496 pcd. [PARIDE]
23497diff -urNp linux-3.1.1/drivers/acpi/apei/cper.c linux-3.1.1/drivers/acpi/apei/cper.c
23498--- linux-3.1.1/drivers/acpi/apei/cper.c 2011-11-11 15:19:27.000000000 -0500
23499+++ linux-3.1.1/drivers/acpi/apei/cper.c 2011-11-16 18:39:07.000000000 -0500
23500@@ -38,12 +38,12 @@
23501 */
23502 u64 cper_next_record_id(void)
23503 {
23504- static atomic64_t seq;
23505+ static atomic64_unchecked_t seq;
23506
23507- if (!atomic64_read(&seq))
23508- atomic64_set(&seq, ((u64)get_seconds()) << 32);
23509+ if (!atomic64_read_unchecked(&seq))
23510+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
23511
23512- return atomic64_inc_return(&seq);
23513+ return atomic64_inc_return_unchecked(&seq);
23514 }
23515 EXPORT_SYMBOL_GPL(cper_next_record_id);
23516
23517diff -urNp linux-3.1.1/drivers/acpi/ec_sys.c linux-3.1.1/drivers/acpi/ec_sys.c
23518--- linux-3.1.1/drivers/acpi/ec_sys.c 2011-11-11 15:19:27.000000000 -0500
23519+++ linux-3.1.1/drivers/acpi/ec_sys.c 2011-11-16 18:39:07.000000000 -0500
23520@@ -11,6 +11,7 @@
23521 #include <linux/kernel.h>
23522 #include <linux/acpi.h>
23523 #include <linux/debugfs.h>
23524+#include <asm/uaccess.h>
23525 #include "internal.h"
23526
23527 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
23528@@ -39,7 +40,7 @@ static ssize_t acpi_ec_read_io(struct fi
23529 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
23530 */
23531 unsigned int size = EC_SPACE_SIZE;
23532- u8 *data = (u8 *) buf;
23533+ u8 data;
23534 loff_t init_off = *off;
23535 int err = 0;
23536
23537@@ -52,9 +53,11 @@ static ssize_t acpi_ec_read_io(struct fi
23538 size = count;
23539
23540 while (size) {
23541- err = ec_read(*off, &data[*off - init_off]);
23542+ err = ec_read(*off, &data);
23543 if (err)
23544 return err;
23545+ if (put_user(data, &buf[*off - init_off]))
23546+ return -EFAULT;
23547 *off += 1;
23548 size--;
23549 }
23550@@ -70,7 +73,6 @@ static ssize_t acpi_ec_write_io(struct f
23551
23552 unsigned int size = count;
23553 loff_t init_off = *off;
23554- u8 *data = (u8 *) buf;
23555 int err = 0;
23556
23557 if (*off >= EC_SPACE_SIZE)
23558@@ -81,7 +83,9 @@ static ssize_t acpi_ec_write_io(struct f
23559 }
23560
23561 while (size) {
23562- u8 byte_write = data[*off - init_off];
23563+ u8 byte_write;
23564+ if (get_user(byte_write, &buf[*off - init_off]))
23565+ return -EFAULT;
23566 err = ec_write(*off, byte_write);
23567 if (err)
23568 return err;
23569diff -urNp linux-3.1.1/drivers/acpi/proc.c linux-3.1.1/drivers/acpi/proc.c
23570--- linux-3.1.1/drivers/acpi/proc.c 2011-11-11 15:19:27.000000000 -0500
23571+++ linux-3.1.1/drivers/acpi/proc.c 2011-11-16 18:39:07.000000000 -0500
23572@@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
23573 size_t count, loff_t * ppos)
23574 {
23575 struct list_head *node, *next;
23576- char strbuf[5];
23577- char str[5] = "";
23578- unsigned int len = count;
23579-
23580- if (len > 4)
23581- len = 4;
23582- if (len < 0)
23583- return -EFAULT;
23584+ char strbuf[5] = {0};
23585
23586- if (copy_from_user(strbuf, buffer, len))
23587+ if (count > 4)
23588+ count = 4;
23589+ if (copy_from_user(strbuf, buffer, count))
23590 return -EFAULT;
23591- strbuf[len] = '\0';
23592- sscanf(strbuf, "%s", str);
23593+ strbuf[count] = '\0';
23594
23595 mutex_lock(&acpi_device_lock);
23596 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
23597@@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
23598 if (!dev->wakeup.flags.valid)
23599 continue;
23600
23601- if (!strncmp(dev->pnp.bus_id, str, 4)) {
23602+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
23603 if (device_can_wakeup(&dev->dev)) {
23604 bool enable = !device_may_wakeup(&dev->dev);
23605 device_set_wakeup_enable(&dev->dev, enable);
23606diff -urNp linux-3.1.1/drivers/acpi/processor_driver.c linux-3.1.1/drivers/acpi/processor_driver.c
23607--- linux-3.1.1/drivers/acpi/processor_driver.c 2011-11-11 15:19:27.000000000 -0500
23608+++ linux-3.1.1/drivers/acpi/processor_driver.c 2011-11-16 18:39:07.000000000 -0500
23609@@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
23610 return 0;
23611 #endif
23612
23613- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
23614+ BUG_ON(pr->id >= nr_cpu_ids);
23615
23616 /*
23617 * Buggy BIOS check
23618diff -urNp linux-3.1.1/drivers/ata/libata-core.c linux-3.1.1/drivers/ata/libata-core.c
23619--- linux-3.1.1/drivers/ata/libata-core.c 2011-11-11 15:19:27.000000000 -0500
23620+++ linux-3.1.1/drivers/ata/libata-core.c 2011-11-16 18:39:07.000000000 -0500
23621@@ -4733,7 +4733,7 @@ void ata_qc_free(struct ata_queued_cmd *
23622 struct ata_port *ap;
23623 unsigned int tag;
23624
23625- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23626+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23627 ap = qc->ap;
23628
23629 qc->flags = 0;
23630@@ -4749,7 +4749,7 @@ void __ata_qc_complete(struct ata_queued
23631 struct ata_port *ap;
23632 struct ata_link *link;
23633
23634- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23635+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23636 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
23637 ap = qc->ap;
23638 link = qc->dev->link;
23639@@ -5754,6 +5754,7 @@ static void ata_finalize_port_ops(struct
23640 return;
23641
23642 spin_lock(&lock);
23643+ pax_open_kernel();
23644
23645 for (cur = ops->inherits; cur; cur = cur->inherits) {
23646 void **inherit = (void **)cur;
23647@@ -5767,8 +5768,9 @@ static void ata_finalize_port_ops(struct
23648 if (IS_ERR(*pp))
23649 *pp = NULL;
23650
23651- ops->inherits = NULL;
23652+ *(struct ata_port_operations **)&ops->inherits = NULL;
23653
23654+ pax_close_kernel();
23655 spin_unlock(&lock);
23656 }
23657
23658diff -urNp linux-3.1.1/drivers/ata/libata-eh.c linux-3.1.1/drivers/ata/libata-eh.c
23659--- linux-3.1.1/drivers/ata/libata-eh.c 2011-11-11 15:19:27.000000000 -0500
23660+++ linux-3.1.1/drivers/ata/libata-eh.c 2011-11-16 18:40:10.000000000 -0500
23661@@ -2515,6 +2515,8 @@ void ata_eh_report(struct ata_port *ap)
23662 {
23663 struct ata_link *link;
23664
23665+ pax_track_stack();
23666+
23667 ata_for_each_link(link, ap, HOST_FIRST)
23668 ata_eh_link_report(link);
23669 }
23670diff -urNp linux-3.1.1/drivers/ata/pata_arasan_cf.c linux-3.1.1/drivers/ata/pata_arasan_cf.c
23671--- linux-3.1.1/drivers/ata/pata_arasan_cf.c 2011-11-11 15:19:27.000000000 -0500
23672+++ linux-3.1.1/drivers/ata/pata_arasan_cf.c 2011-11-16 18:39:07.000000000 -0500
23673@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
23674 /* Handle platform specific quirks */
23675 if (pdata->quirk) {
23676 if (pdata->quirk & CF_BROKEN_PIO) {
23677- ap->ops->set_piomode = NULL;
23678+ pax_open_kernel();
23679+ *(void **)&ap->ops->set_piomode = NULL;
23680+ pax_close_kernel();
23681 ap->pio_mask = 0;
23682 }
23683 if (pdata->quirk & CF_BROKEN_MWDMA)
23684diff -urNp linux-3.1.1/drivers/atm/adummy.c linux-3.1.1/drivers/atm/adummy.c
23685--- linux-3.1.1/drivers/atm/adummy.c 2011-11-11 15:19:27.000000000 -0500
23686+++ linux-3.1.1/drivers/atm/adummy.c 2011-11-16 18:39:07.000000000 -0500
23687@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
23688 vcc->pop(vcc, skb);
23689 else
23690 dev_kfree_skb_any(skb);
23691- atomic_inc(&vcc->stats->tx);
23692+ atomic_inc_unchecked(&vcc->stats->tx);
23693
23694 return 0;
23695 }
23696diff -urNp linux-3.1.1/drivers/atm/ambassador.c linux-3.1.1/drivers/atm/ambassador.c
23697--- linux-3.1.1/drivers/atm/ambassador.c 2011-11-11 15:19:27.000000000 -0500
23698+++ linux-3.1.1/drivers/atm/ambassador.c 2011-11-16 18:39:07.000000000 -0500
23699@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
23700 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
23701
23702 // VC layer stats
23703- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
23704+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
23705
23706 // free the descriptor
23707 kfree (tx_descr);
23708@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
23709 dump_skb ("<<<", vc, skb);
23710
23711 // VC layer stats
23712- atomic_inc(&atm_vcc->stats->rx);
23713+ atomic_inc_unchecked(&atm_vcc->stats->rx);
23714 __net_timestamp(skb);
23715 // end of our responsibility
23716 atm_vcc->push (atm_vcc, skb);
23717@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
23718 } else {
23719 PRINTK (KERN_INFO, "dropped over-size frame");
23720 // should we count this?
23721- atomic_inc(&atm_vcc->stats->rx_drop);
23722+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23723 }
23724
23725 } else {
23726@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * at
23727 }
23728
23729 if (check_area (skb->data, skb->len)) {
23730- atomic_inc(&atm_vcc->stats->tx_err);
23731+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
23732 return -ENOMEM; // ?
23733 }
23734
23735diff -urNp linux-3.1.1/drivers/atm/atmtcp.c linux-3.1.1/drivers/atm/atmtcp.c
23736--- linux-3.1.1/drivers/atm/atmtcp.c 2011-11-11 15:19:27.000000000 -0500
23737+++ linux-3.1.1/drivers/atm/atmtcp.c 2011-11-16 18:39:07.000000000 -0500
23738@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
23739 if (vcc->pop) vcc->pop(vcc,skb);
23740 else dev_kfree_skb(skb);
23741 if (dev_data) return 0;
23742- atomic_inc(&vcc->stats->tx_err);
23743+ atomic_inc_unchecked(&vcc->stats->tx_err);
23744 return -ENOLINK;
23745 }
23746 size = skb->len+sizeof(struct atmtcp_hdr);
23747@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
23748 if (!new_skb) {
23749 if (vcc->pop) vcc->pop(vcc,skb);
23750 else dev_kfree_skb(skb);
23751- atomic_inc(&vcc->stats->tx_err);
23752+ atomic_inc_unchecked(&vcc->stats->tx_err);
23753 return -ENOBUFS;
23754 }
23755 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
23756@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
23757 if (vcc->pop) vcc->pop(vcc,skb);
23758 else dev_kfree_skb(skb);
23759 out_vcc->push(out_vcc,new_skb);
23760- atomic_inc(&vcc->stats->tx);
23761- atomic_inc(&out_vcc->stats->rx);
23762+ atomic_inc_unchecked(&vcc->stats->tx);
23763+ atomic_inc_unchecked(&out_vcc->stats->rx);
23764 return 0;
23765 }
23766
23767@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
23768 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
23769 read_unlock(&vcc_sklist_lock);
23770 if (!out_vcc) {
23771- atomic_inc(&vcc->stats->tx_err);
23772+ atomic_inc_unchecked(&vcc->stats->tx_err);
23773 goto done;
23774 }
23775 skb_pull(skb,sizeof(struct atmtcp_hdr));
23776@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
23777 __net_timestamp(new_skb);
23778 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
23779 out_vcc->push(out_vcc,new_skb);
23780- atomic_inc(&vcc->stats->tx);
23781- atomic_inc(&out_vcc->stats->rx);
23782+ atomic_inc_unchecked(&vcc->stats->tx);
23783+ atomic_inc_unchecked(&out_vcc->stats->rx);
23784 done:
23785 if (vcc->pop) vcc->pop(vcc,skb);
23786 else dev_kfree_skb(skb);
23787diff -urNp linux-3.1.1/drivers/atm/eni.c linux-3.1.1/drivers/atm/eni.c
23788--- linux-3.1.1/drivers/atm/eni.c 2011-11-11 15:19:27.000000000 -0500
23789+++ linux-3.1.1/drivers/atm/eni.c 2011-11-16 18:39:07.000000000 -0500
23790@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
23791 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
23792 vcc->dev->number);
23793 length = 0;
23794- atomic_inc(&vcc->stats->rx_err);
23795+ atomic_inc_unchecked(&vcc->stats->rx_err);
23796 }
23797 else {
23798 length = ATM_CELL_SIZE-1; /* no HEC */
23799@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
23800 size);
23801 }
23802 eff = length = 0;
23803- atomic_inc(&vcc->stats->rx_err);
23804+ atomic_inc_unchecked(&vcc->stats->rx_err);
23805 }
23806 else {
23807 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
23808@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
23809 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
23810 vcc->dev->number,vcc->vci,length,size << 2,descr);
23811 length = eff = 0;
23812- atomic_inc(&vcc->stats->rx_err);
23813+ atomic_inc_unchecked(&vcc->stats->rx_err);
23814 }
23815 }
23816 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
23817@@ -771,7 +771,7 @@ rx_dequeued++;
23818 vcc->push(vcc,skb);
23819 pushed++;
23820 }
23821- atomic_inc(&vcc->stats->rx);
23822+ atomic_inc_unchecked(&vcc->stats->rx);
23823 }
23824 wake_up(&eni_dev->rx_wait);
23825 }
23826@@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
23827 PCI_DMA_TODEVICE);
23828 if (vcc->pop) vcc->pop(vcc,skb);
23829 else dev_kfree_skb_irq(skb);
23830- atomic_inc(&vcc->stats->tx);
23831+ atomic_inc_unchecked(&vcc->stats->tx);
23832 wake_up(&eni_dev->tx_wait);
23833 dma_complete++;
23834 }
23835@@ -1568,7 +1568,7 @@ tx_complete++;
23836 /*--------------------------------- entries ---------------------------------*/
23837
23838
23839-static const char *media_name[] __devinitdata = {
23840+static const char *media_name[] __devinitconst = {
23841 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
23842 "UTP", "05?", "06?", "07?", /* 4- 7 */
23843 "TAXI","09?", "10?", "11?", /* 8-11 */
23844diff -urNp linux-3.1.1/drivers/atm/firestream.c linux-3.1.1/drivers/atm/firestream.c
23845--- linux-3.1.1/drivers/atm/firestream.c 2011-11-11 15:19:27.000000000 -0500
23846+++ linux-3.1.1/drivers/atm/firestream.c 2011-11-16 18:39:07.000000000 -0500
23847@@ -750,7 +750,7 @@ static void process_txdone_queue (struct
23848 }
23849 }
23850
23851- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
23852+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
23853
23854 fs_dprintk (FS_DEBUG_TXMEM, "i");
23855 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
23856@@ -817,7 +817,7 @@ static void process_incoming (struct fs_
23857 #endif
23858 skb_put (skb, qe->p1 & 0xffff);
23859 ATM_SKB(skb)->vcc = atm_vcc;
23860- atomic_inc(&atm_vcc->stats->rx);
23861+ atomic_inc_unchecked(&atm_vcc->stats->rx);
23862 __net_timestamp(skb);
23863 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
23864 atm_vcc->push (atm_vcc, skb);
23865@@ -838,12 +838,12 @@ static void process_incoming (struct fs_
23866 kfree (pe);
23867 }
23868 if (atm_vcc)
23869- atomic_inc(&atm_vcc->stats->rx_drop);
23870+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23871 break;
23872 case 0x1f: /* Reassembly abort: no buffers. */
23873 /* Silently increment error counter. */
23874 if (atm_vcc)
23875- atomic_inc(&atm_vcc->stats->rx_drop);
23876+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23877 break;
23878 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
23879 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
23880diff -urNp linux-3.1.1/drivers/atm/fore200e.c linux-3.1.1/drivers/atm/fore200e.c
23881--- linux-3.1.1/drivers/atm/fore200e.c 2011-11-11 15:19:27.000000000 -0500
23882+++ linux-3.1.1/drivers/atm/fore200e.c 2011-11-16 18:39:07.000000000 -0500
23883@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
23884 #endif
23885 /* check error condition */
23886 if (*entry->status & STATUS_ERROR)
23887- atomic_inc(&vcc->stats->tx_err);
23888+ atomic_inc_unchecked(&vcc->stats->tx_err);
23889 else
23890- atomic_inc(&vcc->stats->tx);
23891+ atomic_inc_unchecked(&vcc->stats->tx);
23892 }
23893 }
23894
23895@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
23896 if (skb == NULL) {
23897 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
23898
23899- atomic_inc(&vcc->stats->rx_drop);
23900+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23901 return -ENOMEM;
23902 }
23903
23904@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
23905
23906 dev_kfree_skb_any(skb);
23907
23908- atomic_inc(&vcc->stats->rx_drop);
23909+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23910 return -ENOMEM;
23911 }
23912
23913 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
23914
23915 vcc->push(vcc, skb);
23916- atomic_inc(&vcc->stats->rx);
23917+ atomic_inc_unchecked(&vcc->stats->rx);
23918
23919 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
23920
23921@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
23922 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
23923 fore200e->atm_dev->number,
23924 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
23925- atomic_inc(&vcc->stats->rx_err);
23926+ atomic_inc_unchecked(&vcc->stats->rx_err);
23927 }
23928 }
23929
23930@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
23931 goto retry_here;
23932 }
23933
23934- atomic_inc(&vcc->stats->tx_err);
23935+ atomic_inc_unchecked(&vcc->stats->tx_err);
23936
23937 fore200e->tx_sat++;
23938 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
23939diff -urNp linux-3.1.1/drivers/atm/he.c linux-3.1.1/drivers/atm/he.c
23940--- linux-3.1.1/drivers/atm/he.c 2011-11-11 15:19:27.000000000 -0500
23941+++ linux-3.1.1/drivers/atm/he.c 2011-11-16 18:39:07.000000000 -0500
23942@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23943
23944 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
23945 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
23946- atomic_inc(&vcc->stats->rx_drop);
23947+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23948 goto return_host_buffers;
23949 }
23950
23951@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23952 RBRQ_LEN_ERR(he_dev->rbrq_head)
23953 ? "LEN_ERR" : "",
23954 vcc->vpi, vcc->vci);
23955- atomic_inc(&vcc->stats->rx_err);
23956+ atomic_inc_unchecked(&vcc->stats->rx_err);
23957 goto return_host_buffers;
23958 }
23959
23960@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23961 vcc->push(vcc, skb);
23962 spin_lock(&he_dev->global_lock);
23963
23964- atomic_inc(&vcc->stats->rx);
23965+ atomic_inc_unchecked(&vcc->stats->rx);
23966
23967 return_host_buffers:
23968 ++pdus_assembled;
23969@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
23970 tpd->vcc->pop(tpd->vcc, tpd->skb);
23971 else
23972 dev_kfree_skb_any(tpd->skb);
23973- atomic_inc(&tpd->vcc->stats->tx_err);
23974+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
23975 }
23976 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
23977 return;
23978@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23979 vcc->pop(vcc, skb);
23980 else
23981 dev_kfree_skb_any(skb);
23982- atomic_inc(&vcc->stats->tx_err);
23983+ atomic_inc_unchecked(&vcc->stats->tx_err);
23984 return -EINVAL;
23985 }
23986
23987@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23988 vcc->pop(vcc, skb);
23989 else
23990 dev_kfree_skb_any(skb);
23991- atomic_inc(&vcc->stats->tx_err);
23992+ atomic_inc_unchecked(&vcc->stats->tx_err);
23993 return -EINVAL;
23994 }
23995 #endif
23996@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23997 vcc->pop(vcc, skb);
23998 else
23999 dev_kfree_skb_any(skb);
24000- atomic_inc(&vcc->stats->tx_err);
24001+ atomic_inc_unchecked(&vcc->stats->tx_err);
24002 spin_unlock_irqrestore(&he_dev->global_lock, flags);
24003 return -ENOMEM;
24004 }
24005@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
24006 vcc->pop(vcc, skb);
24007 else
24008 dev_kfree_skb_any(skb);
24009- atomic_inc(&vcc->stats->tx_err);
24010+ atomic_inc_unchecked(&vcc->stats->tx_err);
24011 spin_unlock_irqrestore(&he_dev->global_lock, flags);
24012 return -ENOMEM;
24013 }
24014@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
24015 __enqueue_tpd(he_dev, tpd, cid);
24016 spin_unlock_irqrestore(&he_dev->global_lock, flags);
24017
24018- atomic_inc(&vcc->stats->tx);
24019+ atomic_inc_unchecked(&vcc->stats->tx);
24020
24021 return 0;
24022 }
24023diff -urNp linux-3.1.1/drivers/atm/horizon.c linux-3.1.1/drivers/atm/horizon.c
24024--- linux-3.1.1/drivers/atm/horizon.c 2011-11-11 15:19:27.000000000 -0500
24025+++ linux-3.1.1/drivers/atm/horizon.c 2011-11-16 18:39:07.000000000 -0500
24026@@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev,
24027 {
24028 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
24029 // VC layer stats
24030- atomic_inc(&vcc->stats->rx);
24031+ atomic_inc_unchecked(&vcc->stats->rx);
24032 __net_timestamp(skb);
24033 // end of our responsibility
24034 vcc->push (vcc, skb);
24035@@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const
24036 dev->tx_iovec = NULL;
24037
24038 // VC layer stats
24039- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
24040+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
24041
24042 // free the skb
24043 hrz_kfree_skb (skb);
24044diff -urNp linux-3.1.1/drivers/atm/idt77252.c linux-3.1.1/drivers/atm/idt77252.c
24045--- linux-3.1.1/drivers/atm/idt77252.c 2011-11-11 15:19:27.000000000 -0500
24046+++ linux-3.1.1/drivers/atm/idt77252.c 2011-11-16 18:39:07.000000000 -0500
24047@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, str
24048 else
24049 dev_kfree_skb(skb);
24050
24051- atomic_inc(&vcc->stats->tx);
24052+ atomic_inc_unchecked(&vcc->stats->tx);
24053 }
24054
24055 atomic_dec(&scq->used);
24056@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, st
24057 if ((sb = dev_alloc_skb(64)) == NULL) {
24058 printk("%s: Can't allocate buffers for aal0.\n",
24059 card->name);
24060- atomic_add(i, &vcc->stats->rx_drop);
24061+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
24062 break;
24063 }
24064 if (!atm_charge(vcc, sb->truesize)) {
24065 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
24066 card->name);
24067- atomic_add(i - 1, &vcc->stats->rx_drop);
24068+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
24069 dev_kfree_skb(sb);
24070 break;
24071 }
24072@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, st
24073 ATM_SKB(sb)->vcc = vcc;
24074 __net_timestamp(sb);
24075 vcc->push(vcc, sb);
24076- atomic_inc(&vcc->stats->rx);
24077+ atomic_inc_unchecked(&vcc->stats->rx);
24078
24079 cell += ATM_CELL_PAYLOAD;
24080 }
24081@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, st
24082 "(CDC: %08x)\n",
24083 card->name, len, rpp->len, readl(SAR_REG_CDC));
24084 recycle_rx_pool_skb(card, rpp);
24085- atomic_inc(&vcc->stats->rx_err);
24086+ atomic_inc_unchecked(&vcc->stats->rx_err);
24087 return;
24088 }
24089 if (stat & SAR_RSQE_CRC) {
24090 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
24091 recycle_rx_pool_skb(card, rpp);
24092- atomic_inc(&vcc->stats->rx_err);
24093+ atomic_inc_unchecked(&vcc->stats->rx_err);
24094 return;
24095 }
24096 if (skb_queue_len(&rpp->queue) > 1) {
24097@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, st
24098 RXPRINTK("%s: Can't alloc RX skb.\n",
24099 card->name);
24100 recycle_rx_pool_skb(card, rpp);
24101- atomic_inc(&vcc->stats->rx_err);
24102+ atomic_inc_unchecked(&vcc->stats->rx_err);
24103 return;
24104 }
24105 if (!atm_charge(vcc, skb->truesize)) {
24106@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, st
24107 __net_timestamp(skb);
24108
24109 vcc->push(vcc, skb);
24110- atomic_inc(&vcc->stats->rx);
24111+ atomic_inc_unchecked(&vcc->stats->rx);
24112
24113 return;
24114 }
24115@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, st
24116 __net_timestamp(skb);
24117
24118 vcc->push(vcc, skb);
24119- atomic_inc(&vcc->stats->rx);
24120+ atomic_inc_unchecked(&vcc->stats->rx);
24121
24122 if (skb->truesize > SAR_FB_SIZE_3)
24123 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
24124@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *car
24125 if (vcc->qos.aal != ATM_AAL0) {
24126 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
24127 card->name, vpi, vci);
24128- atomic_inc(&vcc->stats->rx_drop);
24129+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24130 goto drop;
24131 }
24132
24133 if ((sb = dev_alloc_skb(64)) == NULL) {
24134 printk("%s: Can't allocate buffers for AAL0.\n",
24135 card->name);
24136- atomic_inc(&vcc->stats->rx_err);
24137+ atomic_inc_unchecked(&vcc->stats->rx_err);
24138 goto drop;
24139 }
24140
24141@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *car
24142 ATM_SKB(sb)->vcc = vcc;
24143 __net_timestamp(sb);
24144 vcc->push(vcc, sb);
24145- atomic_inc(&vcc->stats->rx);
24146+ atomic_inc_unchecked(&vcc->stats->rx);
24147
24148 drop:
24149 skb_pull(queue, 64);
24150@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
24151
24152 if (vc == NULL) {
24153 printk("%s: NULL connection in send().\n", card->name);
24154- atomic_inc(&vcc->stats->tx_err);
24155+ atomic_inc_unchecked(&vcc->stats->tx_err);
24156 dev_kfree_skb(skb);
24157 return -EINVAL;
24158 }
24159 if (!test_bit(VCF_TX, &vc->flags)) {
24160 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
24161- atomic_inc(&vcc->stats->tx_err);
24162+ atomic_inc_unchecked(&vcc->stats->tx_err);
24163 dev_kfree_skb(skb);
24164 return -EINVAL;
24165 }
24166@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
24167 break;
24168 default:
24169 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
24170- atomic_inc(&vcc->stats->tx_err);
24171+ atomic_inc_unchecked(&vcc->stats->tx_err);
24172 dev_kfree_skb(skb);
24173 return -EINVAL;
24174 }
24175
24176 if (skb_shinfo(skb)->nr_frags != 0) {
24177 printk("%s: No scatter-gather yet.\n", card->name);
24178- atomic_inc(&vcc->stats->tx_err);
24179+ atomic_inc_unchecked(&vcc->stats->tx_err);
24180 dev_kfree_skb(skb);
24181 return -EINVAL;
24182 }
24183@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
24184
24185 err = queue_skb(card, vc, skb, oam);
24186 if (err) {
24187- atomic_inc(&vcc->stats->tx_err);
24188+ atomic_inc_unchecked(&vcc->stats->tx_err);
24189 dev_kfree_skb(skb);
24190 return err;
24191 }
24192@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
24193 skb = dev_alloc_skb(64);
24194 if (!skb) {
24195 printk("%s: Out of memory in send_oam().\n", card->name);
24196- atomic_inc(&vcc->stats->tx_err);
24197+ atomic_inc_unchecked(&vcc->stats->tx_err);
24198 return -ENOMEM;
24199 }
24200 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
24201diff -urNp linux-3.1.1/drivers/atm/iphase.c linux-3.1.1/drivers/atm/iphase.c
24202--- linux-3.1.1/drivers/atm/iphase.c 2011-11-11 15:19:27.000000000 -0500
24203+++ linux-3.1.1/drivers/atm/iphase.c 2011-11-16 18:39:07.000000000 -0500
24204@@ -1121,7 +1121,7 @@ static int rx_pkt(struct atm_dev *dev)
24205 status = (u_short) (buf_desc_ptr->desc_mode);
24206 if (status & (RX_CER | RX_PTE | RX_OFL))
24207 {
24208- atomic_inc(&vcc->stats->rx_err);
24209+ atomic_inc_unchecked(&vcc->stats->rx_err);
24210 IF_ERR(printk("IA: bad packet, dropping it");)
24211 if (status & RX_CER) {
24212 IF_ERR(printk(" cause: packet CRC error\n");)
24213@@ -1144,7 +1144,7 @@ static int rx_pkt(struct atm_dev *dev)
24214 len = dma_addr - buf_addr;
24215 if (len > iadev->rx_buf_sz) {
24216 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
24217- atomic_inc(&vcc->stats->rx_err);
24218+ atomic_inc_unchecked(&vcc->stats->rx_err);
24219 goto out_free_desc;
24220 }
24221
24222@@ -1294,7 +1294,7 @@ static void rx_dle_intr(struct atm_dev *
24223 ia_vcc = INPH_IA_VCC(vcc);
24224 if (ia_vcc == NULL)
24225 {
24226- atomic_inc(&vcc->stats->rx_err);
24227+ atomic_inc_unchecked(&vcc->stats->rx_err);
24228 dev_kfree_skb_any(skb);
24229 atm_return(vcc, atm_guess_pdu2truesize(len));
24230 goto INCR_DLE;
24231@@ -1306,7 +1306,7 @@ static void rx_dle_intr(struct atm_dev *
24232 if ((length > iadev->rx_buf_sz) || (length >
24233 (skb->len - sizeof(struct cpcs_trailer))))
24234 {
24235- atomic_inc(&vcc->stats->rx_err);
24236+ atomic_inc_unchecked(&vcc->stats->rx_err);
24237 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
24238 length, skb->len);)
24239 dev_kfree_skb_any(skb);
24240@@ -1322,7 +1322,7 @@ static void rx_dle_intr(struct atm_dev *
24241
24242 IF_RX(printk("rx_dle_intr: skb push");)
24243 vcc->push(vcc,skb);
24244- atomic_inc(&vcc->stats->rx);
24245+ atomic_inc_unchecked(&vcc->stats->rx);
24246 iadev->rx_pkt_cnt++;
24247 }
24248 INCR_DLE:
24249@@ -2802,15 +2802,15 @@ static int ia_ioctl(struct atm_dev *dev,
24250 {
24251 struct k_sonet_stats *stats;
24252 stats = &PRIV(_ia_dev[board])->sonet_stats;
24253- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
24254- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
24255- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
24256- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
24257- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
24258- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
24259- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
24260- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
24261- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
24262+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
24263+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
24264+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
24265+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
24266+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
24267+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
24268+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
24269+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
24270+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
24271 }
24272 ia_cmds.status = 0;
24273 break;
24274@@ -2915,7 +2915,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
24275 if ((desc == 0) || (desc > iadev->num_tx_desc))
24276 {
24277 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
24278- atomic_inc(&vcc->stats->tx);
24279+ atomic_inc_unchecked(&vcc->stats->tx);
24280 if (vcc->pop)
24281 vcc->pop(vcc, skb);
24282 else
24283@@ -3020,14 +3020,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
24284 ATM_DESC(skb) = vcc->vci;
24285 skb_queue_tail(&iadev->tx_dma_q, skb);
24286
24287- atomic_inc(&vcc->stats->tx);
24288+ atomic_inc_unchecked(&vcc->stats->tx);
24289 iadev->tx_pkt_cnt++;
24290 /* Increment transaction counter */
24291 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
24292
24293 #if 0
24294 /* add flow control logic */
24295- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
24296+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
24297 if (iavcc->vc_desc_cnt > 10) {
24298 vcc->tx_quota = vcc->tx_quota * 3 / 4;
24299 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
24300diff -urNp linux-3.1.1/drivers/atm/lanai.c linux-3.1.1/drivers/atm/lanai.c
24301--- linux-3.1.1/drivers/atm/lanai.c 2011-11-11 15:19:27.000000000 -0500
24302+++ linux-3.1.1/drivers/atm/lanai.c 2011-11-16 18:39:07.000000000 -0500
24303@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
24304 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
24305 lanai_endtx(lanai, lvcc);
24306 lanai_free_skb(lvcc->tx.atmvcc, skb);
24307- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
24308+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
24309 }
24310
24311 /* Try to fill the buffer - don't call unless there is backlog */
24312@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
24313 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
24314 __net_timestamp(skb);
24315 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
24316- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
24317+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
24318 out:
24319 lvcc->rx.buf.ptr = end;
24320 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
24321@@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
24322 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
24323 "vcc %d\n", lanai->number, (unsigned int) s, vci);
24324 lanai->stats.service_rxnotaal5++;
24325- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
24326+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
24327 return 0;
24328 }
24329 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
24330@@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
24331 int bytes;
24332 read_unlock(&vcc_sklist_lock);
24333 DPRINTK("got trashed rx pdu on vci %d\n", vci);
24334- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
24335+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
24336 lvcc->stats.x.aal5.service_trash++;
24337 bytes = (SERVICE_GET_END(s) * 16) -
24338 (((unsigned long) lvcc->rx.buf.ptr) -
24339@@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
24340 }
24341 if (s & SERVICE_STREAM) {
24342 read_unlock(&vcc_sklist_lock);
24343- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
24344+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
24345 lvcc->stats.x.aal5.service_stream++;
24346 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
24347 "PDU on VCI %d!\n", lanai->number, vci);
24348@@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
24349 return 0;
24350 }
24351 DPRINTK("got rx crc error on vci %d\n", vci);
24352- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
24353+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
24354 lvcc->stats.x.aal5.service_rxcrc++;
24355 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
24356 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
24357diff -urNp linux-3.1.1/drivers/atm/nicstar.c linux-3.1.1/drivers/atm/nicstar.c
24358--- linux-3.1.1/drivers/atm/nicstar.c 2011-11-11 15:19:27.000000000 -0500
24359+++ linux-3.1.1/drivers/atm/nicstar.c 2011-11-16 18:39:07.000000000 -0500
24360@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
24361 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
24362 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
24363 card->index);
24364- atomic_inc(&vcc->stats->tx_err);
24365+ atomic_inc_unchecked(&vcc->stats->tx_err);
24366 dev_kfree_skb_any(skb);
24367 return -EINVAL;
24368 }
24369@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
24370 if (!vc->tx) {
24371 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
24372 card->index);
24373- atomic_inc(&vcc->stats->tx_err);
24374+ atomic_inc_unchecked(&vcc->stats->tx_err);
24375 dev_kfree_skb_any(skb);
24376 return -EINVAL;
24377 }
24378@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
24379 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
24380 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
24381 card->index);
24382- atomic_inc(&vcc->stats->tx_err);
24383+ atomic_inc_unchecked(&vcc->stats->tx_err);
24384 dev_kfree_skb_any(skb);
24385 return -EINVAL;
24386 }
24387
24388 if (skb_shinfo(skb)->nr_frags != 0) {
24389 printk("nicstar%d: No scatter-gather yet.\n", card->index);
24390- atomic_inc(&vcc->stats->tx_err);
24391+ atomic_inc_unchecked(&vcc->stats->tx_err);
24392 dev_kfree_skb_any(skb);
24393 return -EINVAL;
24394 }
24395@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
24396 }
24397
24398 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
24399- atomic_inc(&vcc->stats->tx_err);
24400+ atomic_inc_unchecked(&vcc->stats->tx_err);
24401 dev_kfree_skb_any(skb);
24402 return -EIO;
24403 }
24404- atomic_inc(&vcc->stats->tx);
24405+ atomic_inc_unchecked(&vcc->stats->tx);
24406
24407 return 0;
24408 }
24409@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
24410 printk
24411 ("nicstar%d: Can't allocate buffers for aal0.\n",
24412 card->index);
24413- atomic_add(i, &vcc->stats->rx_drop);
24414+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
24415 break;
24416 }
24417 if (!atm_charge(vcc, sb->truesize)) {
24418 RXPRINTK
24419 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
24420 card->index);
24421- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
24422+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
24423 dev_kfree_skb_any(sb);
24424 break;
24425 }
24426@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
24427 ATM_SKB(sb)->vcc = vcc;
24428 __net_timestamp(sb);
24429 vcc->push(vcc, sb);
24430- atomic_inc(&vcc->stats->rx);
24431+ atomic_inc_unchecked(&vcc->stats->rx);
24432 cell += ATM_CELL_PAYLOAD;
24433 }
24434
24435@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
24436 if (iovb == NULL) {
24437 printk("nicstar%d: Out of iovec buffers.\n",
24438 card->index);
24439- atomic_inc(&vcc->stats->rx_drop);
24440+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24441 recycle_rx_buf(card, skb);
24442 return;
24443 }
24444@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
24445 small or large buffer itself. */
24446 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
24447 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
24448- atomic_inc(&vcc->stats->rx_err);
24449+ atomic_inc_unchecked(&vcc->stats->rx_err);
24450 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
24451 NS_MAX_IOVECS);
24452 NS_PRV_IOVCNT(iovb) = 0;
24453@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
24454 ("nicstar%d: Expected a small buffer, and this is not one.\n",
24455 card->index);
24456 which_list(card, skb);
24457- atomic_inc(&vcc->stats->rx_err);
24458+ atomic_inc_unchecked(&vcc->stats->rx_err);
24459 recycle_rx_buf(card, skb);
24460 vc->rx_iov = NULL;
24461 recycle_iov_buf(card, iovb);
24462@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
24463 ("nicstar%d: Expected a large buffer, and this is not one.\n",
24464 card->index);
24465 which_list(card, skb);
24466- atomic_inc(&vcc->stats->rx_err);
24467+ atomic_inc_unchecked(&vcc->stats->rx_err);
24468 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
24469 NS_PRV_IOVCNT(iovb));
24470 vc->rx_iov = NULL;
24471@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
24472 printk(" - PDU size mismatch.\n");
24473 else
24474 printk(".\n");
24475- atomic_inc(&vcc->stats->rx_err);
24476+ atomic_inc_unchecked(&vcc->stats->rx_err);
24477 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
24478 NS_PRV_IOVCNT(iovb));
24479 vc->rx_iov = NULL;
24480@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
24481 /* skb points to a small buffer */
24482 if (!atm_charge(vcc, skb->truesize)) {
24483 push_rxbufs(card, skb);
24484- atomic_inc(&vcc->stats->rx_drop);
24485+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24486 } else {
24487 skb_put(skb, len);
24488 dequeue_sm_buf(card, skb);
24489@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
24490 ATM_SKB(skb)->vcc = vcc;
24491 __net_timestamp(skb);
24492 vcc->push(vcc, skb);
24493- atomic_inc(&vcc->stats->rx);
24494+ atomic_inc_unchecked(&vcc->stats->rx);
24495 }
24496 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
24497 struct sk_buff *sb;
24498@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
24499 if (len <= NS_SMBUFSIZE) {
24500 if (!atm_charge(vcc, sb->truesize)) {
24501 push_rxbufs(card, sb);
24502- atomic_inc(&vcc->stats->rx_drop);
24503+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24504 } else {
24505 skb_put(sb, len);
24506 dequeue_sm_buf(card, sb);
24507@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
24508 ATM_SKB(sb)->vcc = vcc;
24509 __net_timestamp(sb);
24510 vcc->push(vcc, sb);
24511- atomic_inc(&vcc->stats->rx);
24512+ atomic_inc_unchecked(&vcc->stats->rx);
24513 }
24514
24515 push_rxbufs(card, skb);
24516@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
24517
24518 if (!atm_charge(vcc, skb->truesize)) {
24519 push_rxbufs(card, skb);
24520- atomic_inc(&vcc->stats->rx_drop);
24521+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24522 } else {
24523 dequeue_lg_buf(card, skb);
24524 #ifdef NS_USE_DESTRUCTORS
24525@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
24526 ATM_SKB(skb)->vcc = vcc;
24527 __net_timestamp(skb);
24528 vcc->push(vcc, skb);
24529- atomic_inc(&vcc->stats->rx);
24530+ atomic_inc_unchecked(&vcc->stats->rx);
24531 }
24532
24533 push_rxbufs(card, sb);
24534@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
24535 printk
24536 ("nicstar%d: Out of huge buffers.\n",
24537 card->index);
24538- atomic_inc(&vcc->stats->rx_drop);
24539+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24540 recycle_iovec_rx_bufs(card,
24541 (struct iovec *)
24542 iovb->data,
24543@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
24544 card->hbpool.count++;
24545 } else
24546 dev_kfree_skb_any(hb);
24547- atomic_inc(&vcc->stats->rx_drop);
24548+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24549 } else {
24550 /* Copy the small buffer to the huge buffer */
24551 sb = (struct sk_buff *)iov->iov_base;
24552@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
24553 #endif /* NS_USE_DESTRUCTORS */
24554 __net_timestamp(hb);
24555 vcc->push(vcc, hb);
24556- atomic_inc(&vcc->stats->rx);
24557+ atomic_inc_unchecked(&vcc->stats->rx);
24558 }
24559 }
24560
24561diff -urNp linux-3.1.1/drivers/atm/solos-pci.c linux-3.1.1/drivers/atm/solos-pci.c
24562--- linux-3.1.1/drivers/atm/solos-pci.c 2011-11-11 15:19:27.000000000 -0500
24563+++ linux-3.1.1/drivers/atm/solos-pci.c 2011-11-16 18:40:10.000000000 -0500
24564@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
24565 }
24566 atm_charge(vcc, skb->truesize);
24567 vcc->push(vcc, skb);
24568- atomic_inc(&vcc->stats->rx);
24569+ atomic_inc_unchecked(&vcc->stats->rx);
24570 break;
24571
24572 case PKT_STATUS:
24573@@ -899,6 +899,8 @@ static int print_buffer(struct sk_buff *
24574 char msg[500];
24575 char item[10];
24576
24577+ pax_track_stack();
24578+
24579 len = buf->len;
24580 for (i = 0; i < len; i++){
24581 if(i % 8 == 0)
24582@@ -1008,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_car
24583 vcc = SKB_CB(oldskb)->vcc;
24584
24585 if (vcc) {
24586- atomic_inc(&vcc->stats->tx);
24587+ atomic_inc_unchecked(&vcc->stats->tx);
24588 solos_pop(vcc, oldskb);
24589 } else
24590 dev_kfree_skb_irq(oldskb);
24591diff -urNp linux-3.1.1/drivers/atm/suni.c linux-3.1.1/drivers/atm/suni.c
24592--- linux-3.1.1/drivers/atm/suni.c 2011-11-11 15:19:27.000000000 -0500
24593+++ linux-3.1.1/drivers/atm/suni.c 2011-11-16 18:39:07.000000000 -0500
24594@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
24595
24596
24597 #define ADD_LIMITED(s,v) \
24598- atomic_add((v),&stats->s); \
24599- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
24600+ atomic_add_unchecked((v),&stats->s); \
24601+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
24602
24603
24604 static void suni_hz(unsigned long from_timer)
24605diff -urNp linux-3.1.1/drivers/atm/uPD98402.c linux-3.1.1/drivers/atm/uPD98402.c
24606--- linux-3.1.1/drivers/atm/uPD98402.c 2011-11-11 15:19:27.000000000 -0500
24607+++ linux-3.1.1/drivers/atm/uPD98402.c 2011-11-16 18:39:07.000000000 -0500
24608@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
24609 struct sonet_stats tmp;
24610 int error = 0;
24611
24612- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
24613+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
24614 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
24615 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
24616 if (zero && !error) {
24617@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
24618
24619
24620 #define ADD_LIMITED(s,v) \
24621- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
24622- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
24623- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
24624+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
24625+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
24626+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
24627
24628
24629 static void stat_event(struct atm_dev *dev)
24630@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
24631 if (reason & uPD98402_INT_PFM) stat_event(dev);
24632 if (reason & uPD98402_INT_PCO) {
24633 (void) GET(PCOCR); /* clear interrupt cause */
24634- atomic_add(GET(HECCT),
24635+ atomic_add_unchecked(GET(HECCT),
24636 &PRIV(dev)->sonet_stats.uncorr_hcs);
24637 }
24638 if ((reason & uPD98402_INT_RFO) &&
24639@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
24640 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
24641 uPD98402_INT_LOS),PIMR); /* enable them */
24642 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
24643- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
24644- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
24645- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
24646+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
24647+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
24648+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
24649 return 0;
24650 }
24651
24652diff -urNp linux-3.1.1/drivers/atm/zatm.c linux-3.1.1/drivers/atm/zatm.c
24653--- linux-3.1.1/drivers/atm/zatm.c 2011-11-11 15:19:27.000000000 -0500
24654+++ linux-3.1.1/drivers/atm/zatm.c 2011-11-16 18:39:07.000000000 -0500
24655@@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
24656 }
24657 if (!size) {
24658 dev_kfree_skb_irq(skb);
24659- if (vcc) atomic_inc(&vcc->stats->rx_err);
24660+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
24661 continue;
24662 }
24663 if (!atm_charge(vcc,skb->truesize)) {
24664@@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
24665 skb->len = size;
24666 ATM_SKB(skb)->vcc = vcc;
24667 vcc->push(vcc,skb);
24668- atomic_inc(&vcc->stats->rx);
24669+ atomic_inc_unchecked(&vcc->stats->rx);
24670 }
24671 zout(pos & 0xffff,MTA(mbx));
24672 #if 0 /* probably a stupid idea */
24673@@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
24674 skb_queue_head(&zatm_vcc->backlog,skb);
24675 break;
24676 }
24677- atomic_inc(&vcc->stats->tx);
24678+ atomic_inc_unchecked(&vcc->stats->tx);
24679 wake_up(&zatm_vcc->tx_wait);
24680 }
24681
24682diff -urNp linux-3.1.1/drivers/base/devtmpfs.c linux-3.1.1/drivers/base/devtmpfs.c
24683--- linux-3.1.1/drivers/base/devtmpfs.c 2011-11-11 15:19:27.000000000 -0500
24684+++ linux-3.1.1/drivers/base/devtmpfs.c 2011-11-16 18:39:07.000000000 -0500
24685@@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
24686 if (!thread)
24687 return 0;
24688
24689- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
24690+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
24691 if (err)
24692 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
24693 else
24694diff -urNp linux-3.1.1/drivers/base/power/wakeup.c linux-3.1.1/drivers/base/power/wakeup.c
24695--- linux-3.1.1/drivers/base/power/wakeup.c 2011-11-11 15:19:27.000000000 -0500
24696+++ linux-3.1.1/drivers/base/power/wakeup.c 2011-11-16 18:39:07.000000000 -0500
24697@@ -29,14 +29,14 @@ bool events_check_enabled;
24698 * They need to be modified together atomically, so it's better to use one
24699 * atomic variable to hold them both.
24700 */
24701-static atomic_t combined_event_count = ATOMIC_INIT(0);
24702+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
24703
24704 #define IN_PROGRESS_BITS (sizeof(int) * 4)
24705 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
24706
24707 static void split_counters(unsigned int *cnt, unsigned int *inpr)
24708 {
24709- unsigned int comb = atomic_read(&combined_event_count);
24710+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
24711
24712 *cnt = (comb >> IN_PROGRESS_BITS);
24713 *inpr = comb & MAX_IN_PROGRESS;
24714@@ -350,7 +350,7 @@ static void wakeup_source_activate(struc
24715 ws->last_time = ktime_get();
24716
24717 /* Increment the counter of events in progress. */
24718- atomic_inc(&combined_event_count);
24719+ atomic_inc_unchecked(&combined_event_count);
24720 }
24721
24722 /**
24723@@ -440,7 +440,7 @@ static void wakeup_source_deactivate(str
24724 * Increment the counter of registered wakeup events and decrement the
24725 * couter of wakeup events in progress simultaneously.
24726 */
24727- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
24728+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
24729 }
24730
24731 /**
24732diff -urNp linux-3.1.1/drivers/block/cciss.c linux-3.1.1/drivers/block/cciss.c
24733--- linux-3.1.1/drivers/block/cciss.c 2011-11-11 15:19:27.000000000 -0500
24734+++ linux-3.1.1/drivers/block/cciss.c 2011-11-16 18:40:10.000000000 -0500
24735@@ -1179,6 +1179,8 @@ static int cciss_ioctl32_passthru(struct
24736 int err;
24737 u32 cp;
24738
24739+ memset(&arg64, 0, sizeof(arg64));
24740+
24741 err = 0;
24742 err |=
24743 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
24744@@ -2986,7 +2988,7 @@ static void start_io(ctlr_info_t *h)
24745 while (!list_empty(&h->reqQ)) {
24746 c = list_entry(h->reqQ.next, CommandList_struct, list);
24747 /* can't do anything if fifo is full */
24748- if ((h->access.fifo_full(h))) {
24749+ if ((h->access->fifo_full(h))) {
24750 dev_warn(&h->pdev->dev, "fifo full\n");
24751 break;
24752 }
24753@@ -2996,7 +2998,7 @@ static void start_io(ctlr_info_t *h)
24754 h->Qdepth--;
24755
24756 /* Tell the controller execute command */
24757- h->access.submit_command(h, c);
24758+ h->access->submit_command(h, c);
24759
24760 /* Put job onto the completed Q */
24761 addQ(&h->cmpQ, c);
24762@@ -3422,17 +3424,17 @@ startio:
24763
24764 static inline unsigned long get_next_completion(ctlr_info_t *h)
24765 {
24766- return h->access.command_completed(h);
24767+ return h->access->command_completed(h);
24768 }
24769
24770 static inline int interrupt_pending(ctlr_info_t *h)
24771 {
24772- return h->access.intr_pending(h);
24773+ return h->access->intr_pending(h);
24774 }
24775
24776 static inline long interrupt_not_for_us(ctlr_info_t *h)
24777 {
24778- return ((h->access.intr_pending(h) == 0) ||
24779+ return ((h->access->intr_pending(h) == 0) ||
24780 (h->interrupts_enabled == 0));
24781 }
24782
24783@@ -3465,7 +3467,7 @@ static inline u32 next_command(ctlr_info
24784 u32 a;
24785
24786 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
24787- return h->access.command_completed(h);
24788+ return h->access->command_completed(h);
24789
24790 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
24791 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
24792@@ -4020,7 +4022,7 @@ static void __devinit cciss_put_controll
24793 trans_support & CFGTBL_Trans_use_short_tags);
24794
24795 /* Change the access methods to the performant access methods */
24796- h->access = SA5_performant_access;
24797+ h->access = &SA5_performant_access;
24798 h->transMethod = CFGTBL_Trans_Performant;
24799
24800 return;
24801@@ -4292,7 +4294,7 @@ static int __devinit cciss_pci_init(ctlr
24802 if (prod_index < 0)
24803 return -ENODEV;
24804 h->product_name = products[prod_index].product_name;
24805- h->access = *(products[prod_index].access);
24806+ h->access = products[prod_index].access;
24807
24808 if (cciss_board_disabled(h)) {
24809 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
24810@@ -5009,7 +5011,7 @@ reinit_after_soft_reset:
24811 }
24812
24813 /* make sure the board interrupts are off */
24814- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24815+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24816 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
24817 if (rc)
24818 goto clean2;
24819@@ -5061,7 +5063,7 @@ reinit_after_soft_reset:
24820 * fake ones to scoop up any residual completions.
24821 */
24822 spin_lock_irqsave(&h->lock, flags);
24823- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24824+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24825 spin_unlock_irqrestore(&h->lock, flags);
24826 free_irq(h->intr[PERF_MODE_INT], h);
24827 rc = cciss_request_irq(h, cciss_msix_discard_completions,
24828@@ -5081,9 +5083,9 @@ reinit_after_soft_reset:
24829 dev_info(&h->pdev->dev, "Board READY.\n");
24830 dev_info(&h->pdev->dev,
24831 "Waiting for stale completions to drain.\n");
24832- h->access.set_intr_mask(h, CCISS_INTR_ON);
24833+ h->access->set_intr_mask(h, CCISS_INTR_ON);
24834 msleep(10000);
24835- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24836+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24837
24838 rc = controller_reset_failed(h->cfgtable);
24839 if (rc)
24840@@ -5106,7 +5108,7 @@ reinit_after_soft_reset:
24841 cciss_scsi_setup(h);
24842
24843 /* Turn the interrupts on so we can service requests */
24844- h->access.set_intr_mask(h, CCISS_INTR_ON);
24845+ h->access->set_intr_mask(h, CCISS_INTR_ON);
24846
24847 /* Get the firmware version */
24848 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
24849@@ -5178,7 +5180,7 @@ static void cciss_shutdown(struct pci_de
24850 kfree(flush_buf);
24851 if (return_code != IO_OK)
24852 dev_warn(&h->pdev->dev, "Error flushing cache\n");
24853- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24854+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24855 free_irq(h->intr[PERF_MODE_INT], h);
24856 }
24857
24858diff -urNp linux-3.1.1/drivers/block/cciss.h linux-3.1.1/drivers/block/cciss.h
24859--- linux-3.1.1/drivers/block/cciss.h 2011-11-11 15:19:27.000000000 -0500
24860+++ linux-3.1.1/drivers/block/cciss.h 2011-11-16 18:39:07.000000000 -0500
24861@@ -100,7 +100,7 @@ struct ctlr_info
24862 /* information about each logical volume */
24863 drive_info_struct *drv[CISS_MAX_LUN];
24864
24865- struct access_method access;
24866+ struct access_method *access;
24867
24868 /* queue and queue Info */
24869 struct list_head reqQ;
24870diff -urNp linux-3.1.1/drivers/block/cpqarray.c linux-3.1.1/drivers/block/cpqarray.c
24871--- linux-3.1.1/drivers/block/cpqarray.c 2011-11-11 15:19:27.000000000 -0500
24872+++ linux-3.1.1/drivers/block/cpqarray.c 2011-11-16 18:40:10.000000000 -0500
24873@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
24874 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
24875 goto Enomem4;
24876 }
24877- hba[i]->access.set_intr_mask(hba[i], 0);
24878+ hba[i]->access->set_intr_mask(hba[i], 0);
24879 if (request_irq(hba[i]->intr, do_ida_intr,
24880 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
24881 {
24882@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
24883 add_timer(&hba[i]->timer);
24884
24885 /* Enable IRQ now that spinlock and rate limit timer are set up */
24886- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
24887+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
24888
24889 for(j=0; j<NWD; j++) {
24890 struct gendisk *disk = ida_gendisk[i][j];
24891@@ -694,7 +694,7 @@ DBGINFO(
24892 for(i=0; i<NR_PRODUCTS; i++) {
24893 if (board_id == products[i].board_id) {
24894 c->product_name = products[i].product_name;
24895- c->access = *(products[i].access);
24896+ c->access = products[i].access;
24897 break;
24898 }
24899 }
24900@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
24901 hba[ctlr]->intr = intr;
24902 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
24903 hba[ctlr]->product_name = products[j].product_name;
24904- hba[ctlr]->access = *(products[j].access);
24905+ hba[ctlr]->access = products[j].access;
24906 hba[ctlr]->ctlr = ctlr;
24907 hba[ctlr]->board_id = board_id;
24908 hba[ctlr]->pci_dev = NULL; /* not PCI */
24909@@ -911,6 +911,8 @@ static void do_ida_request(struct reques
24910 struct scatterlist tmp_sg[SG_MAX];
24911 int i, dir, seg;
24912
24913+ pax_track_stack();
24914+
24915 queue_next:
24916 creq = blk_peek_request(q);
24917 if (!creq)
24918@@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
24919
24920 while((c = h->reqQ) != NULL) {
24921 /* Can't do anything if we're busy */
24922- if (h->access.fifo_full(h) == 0)
24923+ if (h->access->fifo_full(h) == 0)
24924 return;
24925
24926 /* Get the first entry from the request Q */
24927@@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
24928 h->Qdepth--;
24929
24930 /* Tell the controller to do our bidding */
24931- h->access.submit_command(h, c);
24932+ h->access->submit_command(h, c);
24933
24934 /* Get onto the completion Q */
24935 addQ(&h->cmpQ, c);
24936@@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq,
24937 unsigned long flags;
24938 __u32 a,a1;
24939
24940- istat = h->access.intr_pending(h);
24941+ istat = h->access->intr_pending(h);
24942 /* Is this interrupt for us? */
24943 if (istat == 0)
24944 return IRQ_NONE;
24945@@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq,
24946 */
24947 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
24948 if (istat & FIFO_NOT_EMPTY) {
24949- while((a = h->access.command_completed(h))) {
24950+ while((a = h->access->command_completed(h))) {
24951 a1 = a; a &= ~3;
24952 if ((c = h->cmpQ) == NULL)
24953 {
24954@@ -1449,11 +1451,11 @@ static int sendcmd(
24955 /*
24956 * Disable interrupt
24957 */
24958- info_p->access.set_intr_mask(info_p, 0);
24959+ info_p->access->set_intr_mask(info_p, 0);
24960 /* Make sure there is room in the command FIFO */
24961 /* Actually it should be completely empty at this time. */
24962 for (i = 200000; i > 0; i--) {
24963- temp = info_p->access.fifo_full(info_p);
24964+ temp = info_p->access->fifo_full(info_p);
24965 if (temp != 0) {
24966 break;
24967 }
24968@@ -1466,7 +1468,7 @@ DBG(
24969 /*
24970 * Send the cmd
24971 */
24972- info_p->access.submit_command(info_p, c);
24973+ info_p->access->submit_command(info_p, c);
24974 complete = pollcomplete(ctlr);
24975
24976 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
24977@@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t
24978 * we check the new geometry. Then turn interrupts back on when
24979 * we're done.
24980 */
24981- host->access.set_intr_mask(host, 0);
24982+ host->access->set_intr_mask(host, 0);
24983 getgeometry(ctlr);
24984- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
24985+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
24986
24987 for(i=0; i<NWD; i++) {
24988 struct gendisk *disk = ida_gendisk[ctlr][i];
24989@@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
24990 /* Wait (up to 2 seconds) for a command to complete */
24991
24992 for (i = 200000; i > 0; i--) {
24993- done = hba[ctlr]->access.command_completed(hba[ctlr]);
24994+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
24995 if (done == 0) {
24996 udelay(10); /* a short fixed delay */
24997 } else
24998diff -urNp linux-3.1.1/drivers/block/cpqarray.h linux-3.1.1/drivers/block/cpqarray.h
24999--- linux-3.1.1/drivers/block/cpqarray.h 2011-11-11 15:19:27.000000000 -0500
25000+++ linux-3.1.1/drivers/block/cpqarray.h 2011-11-16 18:39:07.000000000 -0500
25001@@ -99,7 +99,7 @@ struct ctlr_info {
25002 drv_info_t drv[NWD];
25003 struct proc_dir_entry *proc;
25004
25005- struct access_method access;
25006+ struct access_method *access;
25007
25008 cmdlist_t *reqQ;
25009 cmdlist_t *cmpQ;
25010diff -urNp linux-3.1.1/drivers/block/DAC960.c linux-3.1.1/drivers/block/DAC960.c
25011--- linux-3.1.1/drivers/block/DAC960.c 2011-11-11 15:19:27.000000000 -0500
25012+++ linux-3.1.1/drivers/block/DAC960.c 2011-11-16 18:40:10.000000000 -0500
25013@@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
25014 unsigned long flags;
25015 int Channel, TargetID;
25016
25017+ pax_track_stack();
25018+
25019 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
25020 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
25021 sizeof(DAC960_SCSI_Inquiry_T) +
25022diff -urNp linux-3.1.1/drivers/block/drbd/drbd_int.h linux-3.1.1/drivers/block/drbd/drbd_int.h
25023--- linux-3.1.1/drivers/block/drbd/drbd_int.h 2011-11-11 15:19:27.000000000 -0500
25024+++ linux-3.1.1/drivers/block/drbd/drbd_int.h 2011-11-16 18:39:07.000000000 -0500
25025@@ -737,7 +737,7 @@ struct drbd_request;
25026 struct drbd_epoch {
25027 struct list_head list;
25028 unsigned int barrier_nr;
25029- atomic_t epoch_size; /* increased on every request added. */
25030+ atomic_unchecked_t epoch_size; /* increased on every request added. */
25031 atomic_t active; /* increased on every req. added, and dec on every finished. */
25032 unsigned long flags;
25033 };
25034@@ -1109,7 +1109,7 @@ struct drbd_conf {
25035 void *int_dig_in;
25036 void *int_dig_vv;
25037 wait_queue_head_t seq_wait;
25038- atomic_t packet_seq;
25039+ atomic_unchecked_t packet_seq;
25040 unsigned int peer_seq;
25041 spinlock_t peer_seq_lock;
25042 unsigned int minor;
25043@@ -1618,30 +1618,30 @@ static inline int drbd_setsockopt(struct
25044
25045 static inline void drbd_tcp_cork(struct socket *sock)
25046 {
25047- int __user val = 1;
25048+ int val = 1;
25049 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
25050- (char __user *)&val, sizeof(val));
25051+ (char __force_user *)&val, sizeof(val));
25052 }
25053
25054 static inline void drbd_tcp_uncork(struct socket *sock)
25055 {
25056- int __user val = 0;
25057+ int val = 0;
25058 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
25059- (char __user *)&val, sizeof(val));
25060+ (char __force_user *)&val, sizeof(val));
25061 }
25062
25063 static inline void drbd_tcp_nodelay(struct socket *sock)
25064 {
25065- int __user val = 1;
25066+ int val = 1;
25067 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
25068- (char __user *)&val, sizeof(val));
25069+ (char __force_user *)&val, sizeof(val));
25070 }
25071
25072 static inline void drbd_tcp_quickack(struct socket *sock)
25073 {
25074- int __user val = 2;
25075+ int val = 2;
25076 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
25077- (char __user *)&val, sizeof(val));
25078+ (char __force_user *)&val, sizeof(val));
25079 }
25080
25081 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
25082diff -urNp linux-3.1.1/drivers/block/drbd/drbd_main.c linux-3.1.1/drivers/block/drbd/drbd_main.c
25083--- linux-3.1.1/drivers/block/drbd/drbd_main.c 2011-11-11 15:19:27.000000000 -0500
25084+++ linux-3.1.1/drivers/block/drbd/drbd_main.c 2011-11-16 18:39:07.000000000 -0500
25085@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_co
25086 p.sector = sector;
25087 p.block_id = block_id;
25088 p.blksize = blksize;
25089- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
25090+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
25091
25092 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
25093 return false;
25094@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *m
25095 p.sector = cpu_to_be64(req->sector);
25096 p.block_id = (unsigned long)req;
25097 p.seq_num = cpu_to_be32(req->seq_num =
25098- atomic_add_return(1, &mdev->packet_seq));
25099+ atomic_add_return_unchecked(1, &mdev->packet_seq));
25100
25101 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
25102
25103@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_
25104 atomic_set(&mdev->unacked_cnt, 0);
25105 atomic_set(&mdev->local_cnt, 0);
25106 atomic_set(&mdev->net_cnt, 0);
25107- atomic_set(&mdev->packet_seq, 0);
25108+ atomic_set_unchecked(&mdev->packet_seq, 0);
25109 atomic_set(&mdev->pp_in_use, 0);
25110 atomic_set(&mdev->pp_in_use_by_net, 0);
25111 atomic_set(&mdev->rs_sect_in, 0);
25112@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf
25113 mdev->receiver.t_state);
25114
25115 /* no need to lock it, I'm the only thread alive */
25116- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
25117- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
25118+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
25119+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
25120 mdev->al_writ_cnt =
25121 mdev->bm_writ_cnt =
25122 mdev->read_cnt =
25123diff -urNp linux-3.1.1/drivers/block/drbd/drbd_nl.c linux-3.1.1/drivers/block/drbd/drbd_nl.c
25124--- linux-3.1.1/drivers/block/drbd/drbd_nl.c 2011-11-11 15:19:27.000000000 -0500
25125+++ linux-3.1.1/drivers/block/drbd/drbd_nl.c 2011-11-16 18:39:07.000000000 -0500
25126@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(stru
25127 module_put(THIS_MODULE);
25128 }
25129
25130-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
25131+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
25132
25133 static unsigned short *
25134 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
25135@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *
25136 cn_reply->id.idx = CN_IDX_DRBD;
25137 cn_reply->id.val = CN_VAL_DRBD;
25138
25139- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
25140+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
25141 cn_reply->ack = 0; /* not used here. */
25142 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
25143 (int)((char *)tl - (char *)reply->tag_list);
25144@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_co
25145 cn_reply->id.idx = CN_IDX_DRBD;
25146 cn_reply->id.val = CN_VAL_DRBD;
25147
25148- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
25149+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
25150 cn_reply->ack = 0; /* not used here. */
25151 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
25152 (int)((char *)tl - (char *)reply->tag_list);
25153@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
25154 cn_reply->id.idx = CN_IDX_DRBD;
25155 cn_reply->id.val = CN_VAL_DRBD;
25156
25157- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
25158+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
25159 cn_reply->ack = 0; // not used here.
25160 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
25161 (int)((char*)tl - (char*)reply->tag_list);
25162@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drb
25163 cn_reply->id.idx = CN_IDX_DRBD;
25164 cn_reply->id.val = CN_VAL_DRBD;
25165
25166- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
25167+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
25168 cn_reply->ack = 0; /* not used here. */
25169 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
25170 (int)((char *)tl - (char *)reply->tag_list);
25171diff -urNp linux-3.1.1/drivers/block/drbd/drbd_receiver.c linux-3.1.1/drivers/block/drbd/drbd_receiver.c
25172--- linux-3.1.1/drivers/block/drbd/drbd_receiver.c 2011-11-11 15:19:27.000000000 -0500
25173+++ linux-3.1.1/drivers/block/drbd/drbd_receiver.c 2011-11-16 18:39:07.000000000 -0500
25174@@ -894,7 +894,7 @@ retry:
25175 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
25176 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
25177
25178- atomic_set(&mdev->packet_seq, 0);
25179+ atomic_set_unchecked(&mdev->packet_seq, 0);
25180 mdev->peer_seq = 0;
25181
25182 drbd_thread_start(&mdev->asender);
25183@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish
25184 do {
25185 next_epoch = NULL;
25186
25187- epoch_size = atomic_read(&epoch->epoch_size);
25188+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
25189
25190 switch (ev & ~EV_CLEANUP) {
25191 case EV_PUT:
25192@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish
25193 rv = FE_DESTROYED;
25194 } else {
25195 epoch->flags = 0;
25196- atomic_set(&epoch->epoch_size, 0);
25197+ atomic_set_unchecked(&epoch->epoch_size, 0);
25198 /* atomic_set(&epoch->active, 0); is already zero */
25199 if (rv == FE_STILL_LIVE)
25200 rv = FE_RECYCLED;
25201@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_c
25202 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
25203 drbd_flush(mdev);
25204
25205- if (atomic_read(&mdev->current_epoch->epoch_size)) {
25206+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
25207 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
25208 if (epoch)
25209 break;
25210 }
25211
25212 epoch = mdev->current_epoch;
25213- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
25214+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
25215
25216 D_ASSERT(atomic_read(&epoch->active) == 0);
25217 D_ASSERT(epoch->flags == 0);
25218@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_c
25219 }
25220
25221 epoch->flags = 0;
25222- atomic_set(&epoch->epoch_size, 0);
25223+ atomic_set_unchecked(&epoch->epoch_size, 0);
25224 atomic_set(&epoch->active, 0);
25225
25226 spin_lock(&mdev->epoch_lock);
25227- if (atomic_read(&mdev->current_epoch->epoch_size)) {
25228+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
25229 list_add(&epoch->list, &mdev->current_epoch->list);
25230 mdev->current_epoch = epoch;
25231 mdev->epochs++;
25232@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf
25233 spin_unlock(&mdev->peer_seq_lock);
25234
25235 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
25236- atomic_inc(&mdev->current_epoch->epoch_size);
25237+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
25238 return drbd_drain_block(mdev, data_size);
25239 }
25240
25241@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf
25242
25243 spin_lock(&mdev->epoch_lock);
25244 e->epoch = mdev->current_epoch;
25245- atomic_inc(&e->epoch->epoch_size);
25246+ atomic_inc_unchecked(&e->epoch->epoch_size);
25247 atomic_inc(&e->epoch->active);
25248 spin_unlock(&mdev->epoch_lock);
25249
25250@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_
25251 D_ASSERT(list_empty(&mdev->done_ee));
25252
25253 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
25254- atomic_set(&mdev->current_epoch->epoch_size, 0);
25255+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
25256 D_ASSERT(list_empty(&mdev->current_epoch->list));
25257 }
25258
25259diff -urNp linux-3.1.1/drivers/block/loop.c linux-3.1.1/drivers/block/loop.c
25260--- linux-3.1.1/drivers/block/loop.c 2011-11-11 15:19:27.000000000 -0500
25261+++ linux-3.1.1/drivers/block/loop.c 2011-11-16 18:39:07.000000000 -0500
25262@@ -283,7 +283,7 @@ static int __do_lo_send_write(struct fil
25263 mm_segment_t old_fs = get_fs();
25264
25265 set_fs(get_ds());
25266- bw = file->f_op->write(file, buf, len, &pos);
25267+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
25268 set_fs(old_fs);
25269 if (likely(bw == len))
25270 return 0;
25271diff -urNp linux-3.1.1/drivers/block/nbd.c linux-3.1.1/drivers/block/nbd.c
25272--- linux-3.1.1/drivers/block/nbd.c 2011-11-11 15:19:27.000000000 -0500
25273+++ linux-3.1.1/drivers/block/nbd.c 2011-11-16 18:40:10.000000000 -0500
25274@@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
25275 struct kvec iov;
25276 sigset_t blocked, oldset;
25277
25278+ pax_track_stack();
25279+
25280 if (unlikely(!sock)) {
25281 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
25282 lo->disk->disk_name, (send ? "send" : "recv"));
25283@@ -572,6 +574,8 @@ static void do_nbd_request(struct reques
25284 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
25285 unsigned int cmd, unsigned long arg)
25286 {
25287+ pax_track_stack();
25288+
25289 switch (cmd) {
25290 case NBD_DISCONNECT: {
25291 struct request sreq;
25292diff -urNp linux-3.1.1/drivers/char/agp/frontend.c linux-3.1.1/drivers/char/agp/frontend.c
25293--- linux-3.1.1/drivers/char/agp/frontend.c 2011-11-11 15:19:27.000000000 -0500
25294+++ linux-3.1.1/drivers/char/agp/frontend.c 2011-11-16 18:39:07.000000000 -0500
25295@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
25296 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
25297 return -EFAULT;
25298
25299- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
25300+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
25301 return -EFAULT;
25302
25303 client = agp_find_client_by_pid(reserve.pid);
25304diff -urNp linux-3.1.1/drivers/char/briq_panel.c linux-3.1.1/drivers/char/briq_panel.c
25305--- linux-3.1.1/drivers/char/briq_panel.c 2011-11-11 15:19:27.000000000 -0500
25306+++ linux-3.1.1/drivers/char/briq_panel.c 2011-11-16 18:40:10.000000000 -0500
25307@@ -9,6 +9,7 @@
25308 #include <linux/types.h>
25309 #include <linux/errno.h>
25310 #include <linux/tty.h>
25311+#include <linux/mutex.h>
25312 #include <linux/timer.h>
25313 #include <linux/kernel.h>
25314 #include <linux/wait.h>
25315@@ -34,6 +35,7 @@ static int vfd_is_open;
25316 static unsigned char vfd[40];
25317 static int vfd_cursor;
25318 static unsigned char ledpb, led;
25319+static DEFINE_MUTEX(vfd_mutex);
25320
25321 static void update_vfd(void)
25322 {
25323@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
25324 if (!vfd_is_open)
25325 return -EBUSY;
25326
25327+ mutex_lock(&vfd_mutex);
25328 for (;;) {
25329 char c;
25330 if (!indx)
25331 break;
25332- if (get_user(c, buf))
25333+ if (get_user(c, buf)) {
25334+ mutex_unlock(&vfd_mutex);
25335 return -EFAULT;
25336+ }
25337 if (esc) {
25338 set_led(c);
25339 esc = 0;
25340@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
25341 buf++;
25342 }
25343 update_vfd();
25344+ mutex_unlock(&vfd_mutex);
25345
25346 return len;
25347 }
25348diff -urNp linux-3.1.1/drivers/char/genrtc.c linux-3.1.1/drivers/char/genrtc.c
25349--- linux-3.1.1/drivers/char/genrtc.c 2011-11-11 15:19:27.000000000 -0500
25350+++ linux-3.1.1/drivers/char/genrtc.c 2011-11-16 18:40:10.000000000 -0500
25351@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
25352 switch (cmd) {
25353
25354 case RTC_PLL_GET:
25355+ memset(&pll, 0, sizeof(pll));
25356 if (get_rtc_pll(&pll))
25357 return -EINVAL;
25358 else
25359diff -urNp linux-3.1.1/drivers/char/hpet.c linux-3.1.1/drivers/char/hpet.c
25360--- linux-3.1.1/drivers/char/hpet.c 2011-11-11 15:19:27.000000000 -0500
25361+++ linux-3.1.1/drivers/char/hpet.c 2011-11-16 18:39:07.000000000 -0500
25362@@ -572,7 +572,7 @@ static inline unsigned long hpet_time_di
25363 }
25364
25365 static int
25366-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
25367+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
25368 struct hpet_info *info)
25369 {
25370 struct hpet_timer __iomem *timer;
25371diff -urNp linux-3.1.1/drivers/char/ipmi/ipmi_msghandler.c linux-3.1.1/drivers/char/ipmi/ipmi_msghandler.c
25372--- linux-3.1.1/drivers/char/ipmi/ipmi_msghandler.c 2011-11-11 15:19:27.000000000 -0500
25373+++ linux-3.1.1/drivers/char/ipmi/ipmi_msghandler.c 2011-11-16 18:40:10.000000000 -0500
25374@@ -415,7 +415,7 @@ struct ipmi_smi {
25375 struct proc_dir_entry *proc_dir;
25376 char proc_dir_name[10];
25377
25378- atomic_t stats[IPMI_NUM_STATS];
25379+ atomic_unchecked_t stats[IPMI_NUM_STATS];
25380
25381 /*
25382 * run_to_completion duplicate of smb_info, smi_info
25383@@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
25384
25385
25386 #define ipmi_inc_stat(intf, stat) \
25387- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
25388+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
25389 #define ipmi_get_stat(intf, stat) \
25390- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
25391+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
25392
25393 static int is_lan_addr(struct ipmi_addr *addr)
25394 {
25395@@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
25396 INIT_LIST_HEAD(&intf->cmd_rcvrs);
25397 init_waitqueue_head(&intf->waitq);
25398 for (i = 0; i < IPMI_NUM_STATS; i++)
25399- atomic_set(&intf->stats[i], 0);
25400+ atomic_set_unchecked(&intf->stats[i], 0);
25401
25402 intf->proc_dir = NULL;
25403
25404@@ -4220,6 +4220,8 @@ static void send_panic_events(char *str)
25405 struct ipmi_smi_msg smi_msg;
25406 struct ipmi_recv_msg recv_msg;
25407
25408+ pax_track_stack();
25409+
25410 si = (struct ipmi_system_interface_addr *) &addr;
25411 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
25412 si->channel = IPMI_BMC_CHANNEL;
25413diff -urNp linux-3.1.1/drivers/char/ipmi/ipmi_si_intf.c linux-3.1.1/drivers/char/ipmi/ipmi_si_intf.c
25414--- linux-3.1.1/drivers/char/ipmi/ipmi_si_intf.c 2011-11-11 15:19:27.000000000 -0500
25415+++ linux-3.1.1/drivers/char/ipmi/ipmi_si_intf.c 2011-11-16 18:39:07.000000000 -0500
25416@@ -277,7 +277,7 @@ struct smi_info {
25417 unsigned char slave_addr;
25418
25419 /* Counters and things for the proc filesystem. */
25420- atomic_t stats[SI_NUM_STATS];
25421+ atomic_unchecked_t stats[SI_NUM_STATS];
25422
25423 struct task_struct *thread;
25424
25425@@ -286,9 +286,9 @@ struct smi_info {
25426 };
25427
25428 #define smi_inc_stat(smi, stat) \
25429- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
25430+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
25431 #define smi_get_stat(smi, stat) \
25432- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
25433+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
25434
25435 #define SI_MAX_PARMS 4
25436
25437@@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info
25438 atomic_set(&new_smi->req_events, 0);
25439 new_smi->run_to_completion = 0;
25440 for (i = 0; i < SI_NUM_STATS; i++)
25441- atomic_set(&new_smi->stats[i], 0);
25442+ atomic_set_unchecked(&new_smi->stats[i], 0);
25443
25444 new_smi->interrupt_disabled = 1;
25445 atomic_set(&new_smi->stop_operation, 0);
25446diff -urNp linux-3.1.1/drivers/char/Kconfig linux-3.1.1/drivers/char/Kconfig
25447--- linux-3.1.1/drivers/char/Kconfig 2011-11-11 15:19:27.000000000 -0500
25448+++ linux-3.1.1/drivers/char/Kconfig 2011-11-16 18:40:10.000000000 -0500
25449@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
25450
25451 config DEVKMEM
25452 bool "/dev/kmem virtual device support"
25453- default y
25454+ default n
25455+ depends on !GRKERNSEC_KMEM
25456 help
25457 Say Y here if you want to support the /dev/kmem device. The
25458 /dev/kmem device is rarely used, but can be used for certain
25459@@ -596,6 +597,7 @@ config DEVPORT
25460 bool
25461 depends on !M68K
25462 depends on ISA || PCI
25463+ depends on !GRKERNSEC_KMEM
25464 default y
25465
25466 source "drivers/s390/char/Kconfig"
25467diff -urNp linux-3.1.1/drivers/char/mbcs.c linux-3.1.1/drivers/char/mbcs.c
25468--- linux-3.1.1/drivers/char/mbcs.c 2011-11-11 15:19:27.000000000 -0500
25469+++ linux-3.1.1/drivers/char/mbcs.c 2011-11-16 18:39:07.000000000 -0500
25470@@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *de
25471 return 0;
25472 }
25473
25474-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
25475+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
25476 {
25477 .part_num = MBCS_PART_NUM,
25478 .mfg_num = MBCS_MFG_NUM,
25479diff -urNp linux-3.1.1/drivers/char/mem.c linux-3.1.1/drivers/char/mem.c
25480--- linux-3.1.1/drivers/char/mem.c 2011-11-11 15:19:27.000000000 -0500
25481+++ linux-3.1.1/drivers/char/mem.c 2011-11-16 18:40:10.000000000 -0500
25482@@ -18,6 +18,7 @@
25483 #include <linux/raw.h>
25484 #include <linux/tty.h>
25485 #include <linux/capability.h>
25486+#include <linux/security.h>
25487 #include <linux/ptrace.h>
25488 #include <linux/device.h>
25489 #include <linux/highmem.h>
25490@@ -34,6 +35,10 @@
25491 # include <linux/efi.h>
25492 #endif
25493
25494+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
25495+extern struct file_operations grsec_fops;
25496+#endif
25497+
25498 static inline unsigned long size_inside_page(unsigned long start,
25499 unsigned long size)
25500 {
25501@@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
25502
25503 while (cursor < to) {
25504 if (!devmem_is_allowed(pfn)) {
25505+#ifdef CONFIG_GRKERNSEC_KMEM
25506+ gr_handle_mem_readwrite(from, to);
25507+#else
25508 printk(KERN_INFO
25509 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25510 current->comm, from, to);
25511+#endif
25512 return 0;
25513 }
25514 cursor += PAGE_SIZE;
25515@@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
25516 }
25517 return 1;
25518 }
25519+#elif defined(CONFIG_GRKERNSEC_KMEM)
25520+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25521+{
25522+ return 0;
25523+}
25524 #else
25525 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25526 {
25527@@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
25528
25529 while (count > 0) {
25530 unsigned long remaining;
25531+ char *temp;
25532
25533 sz = size_inside_page(p, count);
25534
25535@@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
25536 if (!ptr)
25537 return -EFAULT;
25538
25539- remaining = copy_to_user(buf, ptr, sz);
25540+#ifdef CONFIG_PAX_USERCOPY
25541+ temp = kmalloc(sz, GFP_KERNEL);
25542+ if (!temp) {
25543+ unxlate_dev_mem_ptr(p, ptr);
25544+ return -ENOMEM;
25545+ }
25546+ memcpy(temp, ptr, sz);
25547+#else
25548+ temp = ptr;
25549+#endif
25550+
25551+ remaining = copy_to_user(buf, temp, sz);
25552+
25553+#ifdef CONFIG_PAX_USERCOPY
25554+ kfree(temp);
25555+#endif
25556+
25557 unxlate_dev_mem_ptr(p, ptr);
25558 if (remaining)
25559 return -EFAULT;
25560@@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
25561 size_t count, loff_t *ppos)
25562 {
25563 unsigned long p = *ppos;
25564- ssize_t low_count, read, sz;
25565+ ssize_t low_count, read, sz, err = 0;
25566 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
25567- int err = 0;
25568
25569 read = 0;
25570 if (p < (unsigned long) high_memory) {
25571@@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
25572 }
25573 #endif
25574 while (low_count > 0) {
25575+ char *temp;
25576+
25577 sz = size_inside_page(p, low_count);
25578
25579 /*
25580@@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
25581 */
25582 kbuf = xlate_dev_kmem_ptr((char *)p);
25583
25584- if (copy_to_user(buf, kbuf, sz))
25585+#ifdef CONFIG_PAX_USERCOPY
25586+ temp = kmalloc(sz, GFP_KERNEL);
25587+ if (!temp)
25588+ return -ENOMEM;
25589+ memcpy(temp, kbuf, sz);
25590+#else
25591+ temp = kbuf;
25592+#endif
25593+
25594+ err = copy_to_user(buf, temp, sz);
25595+
25596+#ifdef CONFIG_PAX_USERCOPY
25597+ kfree(temp);
25598+#endif
25599+
25600+ if (err)
25601 return -EFAULT;
25602 buf += sz;
25603 p += sz;
25604@@ -866,6 +913,9 @@ static const struct memdev {
25605 #ifdef CONFIG_CRASH_DUMP
25606 [12] = { "oldmem", 0, &oldmem_fops, NULL },
25607 #endif
25608+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
25609+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
25610+#endif
25611 };
25612
25613 static int memory_open(struct inode *inode, struct file *filp)
25614diff -urNp linux-3.1.1/drivers/char/nvram.c linux-3.1.1/drivers/char/nvram.c
25615--- linux-3.1.1/drivers/char/nvram.c 2011-11-11 15:19:27.000000000 -0500
25616+++ linux-3.1.1/drivers/char/nvram.c 2011-11-16 18:39:07.000000000 -0500
25617@@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *f
25618
25619 spin_unlock_irq(&rtc_lock);
25620
25621- if (copy_to_user(buf, contents, tmp - contents))
25622+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
25623 return -EFAULT;
25624
25625 *ppos = i;
25626diff -urNp linux-3.1.1/drivers/char/random.c linux-3.1.1/drivers/char/random.c
25627--- linux-3.1.1/drivers/char/random.c 2011-11-11 15:19:27.000000000 -0500
25628+++ linux-3.1.1/drivers/char/random.c 2011-11-16 18:40:10.000000000 -0500
25629@@ -261,8 +261,13 @@
25630 /*
25631 * Configuration information
25632 */
25633+#ifdef CONFIG_GRKERNSEC_RANDNET
25634+#define INPUT_POOL_WORDS 512
25635+#define OUTPUT_POOL_WORDS 128
25636+#else
25637 #define INPUT_POOL_WORDS 128
25638 #define OUTPUT_POOL_WORDS 32
25639+#endif
25640 #define SEC_XFER_SIZE 512
25641 #define EXTRACT_SIZE 10
25642
25643@@ -300,10 +305,17 @@ static struct poolinfo {
25644 int poolwords;
25645 int tap1, tap2, tap3, tap4, tap5;
25646 } poolinfo_table[] = {
25647+#ifdef CONFIG_GRKERNSEC_RANDNET
25648+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
25649+ { 512, 411, 308, 208, 104, 1 },
25650+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
25651+ { 128, 103, 76, 51, 25, 1 },
25652+#else
25653 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
25654 { 128, 103, 76, 51, 25, 1 },
25655 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
25656 { 32, 26, 20, 14, 7, 1 },
25657+#endif
25658 #if 0
25659 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
25660 { 2048, 1638, 1231, 819, 411, 1 },
25661@@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
25662
25663 extract_buf(r, tmp);
25664 i = min_t(int, nbytes, EXTRACT_SIZE);
25665- if (copy_to_user(buf, tmp, i)) {
25666+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
25667 ret = -EFAULT;
25668 break;
25669 }
25670@@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
25671 #include <linux/sysctl.h>
25672
25673 static int min_read_thresh = 8, min_write_thresh;
25674-static int max_read_thresh = INPUT_POOL_WORDS * 32;
25675+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
25676 static int max_write_thresh = INPUT_POOL_WORDS * 32;
25677 static char sysctl_bootid[16];
25678
25679diff -urNp linux-3.1.1/drivers/char/sonypi.c linux-3.1.1/drivers/char/sonypi.c
25680--- linux-3.1.1/drivers/char/sonypi.c 2011-11-11 15:19:27.000000000 -0500
25681+++ linux-3.1.1/drivers/char/sonypi.c 2011-11-16 18:39:07.000000000 -0500
25682@@ -55,6 +55,7 @@
25683 #include <asm/uaccess.h>
25684 #include <asm/io.h>
25685 #include <asm/system.h>
25686+#include <asm/local.h>
25687
25688 #include <linux/sonypi.h>
25689
25690@@ -491,7 +492,7 @@ static struct sonypi_device {
25691 spinlock_t fifo_lock;
25692 wait_queue_head_t fifo_proc_list;
25693 struct fasync_struct *fifo_async;
25694- int open_count;
25695+ local_t open_count;
25696 int model;
25697 struct input_dev *input_jog_dev;
25698 struct input_dev *input_key_dev;
25699@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
25700 static int sonypi_misc_release(struct inode *inode, struct file *file)
25701 {
25702 mutex_lock(&sonypi_device.lock);
25703- sonypi_device.open_count--;
25704+ local_dec(&sonypi_device.open_count);
25705 mutex_unlock(&sonypi_device.lock);
25706 return 0;
25707 }
25708@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
25709 {
25710 mutex_lock(&sonypi_device.lock);
25711 /* Flush input queue on first open */
25712- if (!sonypi_device.open_count)
25713+ if (!local_read(&sonypi_device.open_count))
25714 kfifo_reset(&sonypi_device.fifo);
25715- sonypi_device.open_count++;
25716+ local_inc(&sonypi_device.open_count);
25717 mutex_unlock(&sonypi_device.lock);
25718
25719 return 0;
25720diff -urNp linux-3.1.1/drivers/char/tpm/tpm_bios.c linux-3.1.1/drivers/char/tpm/tpm_bios.c
25721--- linux-3.1.1/drivers/char/tpm/tpm_bios.c 2011-11-11 15:19:27.000000000 -0500
25722+++ linux-3.1.1/drivers/char/tpm/tpm_bios.c 2011-11-16 18:39:07.000000000 -0500
25723@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
25724 event = addr;
25725
25726 if ((event->event_type == 0 && event->event_size == 0) ||
25727- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
25728+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
25729 return NULL;
25730
25731 return addr;
25732@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
25733 return NULL;
25734
25735 if ((event->event_type == 0 && event->event_size == 0) ||
25736- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
25737+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
25738 return NULL;
25739
25740 (*pos)++;
25741@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
25742 int i;
25743
25744 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
25745- seq_putc(m, data[i]);
25746+ if (!seq_putc(m, data[i]))
25747+ return -EFAULT;
25748
25749 return 0;
25750 }
25751@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log
25752 log->bios_event_log_end = log->bios_event_log + len;
25753
25754 virt = acpi_os_map_memory(start, len);
25755+ if (!virt) {
25756+ kfree(log->bios_event_log);
25757+ log->bios_event_log = NULL;
25758+ return -EFAULT;
25759+ }
25760
25761- memcpy(log->bios_event_log, virt, len);
25762+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
25763
25764 acpi_os_unmap_memory(virt, len);
25765 return 0;
25766diff -urNp linux-3.1.1/drivers/char/tpm/tpm.c linux-3.1.1/drivers/char/tpm/tpm.c
25767--- linux-3.1.1/drivers/char/tpm/tpm.c 2011-11-11 15:19:27.000000000 -0500
25768+++ linux-3.1.1/drivers/char/tpm/tpm.c 2011-11-16 18:40:10.000000000 -0500
25769@@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_c
25770 chip->vendor.req_complete_val)
25771 goto out_recv;
25772
25773- if ((status == chip->vendor.req_canceled)) {
25774+ if (status == chip->vendor.req_canceled) {
25775 dev_err(chip->dev, "Operation Canceled\n");
25776 rc = -ECANCELED;
25777 goto out;
25778@@ -862,6 +862,8 @@ ssize_t tpm_show_pubek(struct device *de
25779
25780 struct tpm_chip *chip = dev_get_drvdata(dev);
25781
25782+ pax_track_stack();
25783+
25784 tpm_cmd.header.in = tpm_readpubek_header;
25785 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
25786 "attempting to read the PUBEK");
25787diff -urNp linux-3.1.1/drivers/char/virtio_console.c linux-3.1.1/drivers/char/virtio_console.c
25788--- linux-3.1.1/drivers/char/virtio_console.c 2011-11-11 15:19:27.000000000 -0500
25789+++ linux-3.1.1/drivers/char/virtio_console.c 2011-11-16 18:39:07.000000000 -0500
25790@@ -555,7 +555,7 @@ static ssize_t fill_readbuf(struct port
25791 if (to_user) {
25792 ssize_t ret;
25793
25794- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
25795+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
25796 if (ret)
25797 return -EFAULT;
25798 } else {
25799@@ -654,7 +654,7 @@ static ssize_t port_fops_read(struct fil
25800 if (!port_has_data(port) && !port->host_connected)
25801 return 0;
25802
25803- return fill_readbuf(port, ubuf, count, true);
25804+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
25805 }
25806
25807 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
25808diff -urNp linux-3.1.1/drivers/crypto/hifn_795x.c linux-3.1.1/drivers/crypto/hifn_795x.c
25809--- linux-3.1.1/drivers/crypto/hifn_795x.c 2011-11-11 15:19:27.000000000 -0500
25810+++ linux-3.1.1/drivers/crypto/hifn_795x.c 2011-11-16 18:40:10.000000000 -0500
25811@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
25812 0xCA, 0x34, 0x2B, 0x2E};
25813 struct scatterlist sg;
25814
25815+ pax_track_stack();
25816+
25817 memset(src, 0, sizeof(src));
25818 memset(ctx.key, 0, sizeof(ctx.key));
25819
25820diff -urNp linux-3.1.1/drivers/crypto/padlock-aes.c linux-3.1.1/drivers/crypto/padlock-aes.c
25821--- linux-3.1.1/drivers/crypto/padlock-aes.c 2011-11-11 15:19:27.000000000 -0500
25822+++ linux-3.1.1/drivers/crypto/padlock-aes.c 2011-11-16 18:40:10.000000000 -0500
25823@@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
25824 struct crypto_aes_ctx gen_aes;
25825 int cpu;
25826
25827+ pax_track_stack();
25828+
25829 if (key_len % 8) {
25830 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
25831 return -EINVAL;
25832diff -urNp linux-3.1.1/drivers/edac/amd64_edac.c linux-3.1.1/drivers/edac/amd64_edac.c
25833--- linux-3.1.1/drivers/edac/amd64_edac.c 2011-11-11 15:19:27.000000000 -0500
25834+++ linux-3.1.1/drivers/edac/amd64_edac.c 2011-11-16 18:39:07.000000000 -0500
25835@@ -2670,7 +2670,7 @@ static void __devexit amd64_remove_one_i
25836 * PCI core identifies what devices are on a system during boot, and then
25837 * inquiry this table to see if this driver is for a given device found.
25838 */
25839-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
25840+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
25841 {
25842 .vendor = PCI_VENDOR_ID_AMD,
25843 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
25844diff -urNp linux-3.1.1/drivers/edac/amd76x_edac.c linux-3.1.1/drivers/edac/amd76x_edac.c
25845--- linux-3.1.1/drivers/edac/amd76x_edac.c 2011-11-11 15:19:27.000000000 -0500
25846+++ linux-3.1.1/drivers/edac/amd76x_edac.c 2011-11-16 18:39:07.000000000 -0500
25847@@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(
25848 edac_mc_free(mci);
25849 }
25850
25851-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
25852+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
25853 {
25854 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25855 AMD762},
25856diff -urNp linux-3.1.1/drivers/edac/e752x_edac.c linux-3.1.1/drivers/edac/e752x_edac.c
25857--- linux-3.1.1/drivers/edac/e752x_edac.c 2011-11-11 15:19:27.000000000 -0500
25858+++ linux-3.1.1/drivers/edac/e752x_edac.c 2011-11-16 18:39:07.000000000 -0500
25859@@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(s
25860 edac_mc_free(mci);
25861 }
25862
25863-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
25864+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
25865 {
25866 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25867 E7520},
25868diff -urNp linux-3.1.1/drivers/edac/e7xxx_edac.c linux-3.1.1/drivers/edac/e7xxx_edac.c
25869--- linux-3.1.1/drivers/edac/e7xxx_edac.c 2011-11-11 15:19:27.000000000 -0500
25870+++ linux-3.1.1/drivers/edac/e7xxx_edac.c 2011-11-16 18:39:07.000000000 -0500
25871@@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(s
25872 edac_mc_free(mci);
25873 }
25874
25875-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
25876+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
25877 {
25878 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25879 E7205},
25880diff -urNp linux-3.1.1/drivers/edac/edac_pci_sysfs.c linux-3.1.1/drivers/edac/edac_pci_sysfs.c
25881--- linux-3.1.1/drivers/edac/edac_pci_sysfs.c 2011-11-11 15:19:27.000000000 -0500
25882+++ linux-3.1.1/drivers/edac/edac_pci_sysfs.c 2011-11-16 18:39:07.000000000 -0500
25883@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
25884 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
25885 static int edac_pci_poll_msec = 1000; /* one second workq period */
25886
25887-static atomic_t pci_parity_count = ATOMIC_INIT(0);
25888-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
25889+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
25890+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
25891
25892 static struct kobject *edac_pci_top_main_kobj;
25893 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
25894@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
25895 edac_printk(KERN_CRIT, EDAC_PCI,
25896 "Signaled System Error on %s\n",
25897 pci_name(dev));
25898- atomic_inc(&pci_nonparity_count);
25899+ atomic_inc_unchecked(&pci_nonparity_count);
25900 }
25901
25902 if (status & (PCI_STATUS_PARITY)) {
25903@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
25904 "Master Data Parity Error on %s\n",
25905 pci_name(dev));
25906
25907- atomic_inc(&pci_parity_count);
25908+ atomic_inc_unchecked(&pci_parity_count);
25909 }
25910
25911 if (status & (PCI_STATUS_DETECTED_PARITY)) {
25912@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
25913 "Detected Parity Error on %s\n",
25914 pci_name(dev));
25915
25916- atomic_inc(&pci_parity_count);
25917+ atomic_inc_unchecked(&pci_parity_count);
25918 }
25919 }
25920
25921@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
25922 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
25923 "Signaled System Error on %s\n",
25924 pci_name(dev));
25925- atomic_inc(&pci_nonparity_count);
25926+ atomic_inc_unchecked(&pci_nonparity_count);
25927 }
25928
25929 if (status & (PCI_STATUS_PARITY)) {
25930@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
25931 "Master Data Parity Error on "
25932 "%s\n", pci_name(dev));
25933
25934- atomic_inc(&pci_parity_count);
25935+ atomic_inc_unchecked(&pci_parity_count);
25936 }
25937
25938 if (status & (PCI_STATUS_DETECTED_PARITY)) {
25939@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
25940 "Detected Parity Error on %s\n",
25941 pci_name(dev));
25942
25943- atomic_inc(&pci_parity_count);
25944+ atomic_inc_unchecked(&pci_parity_count);
25945 }
25946 }
25947 }
25948@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
25949 if (!check_pci_errors)
25950 return;
25951
25952- before_count = atomic_read(&pci_parity_count);
25953+ before_count = atomic_read_unchecked(&pci_parity_count);
25954
25955 /* scan all PCI devices looking for a Parity Error on devices and
25956 * bridges.
25957@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
25958 /* Only if operator has selected panic on PCI Error */
25959 if (edac_pci_get_panic_on_pe()) {
25960 /* If the count is different 'after' from 'before' */
25961- if (before_count != atomic_read(&pci_parity_count))
25962+ if (before_count != atomic_read_unchecked(&pci_parity_count))
25963 panic("EDAC: PCI Parity Error");
25964 }
25965 }
25966diff -urNp linux-3.1.1/drivers/edac/i3000_edac.c linux-3.1.1/drivers/edac/i3000_edac.c
25967--- linux-3.1.1/drivers/edac/i3000_edac.c 2011-11-11 15:19:27.000000000 -0500
25968+++ linux-3.1.1/drivers/edac/i3000_edac.c 2011-11-16 18:39:07.000000000 -0500
25969@@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(s
25970 edac_mc_free(mci);
25971 }
25972
25973-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
25974+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
25975 {
25976 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25977 I3000},
25978diff -urNp linux-3.1.1/drivers/edac/i3200_edac.c linux-3.1.1/drivers/edac/i3200_edac.c
25979--- linux-3.1.1/drivers/edac/i3200_edac.c 2011-11-11 15:19:27.000000000 -0500
25980+++ linux-3.1.1/drivers/edac/i3200_edac.c 2011-11-16 18:39:07.000000000 -0500
25981@@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(s
25982 edac_mc_free(mci);
25983 }
25984
25985-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
25986+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
25987 {
25988 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25989 I3200},
25990diff -urNp linux-3.1.1/drivers/edac/i5000_edac.c linux-3.1.1/drivers/edac/i5000_edac.c
25991--- linux-3.1.1/drivers/edac/i5000_edac.c 2011-11-11 15:19:27.000000000 -0500
25992+++ linux-3.1.1/drivers/edac/i5000_edac.c 2011-11-16 18:39:07.000000000 -0500
25993@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(s
25994 *
25995 * The "E500P" device is the first device supported.
25996 */
25997-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
25998+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
25999 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
26000 .driver_data = I5000P},
26001
26002diff -urNp linux-3.1.1/drivers/edac/i5100_edac.c linux-3.1.1/drivers/edac/i5100_edac.c
26003--- linux-3.1.1/drivers/edac/i5100_edac.c 2011-11-11 15:19:27.000000000 -0500
26004+++ linux-3.1.1/drivers/edac/i5100_edac.c 2011-11-16 18:39:07.000000000 -0500
26005@@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(s
26006 edac_mc_free(mci);
26007 }
26008
26009-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
26010+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
26011 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
26012 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
26013 { 0, }
26014diff -urNp linux-3.1.1/drivers/edac/i5400_edac.c linux-3.1.1/drivers/edac/i5400_edac.c
26015--- linux-3.1.1/drivers/edac/i5400_edac.c 2011-11-11 15:19:27.000000000 -0500
26016+++ linux-3.1.1/drivers/edac/i5400_edac.c 2011-11-16 18:39:07.000000000 -0500
26017@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(s
26018 *
26019 * The "E500P" device is the first device supported.
26020 */
26021-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
26022+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
26023 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
26024 {0,} /* 0 terminated list. */
26025 };
26026diff -urNp linux-3.1.1/drivers/edac/i7300_edac.c linux-3.1.1/drivers/edac/i7300_edac.c
26027--- linux-3.1.1/drivers/edac/i7300_edac.c 2011-11-11 15:19:27.000000000 -0500
26028+++ linux-3.1.1/drivers/edac/i7300_edac.c 2011-11-16 18:39:07.000000000 -0500
26029@@ -1191,7 +1191,7 @@ static void __devexit i7300_remove_one(s
26030 *
26031 * Has only 8086:360c PCI ID
26032 */
26033-static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
26034+static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
26035 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
26036 {0,} /* 0 terminated list. */
26037 };
26038diff -urNp linux-3.1.1/drivers/edac/i7core_edac.c linux-3.1.1/drivers/edac/i7core_edac.c
26039--- linux-3.1.1/drivers/edac/i7core_edac.c 2011-11-11 15:19:27.000000000 -0500
26040+++ linux-3.1.1/drivers/edac/i7core_edac.c 2011-11-16 18:39:07.000000000 -0500
26041@@ -359,7 +359,7 @@ static const struct pci_id_table pci_dev
26042 /*
26043 * pci_device_id table for which devices we are looking for
26044 */
26045-static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
26046+static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
26047 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
26048 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
26049 {0,} /* 0 terminated list. */
26050diff -urNp linux-3.1.1/drivers/edac/i82443bxgx_edac.c linux-3.1.1/drivers/edac/i82443bxgx_edac.c
26051--- linux-3.1.1/drivers/edac/i82443bxgx_edac.c 2011-11-11 15:19:27.000000000 -0500
26052+++ linux-3.1.1/drivers/edac/i82443bxgx_edac.c 2011-11-16 18:39:07.000000000 -0500
26053@@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_
26054
26055 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
26056
26057-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
26058+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
26059 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
26060 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
26061 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
26062diff -urNp linux-3.1.1/drivers/edac/i82860_edac.c linux-3.1.1/drivers/edac/i82860_edac.c
26063--- linux-3.1.1/drivers/edac/i82860_edac.c 2011-11-11 15:19:27.000000000 -0500
26064+++ linux-3.1.1/drivers/edac/i82860_edac.c 2011-11-16 18:39:07.000000000 -0500
26065@@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(
26066 edac_mc_free(mci);
26067 }
26068
26069-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
26070+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
26071 {
26072 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26073 I82860},
26074diff -urNp linux-3.1.1/drivers/edac/i82875p_edac.c linux-3.1.1/drivers/edac/i82875p_edac.c
26075--- linux-3.1.1/drivers/edac/i82875p_edac.c 2011-11-11 15:19:27.000000000 -0500
26076+++ linux-3.1.1/drivers/edac/i82875p_edac.c 2011-11-16 18:39:07.000000000 -0500
26077@@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one
26078 edac_mc_free(mci);
26079 }
26080
26081-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
26082+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
26083 {
26084 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26085 I82875P},
26086diff -urNp linux-3.1.1/drivers/edac/i82975x_edac.c linux-3.1.1/drivers/edac/i82975x_edac.c
26087--- linux-3.1.1/drivers/edac/i82975x_edac.c 2011-11-11 15:19:27.000000000 -0500
26088+++ linux-3.1.1/drivers/edac/i82975x_edac.c 2011-11-16 18:39:07.000000000 -0500
26089@@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one
26090 edac_mc_free(mci);
26091 }
26092
26093-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
26094+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
26095 {
26096 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26097 I82975X
26098diff -urNp linux-3.1.1/drivers/edac/mce_amd.h linux-3.1.1/drivers/edac/mce_amd.h
26099--- linux-3.1.1/drivers/edac/mce_amd.h 2011-11-11 15:19:27.000000000 -0500
26100+++ linux-3.1.1/drivers/edac/mce_amd.h 2011-11-16 18:39:07.000000000 -0500
26101@@ -83,7 +83,7 @@ struct amd_decoder_ops {
26102 bool (*dc_mce)(u16, u8);
26103 bool (*ic_mce)(u16, u8);
26104 bool (*nb_mce)(u16, u8);
26105-};
26106+} __no_const;
26107
26108 void amd_report_gart_errors(bool);
26109 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
26110diff -urNp linux-3.1.1/drivers/edac/r82600_edac.c linux-3.1.1/drivers/edac/r82600_edac.c
26111--- linux-3.1.1/drivers/edac/r82600_edac.c 2011-11-11 15:19:27.000000000 -0500
26112+++ linux-3.1.1/drivers/edac/r82600_edac.c 2011-11-16 18:39:07.000000000 -0500
26113@@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(
26114 edac_mc_free(mci);
26115 }
26116
26117-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
26118+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
26119 {
26120 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
26121 },
26122diff -urNp linux-3.1.1/drivers/edac/x38_edac.c linux-3.1.1/drivers/edac/x38_edac.c
26123--- linux-3.1.1/drivers/edac/x38_edac.c 2011-11-11 15:19:27.000000000 -0500
26124+++ linux-3.1.1/drivers/edac/x38_edac.c 2011-11-16 18:39:07.000000000 -0500
26125@@ -440,7 +440,7 @@ static void __devexit x38_remove_one(str
26126 edac_mc_free(mci);
26127 }
26128
26129-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
26130+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
26131 {
26132 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26133 X38},
26134diff -urNp linux-3.1.1/drivers/firewire/core-card.c linux-3.1.1/drivers/firewire/core-card.c
26135--- linux-3.1.1/drivers/firewire/core-card.c 2011-11-11 15:19:27.000000000 -0500
26136+++ linux-3.1.1/drivers/firewire/core-card.c 2011-11-16 18:39:07.000000000 -0500
26137@@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
26138
26139 void fw_core_remove_card(struct fw_card *card)
26140 {
26141- struct fw_card_driver dummy_driver = dummy_driver_template;
26142+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
26143
26144 card->driver->update_phy_reg(card, 4,
26145 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
26146diff -urNp linux-3.1.1/drivers/firewire/core-cdev.c linux-3.1.1/drivers/firewire/core-cdev.c
26147--- linux-3.1.1/drivers/firewire/core-cdev.c 2011-11-11 15:19:27.000000000 -0500
26148+++ linux-3.1.1/drivers/firewire/core-cdev.c 2011-11-16 18:39:07.000000000 -0500
26149@@ -1331,8 +1331,7 @@ static int init_iso_resource(struct clie
26150 int ret;
26151
26152 if ((request->channels == 0 && request->bandwidth == 0) ||
26153- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
26154- request->bandwidth < 0)
26155+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
26156 return -EINVAL;
26157
26158 r = kmalloc(sizeof(*r), GFP_KERNEL);
26159diff -urNp linux-3.1.1/drivers/firewire/core.h linux-3.1.1/drivers/firewire/core.h
26160--- linux-3.1.1/drivers/firewire/core.h 2011-11-11 15:19:27.000000000 -0500
26161+++ linux-3.1.1/drivers/firewire/core.h 2011-11-16 18:39:07.000000000 -0500
26162@@ -101,6 +101,7 @@ struct fw_card_driver {
26163
26164 int (*stop_iso)(struct fw_iso_context *ctx);
26165 };
26166+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
26167
26168 void fw_card_initialize(struct fw_card *card,
26169 const struct fw_card_driver *driver, struct device *device);
26170diff -urNp linux-3.1.1/drivers/firewire/core-transaction.c linux-3.1.1/drivers/firewire/core-transaction.c
26171--- linux-3.1.1/drivers/firewire/core-transaction.c 2011-11-11 15:19:27.000000000 -0500
26172+++ linux-3.1.1/drivers/firewire/core-transaction.c 2011-11-16 18:40:10.000000000 -0500
26173@@ -37,6 +37,7 @@
26174 #include <linux/timer.h>
26175 #include <linux/types.h>
26176 #include <linux/workqueue.h>
26177+#include <linux/sched.h>
26178
26179 #include <asm/byteorder.h>
26180
26181@@ -422,6 +423,8 @@ int fw_run_transaction(struct fw_card *c
26182 struct transaction_callback_data d;
26183 struct fw_transaction t;
26184
26185+ pax_track_stack();
26186+
26187 init_timer_on_stack(&t.split_timeout_timer);
26188 init_completion(&d.done);
26189 d.payload = payload;
26190diff -urNp linux-3.1.1/drivers/firmware/dmi_scan.c linux-3.1.1/drivers/firmware/dmi_scan.c
26191--- linux-3.1.1/drivers/firmware/dmi_scan.c 2011-11-11 15:19:27.000000000 -0500
26192+++ linux-3.1.1/drivers/firmware/dmi_scan.c 2011-11-16 18:39:07.000000000 -0500
26193@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
26194 }
26195 }
26196 else {
26197- /*
26198- * no iounmap() for that ioremap(); it would be a no-op, but
26199- * it's so early in setup that sucker gets confused into doing
26200- * what it shouldn't if we actually call it.
26201- */
26202 p = dmi_ioremap(0xF0000, 0x10000);
26203 if (p == NULL)
26204 goto error;
26205@@ -725,7 +720,7 @@ int dmi_walk(void (*decode)(const struct
26206 if (buf == NULL)
26207 return -1;
26208
26209- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
26210+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
26211
26212 iounmap(buf);
26213 return 0;
26214diff -urNp linux-3.1.1/drivers/gpio/gpio-vr41xx.c linux-3.1.1/drivers/gpio/gpio-vr41xx.c
26215--- linux-3.1.1/drivers/gpio/gpio-vr41xx.c 2011-11-11 15:19:27.000000000 -0500
26216+++ linux-3.1.1/drivers/gpio/gpio-vr41xx.c 2011-11-16 18:39:07.000000000 -0500
26217@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
26218 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
26219 maskl, pendl, maskh, pendh);
26220
26221- atomic_inc(&irq_err_count);
26222+ atomic_inc_unchecked(&irq_err_count);
26223
26224 return -EINVAL;
26225 }
26226diff -urNp linux-3.1.1/drivers/gpu/drm/drm_crtc.c linux-3.1.1/drivers/gpu/drm/drm_crtc.c
26227--- linux-3.1.1/drivers/gpu/drm/drm_crtc.c 2011-11-11 15:19:27.000000000 -0500
26228+++ linux-3.1.1/drivers/gpu/drm/drm_crtc.c 2011-11-16 18:39:07.000000000 -0500
26229@@ -1374,7 +1374,7 @@ int drm_mode_getconnector(struct drm_dev
26230 */
26231 if ((out_resp->count_modes >= mode_count) && mode_count) {
26232 copied = 0;
26233- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
26234+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
26235 list_for_each_entry(mode, &connector->modes, head) {
26236 drm_crtc_convert_to_umode(&u_mode, mode);
26237 if (copy_to_user(mode_ptr + copied,
26238@@ -1389,8 +1389,8 @@ int drm_mode_getconnector(struct drm_dev
26239
26240 if ((out_resp->count_props >= props_count) && props_count) {
26241 copied = 0;
26242- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
26243- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
26244+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
26245+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
26246 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
26247 if (connector->property_ids[i] != 0) {
26248 if (put_user(connector->property_ids[i],
26249@@ -1412,7 +1412,7 @@ int drm_mode_getconnector(struct drm_dev
26250
26251 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
26252 copied = 0;
26253- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
26254+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
26255 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
26256 if (connector->encoder_ids[i] != 0) {
26257 if (put_user(connector->encoder_ids[i],
26258@@ -1571,7 +1571,7 @@ int drm_mode_setcrtc(struct drm_device *
26259 }
26260
26261 for (i = 0; i < crtc_req->count_connectors; i++) {
26262- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
26263+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
26264 if (get_user(out_id, &set_connectors_ptr[i])) {
26265 ret = -EFAULT;
26266 goto out;
26267@@ -1852,7 +1852,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_de
26268 fb = obj_to_fb(obj);
26269
26270 num_clips = r->num_clips;
26271- clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
26272+ clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
26273
26274 if (!num_clips != !clips_ptr) {
26275 ret = -EINVAL;
26276@@ -2272,7 +2272,7 @@ int drm_mode_getproperty_ioctl(struct dr
26277 out_resp->flags = property->flags;
26278
26279 if ((out_resp->count_values >= value_count) && value_count) {
26280- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
26281+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
26282 for (i = 0; i < value_count; i++) {
26283 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
26284 ret = -EFAULT;
26285@@ -2285,7 +2285,7 @@ int drm_mode_getproperty_ioctl(struct dr
26286 if (property->flags & DRM_MODE_PROP_ENUM) {
26287 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
26288 copied = 0;
26289- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
26290+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
26291 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
26292
26293 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
26294@@ -2308,7 +2308,7 @@ int drm_mode_getproperty_ioctl(struct dr
26295 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
26296 copied = 0;
26297 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
26298- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
26299+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
26300
26301 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
26302 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
26303@@ -2369,7 +2369,7 @@ int drm_mode_getblob_ioctl(struct drm_de
26304 struct drm_mode_get_blob *out_resp = data;
26305 struct drm_property_blob *blob;
26306 int ret = 0;
26307- void *blob_ptr;
26308+ void __user *blob_ptr;
26309
26310 if (!drm_core_check_feature(dev, DRIVER_MODESET))
26311 return -EINVAL;
26312@@ -2383,7 +2383,7 @@ int drm_mode_getblob_ioctl(struct drm_de
26313 blob = obj_to_blob(obj);
26314
26315 if (out_resp->length == blob->length) {
26316- blob_ptr = (void *)(unsigned long)out_resp->data;
26317+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
26318 if (copy_to_user(blob_ptr, blob->data, blob->length)){
26319 ret = -EFAULT;
26320 goto done;
26321diff -urNp linux-3.1.1/drivers/gpu/drm/drm_crtc_helper.c linux-3.1.1/drivers/gpu/drm/drm_crtc_helper.c
26322--- linux-3.1.1/drivers/gpu/drm/drm_crtc_helper.c 2011-11-11 15:19:27.000000000 -0500
26323+++ linux-3.1.1/drivers/gpu/drm/drm_crtc_helper.c 2011-11-16 18:40:10.000000000 -0500
26324@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
26325 struct drm_crtc *tmp;
26326 int crtc_mask = 1;
26327
26328- WARN(!crtc, "checking null crtc?\n");
26329+ BUG_ON(!crtc);
26330
26331 dev = crtc->dev;
26332
26333@@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
26334 struct drm_encoder *encoder;
26335 bool ret = true;
26336
26337+ pax_track_stack();
26338+
26339 crtc->enabled = drm_helper_crtc_in_use(crtc);
26340 if (!crtc->enabled)
26341 return true;
26342diff -urNp linux-3.1.1/drivers/gpu/drm/drm_drv.c linux-3.1.1/drivers/gpu/drm/drm_drv.c
26343--- linux-3.1.1/drivers/gpu/drm/drm_drv.c 2011-11-11 15:19:27.000000000 -0500
26344+++ linux-3.1.1/drivers/gpu/drm/drm_drv.c 2011-11-16 18:39:07.000000000 -0500
26345@@ -307,7 +307,7 @@ module_exit(drm_core_exit);
26346 /**
26347 * Copy and IOCTL return string to user space
26348 */
26349-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
26350+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
26351 {
26352 int len;
26353
26354@@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
26355
26356 dev = file_priv->minor->dev;
26357 atomic_inc(&dev->ioctl_count);
26358- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
26359+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
26360 ++file_priv->ioctl_count;
26361
26362 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
26363diff -urNp linux-3.1.1/drivers/gpu/drm/drm_fops.c linux-3.1.1/drivers/gpu/drm/drm_fops.c
26364--- linux-3.1.1/drivers/gpu/drm/drm_fops.c 2011-11-11 15:19:27.000000000 -0500
26365+++ linux-3.1.1/drivers/gpu/drm/drm_fops.c 2011-11-16 18:39:07.000000000 -0500
26366@@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
26367 }
26368
26369 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
26370- atomic_set(&dev->counts[i], 0);
26371+ atomic_set_unchecked(&dev->counts[i], 0);
26372
26373 dev->sigdata.lock = NULL;
26374
26375@@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
26376
26377 retcode = drm_open_helper(inode, filp, dev);
26378 if (!retcode) {
26379- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
26380- if (!dev->open_count++)
26381+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
26382+ if (local_inc_return(&dev->open_count) == 1)
26383 retcode = drm_setup(dev);
26384 }
26385 if (!retcode) {
26386@@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
26387
26388 mutex_lock(&drm_global_mutex);
26389
26390- DRM_DEBUG("open_count = %d\n", dev->open_count);
26391+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
26392
26393 if (dev->driver->preclose)
26394 dev->driver->preclose(dev, file_priv);
26395@@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
26396 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
26397 task_pid_nr(current),
26398 (long)old_encode_dev(file_priv->minor->device),
26399- dev->open_count);
26400+ local_read(&dev->open_count));
26401
26402 /* if the master has gone away we can't do anything with the lock */
26403 if (file_priv->minor->master)
26404@@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
26405 * End inline drm_release
26406 */
26407
26408- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
26409- if (!--dev->open_count) {
26410+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
26411+ if (local_dec_and_test(&dev->open_count)) {
26412 if (atomic_read(&dev->ioctl_count)) {
26413 DRM_ERROR("Device busy: %d\n",
26414 atomic_read(&dev->ioctl_count));
26415diff -urNp linux-3.1.1/drivers/gpu/drm/drm_global.c linux-3.1.1/drivers/gpu/drm/drm_global.c
26416--- linux-3.1.1/drivers/gpu/drm/drm_global.c 2011-11-11 15:19:27.000000000 -0500
26417+++ linux-3.1.1/drivers/gpu/drm/drm_global.c 2011-11-16 18:39:07.000000000 -0500
26418@@ -36,7 +36,7 @@
26419 struct drm_global_item {
26420 struct mutex mutex;
26421 void *object;
26422- int refcount;
26423+ atomic_t refcount;
26424 };
26425
26426 static struct drm_global_item glob[DRM_GLOBAL_NUM];
26427@@ -49,7 +49,7 @@ void drm_global_init(void)
26428 struct drm_global_item *item = &glob[i];
26429 mutex_init(&item->mutex);
26430 item->object = NULL;
26431- item->refcount = 0;
26432+ atomic_set(&item->refcount, 0);
26433 }
26434 }
26435
26436@@ -59,7 +59,7 @@ void drm_global_release(void)
26437 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
26438 struct drm_global_item *item = &glob[i];
26439 BUG_ON(item->object != NULL);
26440- BUG_ON(item->refcount != 0);
26441+ BUG_ON(atomic_read(&item->refcount) != 0);
26442 }
26443 }
26444
26445@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
26446 void *object;
26447
26448 mutex_lock(&item->mutex);
26449- if (item->refcount == 0) {
26450+ if (atomic_read(&item->refcount) == 0) {
26451 item->object = kzalloc(ref->size, GFP_KERNEL);
26452 if (unlikely(item->object == NULL)) {
26453 ret = -ENOMEM;
26454@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
26455 goto out_err;
26456
26457 }
26458- ++item->refcount;
26459+ atomic_inc(&item->refcount);
26460 ref->object = item->object;
26461 object = item->object;
26462 mutex_unlock(&item->mutex);
26463@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
26464 struct drm_global_item *item = &glob[ref->global_type];
26465
26466 mutex_lock(&item->mutex);
26467- BUG_ON(item->refcount == 0);
26468+ BUG_ON(atomic_read(&item->refcount) == 0);
26469 BUG_ON(ref->object != item->object);
26470- if (--item->refcount == 0) {
26471+ if (atomic_dec_and_test(&item->refcount)) {
26472 ref->release(ref);
26473 item->object = NULL;
26474 }
26475diff -urNp linux-3.1.1/drivers/gpu/drm/drm_info.c linux-3.1.1/drivers/gpu/drm/drm_info.c
26476--- linux-3.1.1/drivers/gpu/drm/drm_info.c 2011-11-11 15:19:27.000000000 -0500
26477+++ linux-3.1.1/drivers/gpu/drm/drm_info.c 2011-11-16 18:40:10.000000000 -0500
26478@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
26479 struct drm_local_map *map;
26480 struct drm_map_list *r_list;
26481
26482- /* Hardcoded from _DRM_FRAME_BUFFER,
26483- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
26484- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
26485- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
26486+ static const char * const types[] = {
26487+ [_DRM_FRAME_BUFFER] = "FB",
26488+ [_DRM_REGISTERS] = "REG",
26489+ [_DRM_SHM] = "SHM",
26490+ [_DRM_AGP] = "AGP",
26491+ [_DRM_SCATTER_GATHER] = "SG",
26492+ [_DRM_CONSISTENT] = "PCI",
26493+ [_DRM_GEM] = "GEM" };
26494 const char *type;
26495 int i;
26496
26497@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
26498 map = r_list->map;
26499 if (!map)
26500 continue;
26501- if (map->type < 0 || map->type > 5)
26502+ if (map->type >= ARRAY_SIZE(types))
26503 type = "??";
26504 else
26505 type = types[map->type];
26506@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
26507 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
26508 vma->vm_flags & VM_LOCKED ? 'l' : '-',
26509 vma->vm_flags & VM_IO ? 'i' : '-',
26510+#ifdef CONFIG_GRKERNSEC_HIDESYM
26511+ 0);
26512+#else
26513 vma->vm_pgoff);
26514+#endif
26515
26516 #if defined(__i386__)
26517 pgprot = pgprot_val(vma->vm_page_prot);
26518diff -urNp linux-3.1.1/drivers/gpu/drm/drm_ioc32.c linux-3.1.1/drivers/gpu/drm/drm_ioc32.c
26519--- linux-3.1.1/drivers/gpu/drm/drm_ioc32.c 2011-11-11 15:19:27.000000000 -0500
26520+++ linux-3.1.1/drivers/gpu/drm/drm_ioc32.c 2011-11-16 18:39:07.000000000 -0500
26521@@ -455,7 +455,7 @@ static int compat_drm_infobufs(struct fi
26522 request = compat_alloc_user_space(nbytes);
26523 if (!access_ok(VERIFY_WRITE, request, nbytes))
26524 return -EFAULT;
26525- list = (struct drm_buf_desc *) (request + 1);
26526+ list = (struct drm_buf_desc __user *) (request + 1);
26527
26528 if (__put_user(count, &request->count)
26529 || __put_user(list, &request->list))
26530@@ -516,7 +516,7 @@ static int compat_drm_mapbufs(struct fil
26531 request = compat_alloc_user_space(nbytes);
26532 if (!access_ok(VERIFY_WRITE, request, nbytes))
26533 return -EFAULT;
26534- list = (struct drm_buf_pub *) (request + 1);
26535+ list = (struct drm_buf_pub __user *) (request + 1);
26536
26537 if (__put_user(count, &request->count)
26538 || __put_user(list, &request->list))
26539diff -urNp linux-3.1.1/drivers/gpu/drm/drm_ioctl.c linux-3.1.1/drivers/gpu/drm/drm_ioctl.c
26540--- linux-3.1.1/drivers/gpu/drm/drm_ioctl.c 2011-11-11 15:19:27.000000000 -0500
26541+++ linux-3.1.1/drivers/gpu/drm/drm_ioctl.c 2011-11-16 18:39:07.000000000 -0500
26542@@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
26543 stats->data[i].value =
26544 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
26545 else
26546- stats->data[i].value = atomic_read(&dev->counts[i]);
26547+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
26548 stats->data[i].type = dev->types[i];
26549 }
26550
26551diff -urNp linux-3.1.1/drivers/gpu/drm/drm_lock.c linux-3.1.1/drivers/gpu/drm/drm_lock.c
26552--- linux-3.1.1/drivers/gpu/drm/drm_lock.c 2011-11-11 15:19:27.000000000 -0500
26553+++ linux-3.1.1/drivers/gpu/drm/drm_lock.c 2011-11-16 18:39:07.000000000 -0500
26554@@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
26555 if (drm_lock_take(&master->lock, lock->context)) {
26556 master->lock.file_priv = file_priv;
26557 master->lock.lock_time = jiffies;
26558- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
26559+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
26560 break; /* Got lock */
26561 }
26562
26563@@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
26564 return -EINVAL;
26565 }
26566
26567- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
26568+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
26569
26570 if (drm_lock_free(&master->lock, lock->context)) {
26571 /* FIXME: Should really bail out here. */
26572diff -urNp linux-3.1.1/drivers/gpu/drm/i810/i810_dma.c linux-3.1.1/drivers/gpu/drm/i810/i810_dma.c
26573--- linux-3.1.1/drivers/gpu/drm/i810/i810_dma.c 2011-11-11 15:19:27.000000000 -0500
26574+++ linux-3.1.1/drivers/gpu/drm/i810/i810_dma.c 2011-11-16 18:39:07.000000000 -0500
26575@@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
26576 dma->buflist[vertex->idx],
26577 vertex->discard, vertex->used);
26578
26579- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
26580- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
26581+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
26582+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
26583 sarea_priv->last_enqueue = dev_priv->counter - 1;
26584 sarea_priv->last_dispatch = (int)hw_status[5];
26585
26586@@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
26587 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
26588 mc->last_render);
26589
26590- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
26591- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
26592+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
26593+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
26594 sarea_priv->last_enqueue = dev_priv->counter - 1;
26595 sarea_priv->last_dispatch = (int)hw_status[5];
26596
26597diff -urNp linux-3.1.1/drivers/gpu/drm/i810/i810_drv.h linux-3.1.1/drivers/gpu/drm/i810/i810_drv.h
26598--- linux-3.1.1/drivers/gpu/drm/i810/i810_drv.h 2011-11-11 15:19:27.000000000 -0500
26599+++ linux-3.1.1/drivers/gpu/drm/i810/i810_drv.h 2011-11-16 18:39:07.000000000 -0500
26600@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
26601 int page_flipping;
26602
26603 wait_queue_head_t irq_queue;
26604- atomic_t irq_received;
26605- atomic_t irq_emitted;
26606+ atomic_unchecked_t irq_received;
26607+ atomic_unchecked_t irq_emitted;
26608
26609 int front_offset;
26610 } drm_i810_private_t;
26611diff -urNp linux-3.1.1/drivers/gpu/drm/i915/i915_debugfs.c linux-3.1.1/drivers/gpu/drm/i915/i915_debugfs.c
26612--- linux-3.1.1/drivers/gpu/drm/i915/i915_debugfs.c 2011-11-11 15:19:27.000000000 -0500
26613+++ linux-3.1.1/drivers/gpu/drm/i915/i915_debugfs.c 2011-11-16 18:39:07.000000000 -0500
26614@@ -497,7 +497,7 @@ static int i915_interrupt_info(struct se
26615 I915_READ(GTIMR));
26616 }
26617 seq_printf(m, "Interrupts received: %d\n",
26618- atomic_read(&dev_priv->irq_received));
26619+ atomic_read_unchecked(&dev_priv->irq_received));
26620 for (i = 0; i < I915_NUM_RINGS; i++) {
26621 if (IS_GEN6(dev) || IS_GEN7(dev)) {
26622 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
26623@@ -1185,7 +1185,7 @@ static int i915_opregion(struct seq_file
26624 return ret;
26625
26626 if (opregion->header)
26627- seq_write(m, opregion->header, OPREGION_SIZE);
26628+ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
26629
26630 mutex_unlock(&dev->struct_mutex);
26631
26632diff -urNp linux-3.1.1/drivers/gpu/drm/i915/i915_dma.c linux-3.1.1/drivers/gpu/drm/i915/i915_dma.c
26633--- linux-3.1.1/drivers/gpu/drm/i915/i915_dma.c 2011-11-11 15:19:27.000000000 -0500
26634+++ linux-3.1.1/drivers/gpu/drm/i915/i915_dma.c 2011-11-16 18:39:07.000000000 -0500
26635@@ -1171,7 +1171,7 @@ static bool i915_switcheroo_can_switch(s
26636 bool can_switch;
26637
26638 spin_lock(&dev->count_lock);
26639- can_switch = (dev->open_count == 0);
26640+ can_switch = (local_read(&dev->open_count) == 0);
26641 spin_unlock(&dev->count_lock);
26642 return can_switch;
26643 }
26644diff -urNp linux-3.1.1/drivers/gpu/drm/i915/i915_drv.h linux-3.1.1/drivers/gpu/drm/i915/i915_drv.h
26645--- linux-3.1.1/drivers/gpu/drm/i915/i915_drv.h 2011-11-11 15:19:27.000000000 -0500
26646+++ linux-3.1.1/drivers/gpu/drm/i915/i915_drv.h 2011-11-16 18:39:07.000000000 -0500
26647@@ -222,7 +222,7 @@ struct drm_i915_display_funcs {
26648 /* render clock increase/decrease */
26649 /* display clock increase/decrease */
26650 /* pll clock increase/decrease */
26651-};
26652+} __no_const;
26653
26654 struct intel_device_info {
26655 u8 gen;
26656@@ -305,7 +305,7 @@ typedef struct drm_i915_private {
26657 int current_page;
26658 int page_flipping;
26659
26660- atomic_t irq_received;
26661+ atomic_unchecked_t irq_received;
26662
26663 /* protects the irq masks */
26664 spinlock_t irq_lock;
26665@@ -882,7 +882,7 @@ struct drm_i915_gem_object {
26666 * will be page flipped away on the next vblank. When it
26667 * reaches 0, dev_priv->pending_flip_queue will be woken up.
26668 */
26669- atomic_t pending_flip;
26670+ atomic_unchecked_t pending_flip;
26671 };
26672
26673 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
26674@@ -1262,7 +1262,7 @@ extern int intel_setup_gmbus(struct drm_
26675 extern void intel_teardown_gmbus(struct drm_device *dev);
26676 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
26677 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
26678-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
26679+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
26680 {
26681 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
26682 }
26683diff -urNp linux-3.1.1/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.1.1/drivers/gpu/drm/i915/i915_gem_execbuffer.c
26684--- linux-3.1.1/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-11-11 15:19:27.000000000 -0500
26685+++ linux-3.1.1/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-11-16 18:39:07.000000000 -0500
26686@@ -188,7 +188,7 @@ i915_gem_object_set_to_gpu_domain(struct
26687 i915_gem_clflush_object(obj);
26688
26689 if (obj->base.pending_write_domain)
26690- cd->flips |= atomic_read(&obj->pending_flip);
26691+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
26692
26693 /* The actual obj->write_domain will be updated with
26694 * pending_write_domain after we emit the accumulated flush for all
26695diff -urNp linux-3.1.1/drivers/gpu/drm/i915/i915_irq.c linux-3.1.1/drivers/gpu/drm/i915/i915_irq.c
26696--- linux-3.1.1/drivers/gpu/drm/i915/i915_irq.c 2011-11-11 15:19:27.000000000 -0500
26697+++ linux-3.1.1/drivers/gpu/drm/i915/i915_irq.c 2011-11-16 18:39:07.000000000 -0500
26698@@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler
26699 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
26700 struct drm_i915_master_private *master_priv;
26701
26702- atomic_inc(&dev_priv->irq_received);
26703+ atomic_inc_unchecked(&dev_priv->irq_received);
26704
26705 /* disable master interrupt before clearing iir */
26706 de_ier = I915_READ(DEIER);
26707@@ -565,7 +565,7 @@ static irqreturn_t ironlake_irq_handler(
26708 struct drm_i915_master_private *master_priv;
26709 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
26710
26711- atomic_inc(&dev_priv->irq_received);
26712+ atomic_inc_unchecked(&dev_priv->irq_received);
26713
26714 if (IS_GEN6(dev))
26715 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
26716@@ -1228,7 +1228,7 @@ static irqreturn_t i915_driver_irq_handl
26717 int ret = IRQ_NONE, pipe;
26718 bool blc_event = false;
26719
26720- atomic_inc(&dev_priv->irq_received);
26721+ atomic_inc_unchecked(&dev_priv->irq_received);
26722
26723 iir = I915_READ(IIR);
26724
26725@@ -1740,7 +1740,7 @@ static void ironlake_irq_preinstall(stru
26726 {
26727 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
26728
26729- atomic_set(&dev_priv->irq_received, 0);
26730+ atomic_set_unchecked(&dev_priv->irq_received, 0);
26731
26732 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
26733 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
26734@@ -1904,7 +1904,7 @@ static void i915_driver_irq_preinstall(s
26735 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
26736 int pipe;
26737
26738- atomic_set(&dev_priv->irq_received, 0);
26739+ atomic_set_unchecked(&dev_priv->irq_received, 0);
26740
26741 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
26742 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
26743diff -urNp linux-3.1.1/drivers/gpu/drm/i915/intel_display.c linux-3.1.1/drivers/gpu/drm/i915/intel_display.c
26744--- linux-3.1.1/drivers/gpu/drm/i915/intel_display.c 2011-11-11 15:19:27.000000000 -0500
26745+++ linux-3.1.1/drivers/gpu/drm/i915/intel_display.c 2011-11-16 18:39:07.000000000 -0500
26746@@ -2205,7 +2205,7 @@ intel_pipe_set_base(struct drm_crtc *crt
26747
26748 wait_event(dev_priv->pending_flip_queue,
26749 atomic_read(&dev_priv->mm.wedged) ||
26750- atomic_read(&obj->pending_flip) == 0);
26751+ atomic_read_unchecked(&obj->pending_flip) == 0);
26752
26753 /* Big Hammer, we also need to ensure that any pending
26754 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
26755@@ -2824,7 +2824,7 @@ static void intel_crtc_wait_for_pending_
26756 obj = to_intel_framebuffer(crtc->fb)->obj;
26757 dev_priv = crtc->dev->dev_private;
26758 wait_event(dev_priv->pending_flip_queue,
26759- atomic_read(&obj->pending_flip) == 0);
26760+ atomic_read_unchecked(&obj->pending_flip) == 0);
26761 }
26762
26763 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
26764@@ -6644,7 +6644,7 @@ static void do_intel_finish_page_flip(st
26765
26766 atomic_clear_mask(1 << intel_crtc->plane,
26767 &obj->pending_flip.counter);
26768- if (atomic_read(&obj->pending_flip) == 0)
26769+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
26770 wake_up(&dev_priv->pending_flip_queue);
26771
26772 schedule_work(&work->work);
26773@@ -6933,7 +6933,7 @@ static int intel_crtc_page_flip(struct d
26774 /* Block clients from rendering to the new back buffer until
26775 * the flip occurs and the object is no longer visible.
26776 */
26777- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26778+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26779
26780 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
26781 if (ret)
26782@@ -6947,7 +6947,7 @@ static int intel_crtc_page_flip(struct d
26783 return 0;
26784
26785 cleanup_pending:
26786- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26787+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26788 cleanup_objs:
26789 drm_gem_object_unreference(&work->old_fb_obj->base);
26790 drm_gem_object_unreference(&obj->base);
26791diff -urNp linux-3.1.1/drivers/gpu/drm/mga/mga_drv.h linux-3.1.1/drivers/gpu/drm/mga/mga_drv.h
26792--- linux-3.1.1/drivers/gpu/drm/mga/mga_drv.h 2011-11-11 15:19:27.000000000 -0500
26793+++ linux-3.1.1/drivers/gpu/drm/mga/mga_drv.h 2011-11-16 18:39:07.000000000 -0500
26794@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
26795 u32 clear_cmd;
26796 u32 maccess;
26797
26798- atomic_t vbl_received; /**< Number of vblanks received. */
26799+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
26800 wait_queue_head_t fence_queue;
26801- atomic_t last_fence_retired;
26802+ atomic_unchecked_t last_fence_retired;
26803 u32 next_fence_to_post;
26804
26805 unsigned int fb_cpp;
26806diff -urNp linux-3.1.1/drivers/gpu/drm/mga/mga_irq.c linux-3.1.1/drivers/gpu/drm/mga/mga_irq.c
26807--- linux-3.1.1/drivers/gpu/drm/mga/mga_irq.c 2011-11-11 15:19:27.000000000 -0500
26808+++ linux-3.1.1/drivers/gpu/drm/mga/mga_irq.c 2011-11-16 18:39:07.000000000 -0500
26809@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
26810 if (crtc != 0)
26811 return 0;
26812
26813- return atomic_read(&dev_priv->vbl_received);
26814+ return atomic_read_unchecked(&dev_priv->vbl_received);
26815 }
26816
26817
26818@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
26819 /* VBLANK interrupt */
26820 if (status & MGA_VLINEPEN) {
26821 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
26822- atomic_inc(&dev_priv->vbl_received);
26823+ atomic_inc_unchecked(&dev_priv->vbl_received);
26824 drm_handle_vblank(dev, 0);
26825 handled = 1;
26826 }
26827@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
26828 if ((prim_start & ~0x03) != (prim_end & ~0x03))
26829 MGA_WRITE(MGA_PRIMEND, prim_end);
26830
26831- atomic_inc(&dev_priv->last_fence_retired);
26832+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
26833 DRM_WAKEUP(&dev_priv->fence_queue);
26834 handled = 1;
26835 }
26836@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
26837 * using fences.
26838 */
26839 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
26840- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
26841+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
26842 - *sequence) <= (1 << 23)));
26843
26844 *sequence = cur_fence;
26845diff -urNp linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_bios.c linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_bios.c
26846--- linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-11-11 15:19:27.000000000 -0500
26847+++ linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-11-16 18:39:07.000000000 -0500
26848@@ -201,7 +201,7 @@ struct methods {
26849 const char desc[8];
26850 void (*loadbios)(struct drm_device *, uint8_t *);
26851 const bool rw;
26852-};
26853+} __do_const;
26854
26855 static struct methods shadow_methods[] = {
26856 { "PRAMIN", load_vbios_pramin, true },
26857@@ -5489,7 +5489,7 @@ parse_bit_displayport_tbl_entry(struct d
26858 struct bit_table {
26859 const char id;
26860 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
26861-};
26862+} __no_const;
26863
26864 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
26865
26866diff -urNp linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_drv.h linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_drv.h
26867--- linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-11-11 15:19:27.000000000 -0500
26868+++ linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-11-16 18:39:07.000000000 -0500
26869@@ -238,7 +238,7 @@ struct nouveau_channel {
26870 struct list_head pending;
26871 uint32_t sequence;
26872 uint32_t sequence_ack;
26873- atomic_t last_sequence_irq;
26874+ atomic_unchecked_t last_sequence_irq;
26875 struct nouveau_vma vma;
26876 } fence;
26877
26878@@ -319,7 +319,7 @@ struct nouveau_exec_engine {
26879 u32 handle, u16 class);
26880 void (*set_tile_region)(struct drm_device *dev, int i);
26881 void (*tlb_flush)(struct drm_device *, int engine);
26882-};
26883+} __no_const;
26884
26885 struct nouveau_instmem_engine {
26886 void *priv;
26887@@ -341,13 +341,13 @@ struct nouveau_instmem_engine {
26888 struct nouveau_mc_engine {
26889 int (*init)(struct drm_device *dev);
26890 void (*takedown)(struct drm_device *dev);
26891-};
26892+} __no_const;
26893
26894 struct nouveau_timer_engine {
26895 int (*init)(struct drm_device *dev);
26896 void (*takedown)(struct drm_device *dev);
26897 uint64_t (*read)(struct drm_device *dev);
26898-};
26899+} __no_const;
26900
26901 struct nouveau_fb_engine {
26902 int num_tiles;
26903@@ -513,7 +513,7 @@ struct nouveau_vram_engine {
26904 void (*put)(struct drm_device *, struct nouveau_mem **);
26905
26906 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
26907-};
26908+} __no_const;
26909
26910 struct nouveau_engine {
26911 struct nouveau_instmem_engine instmem;
26912@@ -660,7 +660,7 @@ struct drm_nouveau_private {
26913 struct drm_global_reference mem_global_ref;
26914 struct ttm_bo_global_ref bo_global_ref;
26915 struct ttm_bo_device bdev;
26916- atomic_t validate_sequence;
26917+ atomic_unchecked_t validate_sequence;
26918 } ttm;
26919
26920 struct {
26921diff -urNp linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_fence.c linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_fence.c
26922--- linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-11-11 15:19:27.000000000 -0500
26923+++ linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-11-16 18:39:07.000000000 -0500
26924@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
26925 if (USE_REFCNT(dev))
26926 sequence = nvchan_rd32(chan, 0x48);
26927 else
26928- sequence = atomic_read(&chan->fence.last_sequence_irq);
26929+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
26930
26931 if (chan->fence.sequence_ack == sequence)
26932 goto out;
26933@@ -541,7 +541,7 @@ nouveau_fence_channel_init(struct nouvea
26934
26935 INIT_LIST_HEAD(&chan->fence.pending);
26936 spin_lock_init(&chan->fence.lock);
26937- atomic_set(&chan->fence.last_sequence_irq, 0);
26938+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
26939 return 0;
26940 }
26941
26942diff -urNp linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_gem.c linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_gem.c
26943--- linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-11-11 15:19:27.000000000 -0500
26944+++ linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-11-16 18:39:07.000000000 -0500
26945@@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *ch
26946 int trycnt = 0;
26947 int ret, i;
26948
26949- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
26950+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
26951 retry:
26952 if (++trycnt > 100000) {
26953 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
26954diff -urNp linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_state.c linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_state.c
26955--- linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_state.c 2011-11-11 15:19:27.000000000 -0500
26956+++ linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_state.c 2011-11-16 18:39:07.000000000 -0500
26957@@ -496,7 +496,7 @@ static bool nouveau_switcheroo_can_switc
26958 bool can_switch;
26959
26960 spin_lock(&dev->count_lock);
26961- can_switch = (dev->open_count == 0);
26962+ can_switch = (local_read(&dev->open_count) == 0);
26963 spin_unlock(&dev->count_lock);
26964 return can_switch;
26965 }
26966diff -urNp linux-3.1.1/drivers/gpu/drm/nouveau/nv04_graph.c linux-3.1.1/drivers/gpu/drm/nouveau/nv04_graph.c
26967--- linux-3.1.1/drivers/gpu/drm/nouveau/nv04_graph.c 2011-11-11 15:19:27.000000000 -0500
26968+++ linux-3.1.1/drivers/gpu/drm/nouveau/nv04_graph.c 2011-11-16 18:39:07.000000000 -0500
26969@@ -554,7 +554,7 @@ static int
26970 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
26971 u32 class, u32 mthd, u32 data)
26972 {
26973- atomic_set(&chan->fence.last_sequence_irq, data);
26974+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
26975 return 0;
26976 }
26977
26978diff -urNp linux-3.1.1/drivers/gpu/drm/r128/r128_cce.c linux-3.1.1/drivers/gpu/drm/r128/r128_cce.c
26979--- linux-3.1.1/drivers/gpu/drm/r128/r128_cce.c 2011-11-11 15:19:27.000000000 -0500
26980+++ linux-3.1.1/drivers/gpu/drm/r128/r128_cce.c 2011-11-16 18:39:07.000000000 -0500
26981@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
26982
26983 /* GH: Simple idle check.
26984 */
26985- atomic_set(&dev_priv->idle_count, 0);
26986+ atomic_set_unchecked(&dev_priv->idle_count, 0);
26987
26988 /* We don't support anything other than bus-mastering ring mode,
26989 * but the ring can be in either AGP or PCI space for the ring
26990diff -urNp linux-3.1.1/drivers/gpu/drm/r128/r128_drv.h linux-3.1.1/drivers/gpu/drm/r128/r128_drv.h
26991--- linux-3.1.1/drivers/gpu/drm/r128/r128_drv.h 2011-11-11 15:19:27.000000000 -0500
26992+++ linux-3.1.1/drivers/gpu/drm/r128/r128_drv.h 2011-11-16 18:39:07.000000000 -0500
26993@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
26994 int is_pci;
26995 unsigned long cce_buffers_offset;
26996
26997- atomic_t idle_count;
26998+ atomic_unchecked_t idle_count;
26999
27000 int page_flipping;
27001 int current_page;
27002 u32 crtc_offset;
27003 u32 crtc_offset_cntl;
27004
27005- atomic_t vbl_received;
27006+ atomic_unchecked_t vbl_received;
27007
27008 u32 color_fmt;
27009 unsigned int front_offset;
27010diff -urNp linux-3.1.1/drivers/gpu/drm/r128/r128_irq.c linux-3.1.1/drivers/gpu/drm/r128/r128_irq.c
27011--- linux-3.1.1/drivers/gpu/drm/r128/r128_irq.c 2011-11-11 15:19:27.000000000 -0500
27012+++ linux-3.1.1/drivers/gpu/drm/r128/r128_irq.c 2011-11-16 18:39:07.000000000 -0500
27013@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
27014 if (crtc != 0)
27015 return 0;
27016
27017- return atomic_read(&dev_priv->vbl_received);
27018+ return atomic_read_unchecked(&dev_priv->vbl_received);
27019 }
27020
27021 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
27022@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
27023 /* VBLANK interrupt */
27024 if (status & R128_CRTC_VBLANK_INT) {
27025 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
27026- atomic_inc(&dev_priv->vbl_received);
27027+ atomic_inc_unchecked(&dev_priv->vbl_received);
27028 drm_handle_vblank(dev, 0);
27029 return IRQ_HANDLED;
27030 }
27031diff -urNp linux-3.1.1/drivers/gpu/drm/r128/r128_state.c linux-3.1.1/drivers/gpu/drm/r128/r128_state.c
27032--- linux-3.1.1/drivers/gpu/drm/r128/r128_state.c 2011-11-11 15:19:27.000000000 -0500
27033+++ linux-3.1.1/drivers/gpu/drm/r128/r128_state.c 2011-11-16 18:39:07.000000000 -0500
27034@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
27035
27036 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
27037 {
27038- if (atomic_read(&dev_priv->idle_count) == 0)
27039+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
27040 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
27041 else
27042- atomic_set(&dev_priv->idle_count, 0);
27043+ atomic_set_unchecked(&dev_priv->idle_count, 0);
27044 }
27045
27046 #endif
27047diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/atom.c linux-3.1.1/drivers/gpu/drm/radeon/atom.c
27048--- linux-3.1.1/drivers/gpu/drm/radeon/atom.c 2011-11-11 15:19:27.000000000 -0500
27049+++ linux-3.1.1/drivers/gpu/drm/radeon/atom.c 2011-11-16 19:09:42.000000000 -0500
27050@@ -1254,6 +1254,8 @@ struct atom_context *atom_parse(struct c
27051 char name[512];
27052 int i;
27053
27054+ pax_track_stack();
27055+
27056 if (!ctx)
27057 return NULL;
27058
27059diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/mkregtable.c linux-3.1.1/drivers/gpu/drm/radeon/mkregtable.c
27060--- linux-3.1.1/drivers/gpu/drm/radeon/mkregtable.c 2011-11-11 15:19:27.000000000 -0500
27061+++ linux-3.1.1/drivers/gpu/drm/radeon/mkregtable.c 2011-11-16 18:39:07.000000000 -0500
27062@@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
27063 regex_t mask_rex;
27064 regmatch_t match[4];
27065 char buf[1024];
27066- size_t end;
27067+ long end;
27068 int len;
27069 int done = 0;
27070 int r;
27071 unsigned o;
27072 struct offset *offset;
27073 char last_reg_s[10];
27074- int last_reg;
27075+ unsigned long last_reg;
27076
27077 if (regcomp
27078 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
27079diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_atombios.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_atombios.c
27080--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_atombios.c 2011-11-11 15:19:27.000000000 -0500
27081+++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_atombios.c 2011-11-16 18:40:10.000000000 -0500
27082@@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
27083 struct radeon_gpio_rec gpio;
27084 struct radeon_hpd hpd;
27085
27086+ pax_track_stack();
27087+
27088 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
27089 return false;
27090
27091diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_device.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_device.c
27092--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_device.c 2011-11-11 15:19:27.000000000 -0500
27093+++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_device.c 2011-11-16 18:39:07.000000000 -0500
27094@@ -684,7 +684,7 @@ static bool radeon_switcheroo_can_switch
27095 bool can_switch;
27096
27097 spin_lock(&dev->count_lock);
27098- can_switch = (dev->open_count == 0);
27099+ can_switch = (local_read(&dev->open_count) == 0);
27100 spin_unlock(&dev->count_lock);
27101 return can_switch;
27102 }
27103diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_display.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_display.c
27104--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_display.c 2011-11-11 15:19:27.000000000 -0500
27105+++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_display.c 2011-11-16 18:40:10.000000000 -0500
27106@@ -925,6 +925,8 @@ void radeon_compute_pll_legacy(struct ra
27107 uint32_t post_div;
27108 u32 pll_out_min, pll_out_max;
27109
27110+ pax_track_stack();
27111+
27112 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
27113 freq = freq * 1000;
27114
27115diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_drv.h linux-3.1.1/drivers/gpu/drm/radeon/radeon_drv.h
27116--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_drv.h 2011-11-11 15:19:27.000000000 -0500
27117+++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_drv.h 2011-11-16 18:39:07.000000000 -0500
27118@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
27119
27120 /* SW interrupt */
27121 wait_queue_head_t swi_queue;
27122- atomic_t swi_emitted;
27123+ atomic_unchecked_t swi_emitted;
27124 int vblank_crtc;
27125 uint32_t irq_enable_reg;
27126 uint32_t r500_disp_irq_reg;
27127diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_fence.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_fence.c
27128--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_fence.c 2011-11-11 15:19:27.000000000 -0500
27129+++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_fence.c 2011-11-16 18:39:07.000000000 -0500
27130@@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_devi
27131 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
27132 return 0;
27133 }
27134- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
27135+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
27136 if (!rdev->cp.ready)
27137 /* FIXME: cp is not running assume everythings is done right
27138 * away
27139@@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct rade
27140 return r;
27141 }
27142 radeon_fence_write(rdev, 0);
27143- atomic_set(&rdev->fence_drv.seq, 0);
27144+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
27145 INIT_LIST_HEAD(&rdev->fence_drv.created);
27146 INIT_LIST_HEAD(&rdev->fence_drv.emited);
27147 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
27148diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon.h linux-3.1.1/drivers/gpu/drm/radeon/radeon.h
27149--- linux-3.1.1/drivers/gpu/drm/radeon/radeon.h 2011-11-11 15:19:27.000000000 -0500
27150+++ linux-3.1.1/drivers/gpu/drm/radeon/radeon.h 2011-11-16 18:39:07.000000000 -0500
27151@@ -192,7 +192,7 @@ extern int sumo_get_temp(struct radeon_d
27152 */
27153 struct radeon_fence_driver {
27154 uint32_t scratch_reg;
27155- atomic_t seq;
27156+ atomic_unchecked_t seq;
27157 uint32_t last_seq;
27158 unsigned long last_jiffies;
27159 unsigned long last_timeout;
27160@@ -962,7 +962,7 @@ struct radeon_asic {
27161 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
27162 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
27163 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
27164-};
27165+} __no_const;
27166
27167 /*
27168 * Asic structures
27169diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_ioc32.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_ioc32.c
27170--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-11-11 15:19:27.000000000 -0500
27171+++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-11-16 18:39:07.000000000 -0500
27172@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
27173 request = compat_alloc_user_space(sizeof(*request));
27174 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
27175 || __put_user(req32.param, &request->param)
27176- || __put_user((void __user *)(unsigned long)req32.value,
27177+ || __put_user((unsigned long)req32.value,
27178 &request->value))
27179 return -EFAULT;
27180
27181diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_irq.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_irq.c
27182--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_irq.c 2011-11-11 15:19:27.000000000 -0500
27183+++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_irq.c 2011-11-16 18:39:07.000000000 -0500
27184@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
27185 unsigned int ret;
27186 RING_LOCALS;
27187
27188- atomic_inc(&dev_priv->swi_emitted);
27189- ret = atomic_read(&dev_priv->swi_emitted);
27190+ atomic_inc_unchecked(&dev_priv->swi_emitted);
27191+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
27192
27193 BEGIN_RING(4);
27194 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
27195@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
27196 drm_radeon_private_t *dev_priv =
27197 (drm_radeon_private_t *) dev->dev_private;
27198
27199- atomic_set(&dev_priv->swi_emitted, 0);
27200+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
27201 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
27202
27203 dev->max_vblank_count = 0x001fffff;
27204diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_state.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_state.c
27205--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_state.c 2011-11-11 15:19:27.000000000 -0500
27206+++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_state.c 2011-11-16 18:39:07.000000000 -0500
27207@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
27208 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
27209 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
27210
27211- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
27212+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
27213 sarea_priv->nbox * sizeof(depth_boxes[0])))
27214 return -EFAULT;
27215
27216@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
27217 {
27218 drm_radeon_private_t *dev_priv = dev->dev_private;
27219 drm_radeon_getparam_t *param = data;
27220- int value;
27221+ int value = 0;
27222
27223 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
27224
27225diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_ttm.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_ttm.c
27226--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_ttm.c 2011-11-11 15:19:27.000000000 -0500
27227+++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_ttm.c 2011-11-16 18:39:07.000000000 -0500
27228@@ -672,8 +672,10 @@ int radeon_mmap(struct file *filp, struc
27229 }
27230 if (unlikely(ttm_vm_ops == NULL)) {
27231 ttm_vm_ops = vma->vm_ops;
27232- radeon_ttm_vm_ops = *ttm_vm_ops;
27233- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
27234+ pax_open_kernel();
27235+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
27236+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
27237+ pax_close_kernel();
27238 }
27239 vma->vm_ops = &radeon_ttm_vm_ops;
27240 return 0;
27241diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/rs690.c linux-3.1.1/drivers/gpu/drm/radeon/rs690.c
27242--- linux-3.1.1/drivers/gpu/drm/radeon/rs690.c 2011-11-11 15:19:27.000000000 -0500
27243+++ linux-3.1.1/drivers/gpu/drm/radeon/rs690.c 2011-11-16 18:39:07.000000000 -0500
27244@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
27245 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
27246 rdev->pm.sideport_bandwidth.full)
27247 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
27248- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
27249+ read_delay_latency.full = dfixed_const(800 * 1000);
27250 read_delay_latency.full = dfixed_div(read_delay_latency,
27251 rdev->pm.igp_sideport_mclk);
27252+ a.full = dfixed_const(370);
27253+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
27254 } else {
27255 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
27256 rdev->pm.k8_bandwidth.full)
27257diff -urNp linux-3.1.1/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-3.1.1/drivers/gpu/drm/ttm/ttm_page_alloc.c
27258--- linux-3.1.1/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-11-11 15:19:27.000000000 -0500
27259+++ linux-3.1.1/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-11-16 18:39:07.000000000 -0500
27260@@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages
27261 static int ttm_pool_mm_shrink(struct shrinker *shrink,
27262 struct shrink_control *sc)
27263 {
27264- static atomic_t start_pool = ATOMIC_INIT(0);
27265+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
27266 unsigned i;
27267- unsigned pool_offset = atomic_add_return(1, &start_pool);
27268+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
27269 struct ttm_page_pool *pool;
27270 int shrink_pages = sc->nr_to_scan;
27271
27272diff -urNp linux-3.1.1/drivers/gpu/drm/via/via_drv.h linux-3.1.1/drivers/gpu/drm/via/via_drv.h
27273--- linux-3.1.1/drivers/gpu/drm/via/via_drv.h 2011-11-11 15:19:27.000000000 -0500
27274+++ linux-3.1.1/drivers/gpu/drm/via/via_drv.h 2011-11-16 18:39:07.000000000 -0500
27275@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
27276 typedef uint32_t maskarray_t[5];
27277
27278 typedef struct drm_via_irq {
27279- atomic_t irq_received;
27280+ atomic_unchecked_t irq_received;
27281 uint32_t pending_mask;
27282 uint32_t enable_mask;
27283 wait_queue_head_t irq_queue;
27284@@ -75,7 +75,7 @@ typedef struct drm_via_private {
27285 struct timeval last_vblank;
27286 int last_vblank_valid;
27287 unsigned usec_per_vblank;
27288- atomic_t vbl_received;
27289+ atomic_unchecked_t vbl_received;
27290 drm_via_state_t hc_state;
27291 char pci_buf[VIA_PCI_BUF_SIZE];
27292 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
27293diff -urNp linux-3.1.1/drivers/gpu/drm/via/via_irq.c linux-3.1.1/drivers/gpu/drm/via/via_irq.c
27294--- linux-3.1.1/drivers/gpu/drm/via/via_irq.c 2011-11-11 15:19:27.000000000 -0500
27295+++ linux-3.1.1/drivers/gpu/drm/via/via_irq.c 2011-11-16 18:39:07.000000000 -0500
27296@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
27297 if (crtc != 0)
27298 return 0;
27299
27300- return atomic_read(&dev_priv->vbl_received);
27301+ return atomic_read_unchecked(&dev_priv->vbl_received);
27302 }
27303
27304 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
27305@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
27306
27307 status = VIA_READ(VIA_REG_INTERRUPT);
27308 if (status & VIA_IRQ_VBLANK_PENDING) {
27309- atomic_inc(&dev_priv->vbl_received);
27310- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
27311+ atomic_inc_unchecked(&dev_priv->vbl_received);
27312+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
27313 do_gettimeofday(&cur_vblank);
27314 if (dev_priv->last_vblank_valid) {
27315 dev_priv->usec_per_vblank =
27316@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
27317 dev_priv->last_vblank = cur_vblank;
27318 dev_priv->last_vblank_valid = 1;
27319 }
27320- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
27321+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
27322 DRM_DEBUG("US per vblank is: %u\n",
27323 dev_priv->usec_per_vblank);
27324 }
27325@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
27326
27327 for (i = 0; i < dev_priv->num_irqs; ++i) {
27328 if (status & cur_irq->pending_mask) {
27329- atomic_inc(&cur_irq->irq_received);
27330+ atomic_inc_unchecked(&cur_irq->irq_received);
27331 DRM_WAKEUP(&cur_irq->irq_queue);
27332 handled = 1;
27333 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
27334@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
27335 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
27336 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
27337 masks[irq][4]));
27338- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
27339+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
27340 } else {
27341 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
27342 (((cur_irq_sequence =
27343- atomic_read(&cur_irq->irq_received)) -
27344+ atomic_read_unchecked(&cur_irq->irq_received)) -
27345 *sequence) <= (1 << 23)));
27346 }
27347 *sequence = cur_irq_sequence;
27348@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
27349 }
27350
27351 for (i = 0; i < dev_priv->num_irqs; ++i) {
27352- atomic_set(&cur_irq->irq_received, 0);
27353+ atomic_set_unchecked(&cur_irq->irq_received, 0);
27354 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
27355 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
27356 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
27357@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
27358 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
27359 case VIA_IRQ_RELATIVE:
27360 irqwait->request.sequence +=
27361- atomic_read(&cur_irq->irq_received);
27362+ atomic_read_unchecked(&cur_irq->irq_received);
27363 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
27364 case VIA_IRQ_ABSOLUTE:
27365 break;
27366diff -urNp linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
27367--- linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-11-11 15:19:27.000000000 -0500
27368+++ linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-11-16 18:39:07.000000000 -0500
27369@@ -240,7 +240,7 @@ struct vmw_private {
27370 * Fencing and IRQs.
27371 */
27372
27373- atomic_t fence_seq;
27374+ atomic_unchecked_t fence_seq;
27375 wait_queue_head_t fence_queue;
27376 wait_queue_head_t fifo_queue;
27377 atomic_t fence_queue_waiters;
27378diff -urNp linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
27379--- linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c 2011-11-11 15:19:27.000000000 -0500
27380+++ linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c 2011-11-16 18:39:07.000000000 -0500
27381@@ -610,7 +610,7 @@ int vmw_execbuf_ioctl(struct drm_device
27382 struct drm_vmw_fence_rep fence_rep;
27383 struct drm_vmw_fence_rep __user *user_fence_rep;
27384 int ret;
27385- void *user_cmd;
27386+ void __user *user_cmd;
27387 void *cmd;
27388 uint32_t sequence;
27389 struct vmw_sw_context *sw_context = &dev_priv->ctx;
27390diff -urNp linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
27391--- linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-11-11 15:19:27.000000000 -0500
27392+++ linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-11-16 18:39:07.000000000 -0500
27393@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
27394 while (!vmw_lag_lt(queue, us)) {
27395 spin_lock(&queue->lock);
27396 if (list_empty(&queue->head))
27397- sequence = atomic_read(&dev_priv->fence_seq);
27398+ sequence = atomic_read_unchecked(&dev_priv->fence_seq);
27399 else {
27400 fence = list_first_entry(&queue->head,
27401 struct vmw_fence, head);
27402diff -urNp linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
27403--- linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-11-11 15:19:27.000000000 -0500
27404+++ linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-11-16 18:39:07.000000000 -0500
27405@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
27406 (unsigned int) min,
27407 (unsigned int) fifo->capabilities);
27408
27409- atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
27410+ atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
27411 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
27412 vmw_fence_queue_init(&fifo->fence_queue);
27413 return vmw_fifo_send_fence(dev_priv, &dummy);
27414@@ -356,7 +356,7 @@ void *vmw_fifo_reserve(struct vmw_privat
27415 if (reserveable)
27416 iowrite32(bytes, fifo_mem +
27417 SVGA_FIFO_RESERVED);
27418- return fifo_mem + (next_cmd >> 2);
27419+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
27420 } else {
27421 need_bounce = true;
27422 }
27423@@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
27424
27425 fm = vmw_fifo_reserve(dev_priv, bytes);
27426 if (unlikely(fm == NULL)) {
27427- *sequence = atomic_read(&dev_priv->fence_seq);
27428+ *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
27429 ret = -ENOMEM;
27430 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
27431 false, 3*HZ);
27432@@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
27433 }
27434
27435 do {
27436- *sequence = atomic_add_return(1, &dev_priv->fence_seq);
27437+ *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
27438 } while (*sequence == 0);
27439
27440 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
27441diff -urNp linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
27442--- linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-11-11 15:19:27.000000000 -0500
27443+++ linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-11-16 18:39:07.000000000 -0500
27444@@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
27445 * emitted. Then the fence is stale and signaled.
27446 */
27447
27448- ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
27449+ ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
27450 > VMW_FENCE_WRAP);
27451
27452 return ret;
27453@@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
27454
27455 if (fifo_idle)
27456 down_read(&fifo_state->rwsem);
27457- signal_seq = atomic_read(&dev_priv->fence_seq);
27458+ signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
27459 ret = 0;
27460
27461 for (;;) {
27462diff -urNp linux-3.1.1/drivers/hid/hid-core.c linux-3.1.1/drivers/hid/hid-core.c
27463--- linux-3.1.1/drivers/hid/hid-core.c 2011-11-11 15:19:27.000000000 -0500
27464+++ linux-3.1.1/drivers/hid/hid-core.c 2011-11-16 18:39:07.000000000 -0500
27465@@ -1951,7 +1951,7 @@ static bool hid_ignore(struct hid_device
27466
27467 int hid_add_device(struct hid_device *hdev)
27468 {
27469- static atomic_t id = ATOMIC_INIT(0);
27470+ static atomic_unchecked_t id = ATOMIC_INIT(0);
27471 int ret;
27472
27473 if (WARN_ON(hdev->status & HID_STAT_ADDED))
27474@@ -1966,7 +1966,7 @@ int hid_add_device(struct hid_device *hd
27475 /* XXX hack, any other cleaner solution after the driver core
27476 * is converted to allow more than 20 bytes as the device name? */
27477 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
27478- hdev->vendor, hdev->product, atomic_inc_return(&id));
27479+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
27480
27481 hid_debug_register(hdev, dev_name(&hdev->dev));
27482 ret = device_add(&hdev->dev);
27483diff -urNp linux-3.1.1/drivers/hid/usbhid/hiddev.c linux-3.1.1/drivers/hid/usbhid/hiddev.c
27484--- linux-3.1.1/drivers/hid/usbhid/hiddev.c 2011-11-11 15:19:27.000000000 -0500
27485+++ linux-3.1.1/drivers/hid/usbhid/hiddev.c 2011-11-16 18:39:07.000000000 -0500
27486@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *fi
27487 break;
27488
27489 case HIDIOCAPPLICATION:
27490- if (arg < 0 || arg >= hid->maxapplication)
27491+ if (arg >= hid->maxapplication)
27492 break;
27493
27494 for (i = 0; i < hid->maxcollection; i++)
27495diff -urNp linux-3.1.1/drivers/hwmon/acpi_power_meter.c linux-3.1.1/drivers/hwmon/acpi_power_meter.c
27496--- linux-3.1.1/drivers/hwmon/acpi_power_meter.c 2011-11-11 15:19:27.000000000 -0500
27497+++ linux-3.1.1/drivers/hwmon/acpi_power_meter.c 2011-11-16 18:39:07.000000000 -0500
27498@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
27499 return res;
27500
27501 temp /= 1000;
27502- if (temp < 0)
27503- return -EINVAL;
27504
27505 mutex_lock(&resource->lock);
27506 resource->trip[attr->index - 7] = temp;
27507diff -urNp linux-3.1.1/drivers/hwmon/sht15.c linux-3.1.1/drivers/hwmon/sht15.c
27508--- linux-3.1.1/drivers/hwmon/sht15.c 2011-11-11 15:19:27.000000000 -0500
27509+++ linux-3.1.1/drivers/hwmon/sht15.c 2011-11-16 18:39:07.000000000 -0500
27510@@ -166,7 +166,7 @@ struct sht15_data {
27511 int supply_uV;
27512 bool supply_uV_valid;
27513 struct work_struct update_supply_work;
27514- atomic_t interrupt_handled;
27515+ atomic_unchecked_t interrupt_handled;
27516 };
27517
27518 /**
27519@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht1
27520 return ret;
27521
27522 gpio_direction_input(data->pdata->gpio_data);
27523- atomic_set(&data->interrupt_handled, 0);
27524+ atomic_set_unchecked(&data->interrupt_handled, 0);
27525
27526 enable_irq(gpio_to_irq(data->pdata->gpio_data));
27527 if (gpio_get_value(data->pdata->gpio_data) == 0) {
27528 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
27529 /* Only relevant if the interrupt hasn't occurred. */
27530- if (!atomic_read(&data->interrupt_handled))
27531+ if (!atomic_read_unchecked(&data->interrupt_handled))
27532 schedule_work(&data->read_work);
27533 }
27534 ret = wait_event_timeout(data->wait_queue,
27535@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired
27536
27537 /* First disable the interrupt */
27538 disable_irq_nosync(irq);
27539- atomic_inc(&data->interrupt_handled);
27540+ atomic_inc_unchecked(&data->interrupt_handled);
27541 /* Then schedule a reading work struct */
27542 if (data->state != SHT15_READING_NOTHING)
27543 schedule_work(&data->read_work);
27544@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct wo
27545 * If not, then start the interrupt again - care here as could
27546 * have gone low in meantime so verify it hasn't!
27547 */
27548- atomic_set(&data->interrupt_handled, 0);
27549+ atomic_set_unchecked(&data->interrupt_handled, 0);
27550 enable_irq(gpio_to_irq(data->pdata->gpio_data));
27551 /* If still not occurred or another handler has been scheduled */
27552 if (gpio_get_value(data->pdata->gpio_data)
27553- || atomic_read(&data->interrupt_handled))
27554+ || atomic_read_unchecked(&data->interrupt_handled))
27555 return;
27556 }
27557
27558diff -urNp linux-3.1.1/drivers/i2c/busses/i2c-amd756-s4882.c linux-3.1.1/drivers/i2c/busses/i2c-amd756-s4882.c
27559--- linux-3.1.1/drivers/i2c/busses/i2c-amd756-s4882.c 2011-11-11 15:19:27.000000000 -0500
27560+++ linux-3.1.1/drivers/i2c/busses/i2c-amd756-s4882.c 2011-11-16 18:39:07.000000000 -0500
27561@@ -43,7 +43,7 @@
27562 extern struct i2c_adapter amd756_smbus;
27563
27564 static struct i2c_adapter *s4882_adapter;
27565-static struct i2c_algorithm *s4882_algo;
27566+static i2c_algorithm_no_const *s4882_algo;
27567
27568 /* Wrapper access functions for multiplexed SMBus */
27569 static DEFINE_MUTEX(amd756_lock);
27570diff -urNp linux-3.1.1/drivers/i2c/busses/i2c-nforce2-s4985.c linux-3.1.1/drivers/i2c/busses/i2c-nforce2-s4985.c
27571--- linux-3.1.1/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-11-11 15:19:27.000000000 -0500
27572+++ linux-3.1.1/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-11-16 18:39:07.000000000 -0500
27573@@ -41,7 +41,7 @@
27574 extern struct i2c_adapter *nforce2_smbus;
27575
27576 static struct i2c_adapter *s4985_adapter;
27577-static struct i2c_algorithm *s4985_algo;
27578+static i2c_algorithm_no_const *s4985_algo;
27579
27580 /* Wrapper access functions for multiplexed SMBus */
27581 static DEFINE_MUTEX(nforce2_lock);
27582diff -urNp linux-3.1.1/drivers/i2c/i2c-mux.c linux-3.1.1/drivers/i2c/i2c-mux.c
27583--- linux-3.1.1/drivers/i2c/i2c-mux.c 2011-11-11 15:19:27.000000000 -0500
27584+++ linux-3.1.1/drivers/i2c/i2c-mux.c 2011-11-16 18:39:07.000000000 -0500
27585@@ -28,7 +28,7 @@
27586 /* multiplexer per channel data */
27587 struct i2c_mux_priv {
27588 struct i2c_adapter adap;
27589- struct i2c_algorithm algo;
27590+ i2c_algorithm_no_const algo;
27591
27592 struct i2c_adapter *parent;
27593 void *mux_dev; /* the mux chip/device */
27594diff -urNp linux-3.1.1/drivers/ide/aec62xx.c linux-3.1.1/drivers/ide/aec62xx.c
27595--- linux-3.1.1/drivers/ide/aec62xx.c 2011-11-11 15:19:27.000000000 -0500
27596+++ linux-3.1.1/drivers/ide/aec62xx.c 2011-11-16 18:39:07.000000000 -0500
27597@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_
27598 .cable_detect = atp86x_cable_detect,
27599 };
27600
27601-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
27602+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
27603 { /* 0: AEC6210 */
27604 .name = DRV_NAME,
27605 .init_chipset = init_chipset_aec62xx,
27606diff -urNp linux-3.1.1/drivers/ide/alim15x3.c linux-3.1.1/drivers/ide/alim15x3.c
27607--- linux-3.1.1/drivers/ide/alim15x3.c 2011-11-11 15:19:27.000000000 -0500
27608+++ linux-3.1.1/drivers/ide/alim15x3.c 2011-11-16 18:39:07.000000000 -0500
27609@@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_
27610 .dma_sff_read_status = ide_dma_sff_read_status,
27611 };
27612
27613-static const struct ide_port_info ali15x3_chipset __devinitdata = {
27614+static const struct ide_port_info ali15x3_chipset __devinitconst = {
27615 .name = DRV_NAME,
27616 .init_chipset = init_chipset_ali15x3,
27617 .init_hwif = init_hwif_ali15x3,
27618diff -urNp linux-3.1.1/drivers/ide/amd74xx.c linux-3.1.1/drivers/ide/amd74xx.c
27619--- linux-3.1.1/drivers/ide/amd74xx.c 2011-11-11 15:19:27.000000000 -0500
27620+++ linux-3.1.1/drivers/ide/amd74xx.c 2011-11-16 18:39:07.000000000 -0500
27621@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_por
27622 .udma_mask = udma, \
27623 }
27624
27625-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
27626+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
27627 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
27628 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
27629 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
27630diff -urNp linux-3.1.1/drivers/ide/atiixp.c linux-3.1.1/drivers/ide/atiixp.c
27631--- linux-3.1.1/drivers/ide/atiixp.c 2011-11-11 15:19:27.000000000 -0500
27632+++ linux-3.1.1/drivers/ide/atiixp.c 2011-11-16 18:39:07.000000000 -0500
27633@@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_
27634 .cable_detect = atiixp_cable_detect,
27635 };
27636
27637-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
27638+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
27639 { /* 0: IXP200/300/400/700 */
27640 .name = DRV_NAME,
27641 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
27642diff -urNp linux-3.1.1/drivers/ide/cmd64x.c linux-3.1.1/drivers/ide/cmd64x.c
27643--- linux-3.1.1/drivers/ide/cmd64x.c 2011-11-11 15:19:27.000000000 -0500
27644+++ linux-3.1.1/drivers/ide/cmd64x.c 2011-11-16 18:39:07.000000000 -0500
27645@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_r
27646 .dma_sff_read_status = ide_dma_sff_read_status,
27647 };
27648
27649-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
27650+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
27651 { /* 0: CMD643 */
27652 .name = DRV_NAME,
27653 .init_chipset = init_chipset_cmd64x,
27654diff -urNp linux-3.1.1/drivers/ide/cs5520.c linux-3.1.1/drivers/ide/cs5520.c
27655--- linux-3.1.1/drivers/ide/cs5520.c 2011-11-11 15:19:27.000000000 -0500
27656+++ linux-3.1.1/drivers/ide/cs5520.c 2011-11-16 18:39:07.000000000 -0500
27657@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_
27658 .set_dma_mode = cs5520_set_dma_mode,
27659 };
27660
27661-static const struct ide_port_info cyrix_chipset __devinitdata = {
27662+static const struct ide_port_info cyrix_chipset __devinitconst = {
27663 .name = DRV_NAME,
27664 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
27665 .port_ops = &cs5520_port_ops,
27666diff -urNp linux-3.1.1/drivers/ide/cs5530.c linux-3.1.1/drivers/ide/cs5530.c
27667--- linux-3.1.1/drivers/ide/cs5530.c 2011-11-11 15:19:27.000000000 -0500
27668+++ linux-3.1.1/drivers/ide/cs5530.c 2011-11-16 18:39:07.000000000 -0500
27669@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_
27670 .udma_filter = cs5530_udma_filter,
27671 };
27672
27673-static const struct ide_port_info cs5530_chipset __devinitdata = {
27674+static const struct ide_port_info cs5530_chipset __devinitconst = {
27675 .name = DRV_NAME,
27676 .init_chipset = init_chipset_cs5530,
27677 .init_hwif = init_hwif_cs5530,
27678diff -urNp linux-3.1.1/drivers/ide/cs5535.c linux-3.1.1/drivers/ide/cs5535.c
27679--- linux-3.1.1/drivers/ide/cs5535.c 2011-11-11 15:19:27.000000000 -0500
27680+++ linux-3.1.1/drivers/ide/cs5535.c 2011-11-16 18:39:07.000000000 -0500
27681@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_
27682 .cable_detect = cs5535_cable_detect,
27683 };
27684
27685-static const struct ide_port_info cs5535_chipset __devinitdata = {
27686+static const struct ide_port_info cs5535_chipset __devinitconst = {
27687 .name = DRV_NAME,
27688 .port_ops = &cs5535_port_ops,
27689 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
27690diff -urNp linux-3.1.1/drivers/ide/cy82c693.c linux-3.1.1/drivers/ide/cy82c693.c
27691--- linux-3.1.1/drivers/ide/cy82c693.c 2011-11-11 15:19:27.000000000 -0500
27692+++ linux-3.1.1/drivers/ide/cy82c693.c 2011-11-16 18:39:07.000000000 -0500
27693@@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c69
27694 .set_dma_mode = cy82c693_set_dma_mode,
27695 };
27696
27697-static const struct ide_port_info cy82c693_chipset __devinitdata = {
27698+static const struct ide_port_info cy82c693_chipset __devinitconst = {
27699 .name = DRV_NAME,
27700 .init_iops = init_iops_cy82c693,
27701 .port_ops = &cy82c693_port_ops,
27702diff -urNp linux-3.1.1/drivers/ide/hpt366.c linux-3.1.1/drivers/ide/hpt366.c
27703--- linux-3.1.1/drivers/ide/hpt366.c 2011-11-11 15:19:27.000000000 -0500
27704+++ linux-3.1.1/drivers/ide/hpt366.c 2011-11-16 18:39:07.000000000 -0500
27705@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings
27706 }
27707 };
27708
27709-static const struct hpt_info hpt36x __devinitdata = {
27710+static const struct hpt_info hpt36x __devinitconst = {
27711 .chip_name = "HPT36x",
27712 .chip_type = HPT36x,
27713 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
27714@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __de
27715 .timings = &hpt36x_timings
27716 };
27717
27718-static const struct hpt_info hpt370 __devinitdata = {
27719+static const struct hpt_info hpt370 __devinitconst = {
27720 .chip_name = "HPT370",
27721 .chip_type = HPT370,
27722 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
27723@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __de
27724 .timings = &hpt37x_timings
27725 };
27726
27727-static const struct hpt_info hpt370a __devinitdata = {
27728+static const struct hpt_info hpt370a __devinitconst = {
27729 .chip_name = "HPT370A",
27730 .chip_type = HPT370A,
27731 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
27732@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __d
27733 .timings = &hpt37x_timings
27734 };
27735
27736-static const struct hpt_info hpt374 __devinitdata = {
27737+static const struct hpt_info hpt374 __devinitconst = {
27738 .chip_name = "HPT374",
27739 .chip_type = HPT374,
27740 .udma_mask = ATA_UDMA5,
27741@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __de
27742 .timings = &hpt37x_timings
27743 };
27744
27745-static const struct hpt_info hpt372 __devinitdata = {
27746+static const struct hpt_info hpt372 __devinitconst = {
27747 .chip_name = "HPT372",
27748 .chip_type = HPT372,
27749 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27750@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __de
27751 .timings = &hpt37x_timings
27752 };
27753
27754-static const struct hpt_info hpt372a __devinitdata = {
27755+static const struct hpt_info hpt372a __devinitconst = {
27756 .chip_name = "HPT372A",
27757 .chip_type = HPT372A,
27758 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27759@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __d
27760 .timings = &hpt37x_timings
27761 };
27762
27763-static const struct hpt_info hpt302 __devinitdata = {
27764+static const struct hpt_info hpt302 __devinitconst = {
27765 .chip_name = "HPT302",
27766 .chip_type = HPT302,
27767 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27768@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __de
27769 .timings = &hpt37x_timings
27770 };
27771
27772-static const struct hpt_info hpt371 __devinitdata = {
27773+static const struct hpt_info hpt371 __devinitconst = {
27774 .chip_name = "HPT371",
27775 .chip_type = HPT371,
27776 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27777@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __de
27778 .timings = &hpt37x_timings
27779 };
27780
27781-static const struct hpt_info hpt372n __devinitdata = {
27782+static const struct hpt_info hpt372n __devinitconst = {
27783 .chip_name = "HPT372N",
27784 .chip_type = HPT372N,
27785 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27786@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __d
27787 .timings = &hpt37x_timings
27788 };
27789
27790-static const struct hpt_info hpt302n __devinitdata = {
27791+static const struct hpt_info hpt302n __devinitconst = {
27792 .chip_name = "HPT302N",
27793 .chip_type = HPT302N,
27794 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27795@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __d
27796 .timings = &hpt37x_timings
27797 };
27798
27799-static const struct hpt_info hpt371n __devinitdata = {
27800+static const struct hpt_info hpt371n __devinitconst = {
27801 .chip_name = "HPT371N",
27802 .chip_type = HPT371N,
27803 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27804@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_d
27805 .dma_sff_read_status = ide_dma_sff_read_status,
27806 };
27807
27808-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
27809+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
27810 { /* 0: HPT36x */
27811 .name = DRV_NAME,
27812 .init_chipset = init_chipset_hpt366,
27813diff -urNp linux-3.1.1/drivers/ide/ide-cd.c linux-3.1.1/drivers/ide/ide-cd.c
27814--- linux-3.1.1/drivers/ide/ide-cd.c 2011-11-11 15:19:27.000000000 -0500
27815+++ linux-3.1.1/drivers/ide/ide-cd.c 2011-11-16 18:39:07.000000000 -0500
27816@@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
27817 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
27818 if ((unsigned long)buf & alignment
27819 || blk_rq_bytes(rq) & q->dma_pad_mask
27820- || object_is_on_stack(buf))
27821+ || object_starts_on_stack(buf))
27822 drive->dma = 0;
27823 }
27824 }
27825diff -urNp linux-3.1.1/drivers/ide/ide-floppy.c linux-3.1.1/drivers/ide/ide-floppy.c
27826--- linux-3.1.1/drivers/ide/ide-floppy.c 2011-11-11 15:19:27.000000000 -0500
27827+++ linux-3.1.1/drivers/ide/ide-floppy.c 2011-11-16 18:40:10.000000000 -0500
27828@@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
27829 u8 pc_buf[256], header_len, desc_cnt;
27830 int i, rc = 1, blocks, length;
27831
27832+ pax_track_stack();
27833+
27834 ide_debug_log(IDE_DBG_FUNC, "enter");
27835
27836 drive->bios_cyl = 0;
27837diff -urNp linux-3.1.1/drivers/ide/ide-pci-generic.c linux-3.1.1/drivers/ide/ide-pci-generic.c
27838--- linux-3.1.1/drivers/ide/ide-pci-generic.c 2011-11-11 15:19:27.000000000 -0500
27839+++ linux-3.1.1/drivers/ide/ide-pci-generic.c 2011-11-16 18:39:07.000000000 -0500
27840@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell
27841 .udma_mask = ATA_UDMA6, \
27842 }
27843
27844-static const struct ide_port_info generic_chipsets[] __devinitdata = {
27845+static const struct ide_port_info generic_chipsets[] __devinitconst = {
27846 /* 0: Unknown */
27847 DECLARE_GENERIC_PCI_DEV(0),
27848
27849diff -urNp linux-3.1.1/drivers/ide/it8172.c linux-3.1.1/drivers/ide/it8172.c
27850--- linux-3.1.1/drivers/ide/it8172.c 2011-11-11 15:19:27.000000000 -0500
27851+++ linux-3.1.1/drivers/ide/it8172.c 2011-11-16 18:39:07.000000000 -0500
27852@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_
27853 .set_dma_mode = it8172_set_dma_mode,
27854 };
27855
27856-static const struct ide_port_info it8172_port_info __devinitdata = {
27857+static const struct ide_port_info it8172_port_info __devinitconst = {
27858 .name = DRV_NAME,
27859 .port_ops = &it8172_port_ops,
27860 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
27861diff -urNp linux-3.1.1/drivers/ide/it8213.c linux-3.1.1/drivers/ide/it8213.c
27862--- linux-3.1.1/drivers/ide/it8213.c 2011-11-11 15:19:27.000000000 -0500
27863+++ linux-3.1.1/drivers/ide/it8213.c 2011-11-16 18:39:07.000000000 -0500
27864@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_
27865 .cable_detect = it8213_cable_detect,
27866 };
27867
27868-static const struct ide_port_info it8213_chipset __devinitdata = {
27869+static const struct ide_port_info it8213_chipset __devinitconst = {
27870 .name = DRV_NAME,
27871 .enablebits = { {0x41, 0x80, 0x80} },
27872 .port_ops = &it8213_port_ops,
27873diff -urNp linux-3.1.1/drivers/ide/it821x.c linux-3.1.1/drivers/ide/it821x.c
27874--- linux-3.1.1/drivers/ide/it821x.c 2011-11-11 15:19:27.000000000 -0500
27875+++ linux-3.1.1/drivers/ide/it821x.c 2011-11-16 18:39:07.000000000 -0500
27876@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_
27877 .cable_detect = it821x_cable_detect,
27878 };
27879
27880-static const struct ide_port_info it821x_chipset __devinitdata = {
27881+static const struct ide_port_info it821x_chipset __devinitconst = {
27882 .name = DRV_NAME,
27883 .init_chipset = init_chipset_it821x,
27884 .init_hwif = init_hwif_it821x,
27885diff -urNp linux-3.1.1/drivers/ide/jmicron.c linux-3.1.1/drivers/ide/jmicron.c
27886--- linux-3.1.1/drivers/ide/jmicron.c 2011-11-11 15:19:27.000000000 -0500
27887+++ linux-3.1.1/drivers/ide/jmicron.c 2011-11-16 18:39:07.000000000 -0500
27888@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron
27889 .cable_detect = jmicron_cable_detect,
27890 };
27891
27892-static const struct ide_port_info jmicron_chipset __devinitdata = {
27893+static const struct ide_port_info jmicron_chipset __devinitconst = {
27894 .name = DRV_NAME,
27895 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
27896 .port_ops = &jmicron_port_ops,
27897diff -urNp linux-3.1.1/drivers/ide/ns87415.c linux-3.1.1/drivers/ide/ns87415.c
27898--- linux-3.1.1/drivers/ide/ns87415.c 2011-11-11 15:19:27.000000000 -0500
27899+++ linux-3.1.1/drivers/ide/ns87415.c 2011-11-16 18:39:07.000000000 -0500
27900@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_
27901 .dma_sff_read_status = superio_dma_sff_read_status,
27902 };
27903
27904-static const struct ide_port_info ns87415_chipset __devinitdata = {
27905+static const struct ide_port_info ns87415_chipset __devinitconst = {
27906 .name = DRV_NAME,
27907 .init_hwif = init_hwif_ns87415,
27908 .tp_ops = &ns87415_tp_ops,
27909diff -urNp linux-3.1.1/drivers/ide/opti621.c linux-3.1.1/drivers/ide/opti621.c
27910--- linux-3.1.1/drivers/ide/opti621.c 2011-11-11 15:19:27.000000000 -0500
27911+++ linux-3.1.1/drivers/ide/opti621.c 2011-11-16 18:39:07.000000000 -0500
27912@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621
27913 .set_pio_mode = opti621_set_pio_mode,
27914 };
27915
27916-static const struct ide_port_info opti621_chipset __devinitdata = {
27917+static const struct ide_port_info opti621_chipset __devinitconst = {
27918 .name = DRV_NAME,
27919 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
27920 .port_ops = &opti621_port_ops,
27921diff -urNp linux-3.1.1/drivers/ide/pdc202xx_new.c linux-3.1.1/drivers/ide/pdc202xx_new.c
27922--- linux-3.1.1/drivers/ide/pdc202xx_new.c 2011-11-11 15:19:27.000000000 -0500
27923+++ linux-3.1.1/drivers/ide/pdc202xx_new.c 2011-11-16 18:39:07.000000000 -0500
27924@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_
27925 .udma_mask = udma, \
27926 }
27927
27928-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
27929+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
27930 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
27931 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
27932 };
27933diff -urNp linux-3.1.1/drivers/ide/pdc202xx_old.c linux-3.1.1/drivers/ide/pdc202xx_old.c
27934--- linux-3.1.1/drivers/ide/pdc202xx_old.c 2011-11-11 15:19:27.000000000 -0500
27935+++ linux-3.1.1/drivers/ide/pdc202xx_old.c 2011-11-16 18:39:07.000000000 -0500
27936@@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x
27937 .max_sectors = sectors, \
27938 }
27939
27940-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
27941+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
27942 { /* 0: PDC20246 */
27943 .name = DRV_NAME,
27944 .init_chipset = init_chipset_pdc202xx,
27945diff -urNp linux-3.1.1/drivers/ide/piix.c linux-3.1.1/drivers/ide/piix.c
27946--- linux-3.1.1/drivers/ide/piix.c 2011-11-11 15:19:27.000000000 -0500
27947+++ linux-3.1.1/drivers/ide/piix.c 2011-11-16 18:39:07.000000000 -0500
27948@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_por
27949 .udma_mask = udma, \
27950 }
27951
27952-static const struct ide_port_info piix_pci_info[] __devinitdata = {
27953+static const struct ide_port_info piix_pci_info[] __devinitconst = {
27954 /* 0: MPIIX */
27955 { /*
27956 * MPIIX actually has only a single IDE channel mapped to
27957diff -urNp linux-3.1.1/drivers/ide/rz1000.c linux-3.1.1/drivers/ide/rz1000.c
27958--- linux-3.1.1/drivers/ide/rz1000.c 2011-11-11 15:19:27.000000000 -0500
27959+++ linux-3.1.1/drivers/ide/rz1000.c 2011-11-16 18:39:07.000000000 -0500
27960@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_read
27961 }
27962 }
27963
27964-static const struct ide_port_info rz1000_chipset __devinitdata = {
27965+static const struct ide_port_info rz1000_chipset __devinitconst = {
27966 .name = DRV_NAME,
27967 .host_flags = IDE_HFLAG_NO_DMA,
27968 };
27969diff -urNp linux-3.1.1/drivers/ide/sc1200.c linux-3.1.1/drivers/ide/sc1200.c
27970--- linux-3.1.1/drivers/ide/sc1200.c 2011-11-11 15:19:27.000000000 -0500
27971+++ linux-3.1.1/drivers/ide/sc1200.c 2011-11-16 18:39:07.000000000 -0500
27972@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_d
27973 .dma_sff_read_status = ide_dma_sff_read_status,
27974 };
27975
27976-static const struct ide_port_info sc1200_chipset __devinitdata = {
27977+static const struct ide_port_info sc1200_chipset __devinitconst = {
27978 .name = DRV_NAME,
27979 .port_ops = &sc1200_port_ops,
27980 .dma_ops = &sc1200_dma_ops,
27981diff -urNp linux-3.1.1/drivers/ide/scc_pata.c linux-3.1.1/drivers/ide/scc_pata.c
27982--- linux-3.1.1/drivers/ide/scc_pata.c 2011-11-11 15:19:27.000000000 -0500
27983+++ linux-3.1.1/drivers/ide/scc_pata.c 2011-11-16 18:39:07.000000000 -0500
27984@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_
27985 .dma_sff_read_status = scc_dma_sff_read_status,
27986 };
27987
27988-static const struct ide_port_info scc_chipset __devinitdata = {
27989+static const struct ide_port_info scc_chipset __devinitconst = {
27990 .name = "sccIDE",
27991 .init_iops = init_iops_scc,
27992 .init_dma = scc_init_dma,
27993diff -urNp linux-3.1.1/drivers/ide/serverworks.c linux-3.1.1/drivers/ide/serverworks.c
27994--- linux-3.1.1/drivers/ide/serverworks.c 2011-11-11 15:19:27.000000000 -0500
27995+++ linux-3.1.1/drivers/ide/serverworks.c 2011-11-16 18:39:07.000000000 -0500
27996@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_p
27997 .cable_detect = svwks_cable_detect,
27998 };
27999
28000-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
28001+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
28002 { /* 0: OSB4 */
28003 .name = DRV_NAME,
28004 .init_chipset = init_chipset_svwks,
28005diff -urNp linux-3.1.1/drivers/ide/setup-pci.c linux-3.1.1/drivers/ide/setup-pci.c
28006--- linux-3.1.1/drivers/ide/setup-pci.c 2011-11-11 15:19:27.000000000 -0500
28007+++ linux-3.1.1/drivers/ide/setup-pci.c 2011-11-16 18:40:10.000000000 -0500
28008@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
28009 int ret, i, n_ports = dev2 ? 4 : 2;
28010 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
28011
28012+ pax_track_stack();
28013+
28014 for (i = 0; i < n_ports / 2; i++) {
28015 ret = ide_setup_pci_controller(pdev[i], d, !i);
28016 if (ret < 0)
28017diff -urNp linux-3.1.1/drivers/ide/siimage.c linux-3.1.1/drivers/ide/siimage.c
28018--- linux-3.1.1/drivers/ide/siimage.c 2011-11-11 15:19:27.000000000 -0500
28019+++ linux-3.1.1/drivers/ide/siimage.c 2011-11-16 18:39:07.000000000 -0500
28020@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_
28021 .udma_mask = ATA_UDMA6, \
28022 }
28023
28024-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
28025+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
28026 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
28027 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
28028 };
28029diff -urNp linux-3.1.1/drivers/ide/sis5513.c linux-3.1.1/drivers/ide/sis5513.c
28030--- linux-3.1.1/drivers/ide/sis5513.c 2011-11-11 15:19:27.000000000 -0500
28031+++ linux-3.1.1/drivers/ide/sis5513.c 2011-11-16 18:39:07.000000000 -0500
28032@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata
28033 .cable_detect = sis_cable_detect,
28034 };
28035
28036-static const struct ide_port_info sis5513_chipset __devinitdata = {
28037+static const struct ide_port_info sis5513_chipset __devinitconst = {
28038 .name = DRV_NAME,
28039 .init_chipset = init_chipset_sis5513,
28040 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
28041diff -urNp linux-3.1.1/drivers/ide/sl82c105.c linux-3.1.1/drivers/ide/sl82c105.c
28042--- linux-3.1.1/drivers/ide/sl82c105.c 2011-11-11 15:19:27.000000000 -0500
28043+++ linux-3.1.1/drivers/ide/sl82c105.c 2011-11-16 18:39:07.000000000 -0500
28044@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105
28045 .dma_sff_read_status = ide_dma_sff_read_status,
28046 };
28047
28048-static const struct ide_port_info sl82c105_chipset __devinitdata = {
28049+static const struct ide_port_info sl82c105_chipset __devinitconst = {
28050 .name = DRV_NAME,
28051 .init_chipset = init_chipset_sl82c105,
28052 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
28053diff -urNp linux-3.1.1/drivers/ide/slc90e66.c linux-3.1.1/drivers/ide/slc90e66.c
28054--- linux-3.1.1/drivers/ide/slc90e66.c 2011-11-11 15:19:27.000000000 -0500
28055+++ linux-3.1.1/drivers/ide/slc90e66.c 2011-11-16 18:39:07.000000000 -0500
28056@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e6
28057 .cable_detect = slc90e66_cable_detect,
28058 };
28059
28060-static const struct ide_port_info slc90e66_chipset __devinitdata = {
28061+static const struct ide_port_info slc90e66_chipset __devinitconst = {
28062 .name = DRV_NAME,
28063 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
28064 .port_ops = &slc90e66_port_ops,
28065diff -urNp linux-3.1.1/drivers/ide/tc86c001.c linux-3.1.1/drivers/ide/tc86c001.c
28066--- linux-3.1.1/drivers/ide/tc86c001.c 2011-11-11 15:19:27.000000000 -0500
28067+++ linux-3.1.1/drivers/ide/tc86c001.c 2011-11-16 18:39:07.000000000 -0500
28068@@ -191,7 +191,7 @@ static const struct ide_dma_ops tc86c001
28069 .dma_sff_read_status = ide_dma_sff_read_status,
28070 };
28071
28072-static const struct ide_port_info tc86c001_chipset __devinitdata = {
28073+static const struct ide_port_info tc86c001_chipset __devinitconst = {
28074 .name = DRV_NAME,
28075 .init_hwif = init_hwif_tc86c001,
28076 .port_ops = &tc86c001_port_ops,
28077diff -urNp linux-3.1.1/drivers/ide/triflex.c linux-3.1.1/drivers/ide/triflex.c
28078--- linux-3.1.1/drivers/ide/triflex.c 2011-11-11 15:19:27.000000000 -0500
28079+++ linux-3.1.1/drivers/ide/triflex.c 2011-11-16 18:39:07.000000000 -0500
28080@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex
28081 .set_dma_mode = triflex_set_mode,
28082 };
28083
28084-static const struct ide_port_info triflex_device __devinitdata = {
28085+static const struct ide_port_info triflex_device __devinitconst = {
28086 .name = DRV_NAME,
28087 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
28088 .port_ops = &triflex_port_ops,
28089diff -urNp linux-3.1.1/drivers/ide/trm290.c linux-3.1.1/drivers/ide/trm290.c
28090--- linux-3.1.1/drivers/ide/trm290.c 2011-11-11 15:19:27.000000000 -0500
28091+++ linux-3.1.1/drivers/ide/trm290.c 2011-11-16 18:39:07.000000000 -0500
28092@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops
28093 .dma_check = trm290_dma_check,
28094 };
28095
28096-static const struct ide_port_info trm290_chipset __devinitdata = {
28097+static const struct ide_port_info trm290_chipset __devinitconst = {
28098 .name = DRV_NAME,
28099 .init_hwif = init_hwif_trm290,
28100 .tp_ops = &trm290_tp_ops,
28101diff -urNp linux-3.1.1/drivers/ide/via82cxxx.c linux-3.1.1/drivers/ide/via82cxxx.c
28102--- linux-3.1.1/drivers/ide/via82cxxx.c 2011-11-11 15:19:27.000000000 -0500
28103+++ linux-3.1.1/drivers/ide/via82cxxx.c 2011-11-16 18:39:07.000000000 -0500
28104@@ -403,7 +403,7 @@ static const struct ide_port_ops via_por
28105 .cable_detect = via82cxxx_cable_detect,
28106 };
28107
28108-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
28109+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
28110 .name = DRV_NAME,
28111 .init_chipset = init_chipset_via82cxxx,
28112 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
28113diff -urNp linux-3.1.1/drivers/infiniband/core/cm.c linux-3.1.1/drivers/infiniband/core/cm.c
28114--- linux-3.1.1/drivers/infiniband/core/cm.c 2011-11-11 15:19:27.000000000 -0500
28115+++ linux-3.1.1/drivers/infiniband/core/cm.c 2011-11-16 18:39:07.000000000 -0500
28116@@ -113,7 +113,7 @@ static char const counter_group_names[CM
28117
28118 struct cm_counter_group {
28119 struct kobject obj;
28120- atomic_long_t counter[CM_ATTR_COUNT];
28121+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
28122 };
28123
28124 struct cm_counter_attribute {
28125@@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
28126 struct ib_mad_send_buf *msg = NULL;
28127 int ret;
28128
28129- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28130+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28131 counter[CM_REQ_COUNTER]);
28132
28133 /* Quick state check to discard duplicate REQs. */
28134@@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
28135 if (!cm_id_priv)
28136 return;
28137
28138- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28139+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28140 counter[CM_REP_COUNTER]);
28141 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
28142 if (ret)
28143@@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
28144 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
28145 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
28146 spin_unlock_irq(&cm_id_priv->lock);
28147- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28148+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28149 counter[CM_RTU_COUNTER]);
28150 goto out;
28151 }
28152@@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
28153 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
28154 dreq_msg->local_comm_id);
28155 if (!cm_id_priv) {
28156- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28157+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28158 counter[CM_DREQ_COUNTER]);
28159 cm_issue_drep(work->port, work->mad_recv_wc);
28160 return -EINVAL;
28161@@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
28162 case IB_CM_MRA_REP_RCVD:
28163 break;
28164 case IB_CM_TIMEWAIT:
28165- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28166+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28167 counter[CM_DREQ_COUNTER]);
28168 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
28169 goto unlock;
28170@@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
28171 cm_free_msg(msg);
28172 goto deref;
28173 case IB_CM_DREQ_RCVD:
28174- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28175+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28176 counter[CM_DREQ_COUNTER]);
28177 goto unlock;
28178 default:
28179@@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
28180 ib_modify_mad(cm_id_priv->av.port->mad_agent,
28181 cm_id_priv->msg, timeout)) {
28182 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
28183- atomic_long_inc(&work->port->
28184+ atomic_long_inc_unchecked(&work->port->
28185 counter_group[CM_RECV_DUPLICATES].
28186 counter[CM_MRA_COUNTER]);
28187 goto out;
28188@@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
28189 break;
28190 case IB_CM_MRA_REQ_RCVD:
28191 case IB_CM_MRA_REP_RCVD:
28192- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28193+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28194 counter[CM_MRA_COUNTER]);
28195 /* fall through */
28196 default:
28197@@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
28198 case IB_CM_LAP_IDLE:
28199 break;
28200 case IB_CM_MRA_LAP_SENT:
28201- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28202+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28203 counter[CM_LAP_COUNTER]);
28204 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
28205 goto unlock;
28206@@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
28207 cm_free_msg(msg);
28208 goto deref;
28209 case IB_CM_LAP_RCVD:
28210- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28211+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28212 counter[CM_LAP_COUNTER]);
28213 goto unlock;
28214 default:
28215@@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
28216 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
28217 if (cur_cm_id_priv) {
28218 spin_unlock_irq(&cm.lock);
28219- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28220+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28221 counter[CM_SIDR_REQ_COUNTER]);
28222 goto out; /* Duplicate message. */
28223 }
28224@@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
28225 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
28226 msg->retries = 1;
28227
28228- atomic_long_add(1 + msg->retries,
28229+ atomic_long_add_unchecked(1 + msg->retries,
28230 &port->counter_group[CM_XMIT].counter[attr_index]);
28231 if (msg->retries)
28232- atomic_long_add(msg->retries,
28233+ atomic_long_add_unchecked(msg->retries,
28234 &port->counter_group[CM_XMIT_RETRIES].
28235 counter[attr_index]);
28236
28237@@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
28238 }
28239
28240 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
28241- atomic_long_inc(&port->counter_group[CM_RECV].
28242+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
28243 counter[attr_id - CM_ATTR_ID_OFFSET]);
28244
28245 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
28246@@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
28247 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
28248
28249 return sprintf(buf, "%ld\n",
28250- atomic_long_read(&group->counter[cm_attr->index]));
28251+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
28252 }
28253
28254 static const struct sysfs_ops cm_counter_ops = {
28255diff -urNp linux-3.1.1/drivers/infiniband/core/fmr_pool.c linux-3.1.1/drivers/infiniband/core/fmr_pool.c
28256--- linux-3.1.1/drivers/infiniband/core/fmr_pool.c 2011-11-11 15:19:27.000000000 -0500
28257+++ linux-3.1.1/drivers/infiniband/core/fmr_pool.c 2011-11-16 18:39:07.000000000 -0500
28258@@ -97,8 +97,8 @@ struct ib_fmr_pool {
28259
28260 struct task_struct *thread;
28261
28262- atomic_t req_ser;
28263- atomic_t flush_ser;
28264+ atomic_unchecked_t req_ser;
28265+ atomic_unchecked_t flush_ser;
28266
28267 wait_queue_head_t force_wait;
28268 };
28269@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
28270 struct ib_fmr_pool *pool = pool_ptr;
28271
28272 do {
28273- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
28274+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
28275 ib_fmr_batch_release(pool);
28276
28277- atomic_inc(&pool->flush_ser);
28278+ atomic_inc_unchecked(&pool->flush_ser);
28279 wake_up_interruptible(&pool->force_wait);
28280
28281 if (pool->flush_function)
28282@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
28283 }
28284
28285 set_current_state(TASK_INTERRUPTIBLE);
28286- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
28287+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
28288 !kthread_should_stop())
28289 schedule();
28290 __set_current_state(TASK_RUNNING);
28291@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
28292 pool->dirty_watermark = params->dirty_watermark;
28293 pool->dirty_len = 0;
28294 spin_lock_init(&pool->pool_lock);
28295- atomic_set(&pool->req_ser, 0);
28296- atomic_set(&pool->flush_ser, 0);
28297+ atomic_set_unchecked(&pool->req_ser, 0);
28298+ atomic_set_unchecked(&pool->flush_ser, 0);
28299 init_waitqueue_head(&pool->force_wait);
28300
28301 pool->thread = kthread_run(ib_fmr_cleanup_thread,
28302@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
28303 }
28304 spin_unlock_irq(&pool->pool_lock);
28305
28306- serial = atomic_inc_return(&pool->req_ser);
28307+ serial = atomic_inc_return_unchecked(&pool->req_ser);
28308 wake_up_process(pool->thread);
28309
28310 if (wait_event_interruptible(pool->force_wait,
28311- atomic_read(&pool->flush_ser) - serial >= 0))
28312+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
28313 return -EINTR;
28314
28315 return 0;
28316@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
28317 } else {
28318 list_add_tail(&fmr->list, &pool->dirty_list);
28319 if (++pool->dirty_len >= pool->dirty_watermark) {
28320- atomic_inc(&pool->req_ser);
28321+ atomic_inc_unchecked(&pool->req_ser);
28322 wake_up_process(pool->thread);
28323 }
28324 }
28325diff -urNp linux-3.1.1/drivers/infiniband/hw/cxgb4/mem.c linux-3.1.1/drivers/infiniband/hw/cxgb4/mem.c
28326--- linux-3.1.1/drivers/infiniband/hw/cxgb4/mem.c 2011-11-11 15:19:27.000000000 -0500
28327+++ linux-3.1.1/drivers/infiniband/hw/cxgb4/mem.c 2011-11-16 18:39:07.000000000 -0500
28328@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
28329 int err;
28330 struct fw_ri_tpte tpt;
28331 u32 stag_idx;
28332- static atomic_t key;
28333+ static atomic_unchecked_t key;
28334
28335 if (c4iw_fatal_error(rdev))
28336 return -EIO;
28337@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
28338 &rdev->resource.tpt_fifo_lock);
28339 if (!stag_idx)
28340 return -ENOMEM;
28341- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
28342+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
28343 }
28344 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
28345 __func__, stag_state, type, pdid, stag_idx);
28346diff -urNp linux-3.1.1/drivers/infiniband/hw/ipath/ipath_fs.c linux-3.1.1/drivers/infiniband/hw/ipath/ipath_fs.c
28347--- linux-3.1.1/drivers/infiniband/hw/ipath/ipath_fs.c 2011-11-11 15:19:27.000000000 -0500
28348+++ linux-3.1.1/drivers/infiniband/hw/ipath/ipath_fs.c 2011-11-16 18:40:10.000000000 -0500
28349@@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
28350 struct infinipath_counters counters;
28351 struct ipath_devdata *dd;
28352
28353+ pax_track_stack();
28354+
28355 dd = file->f_path.dentry->d_inode->i_private;
28356 dd->ipath_f_read_counters(dd, &counters);
28357
28358diff -urNp linux-3.1.1/drivers/infiniband/hw/ipath/ipath_rc.c linux-3.1.1/drivers/infiniband/hw/ipath/ipath_rc.c
28359--- linux-3.1.1/drivers/infiniband/hw/ipath/ipath_rc.c 2011-11-11 15:19:27.000000000 -0500
28360+++ linux-3.1.1/drivers/infiniband/hw/ipath/ipath_rc.c 2011-11-16 18:39:07.000000000 -0500
28361@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
28362 struct ib_atomic_eth *ateth;
28363 struct ipath_ack_entry *e;
28364 u64 vaddr;
28365- atomic64_t *maddr;
28366+ atomic64_unchecked_t *maddr;
28367 u64 sdata;
28368 u32 rkey;
28369 u8 next;
28370@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
28371 IB_ACCESS_REMOTE_ATOMIC)))
28372 goto nack_acc_unlck;
28373 /* Perform atomic OP and save result. */
28374- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
28375+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
28376 sdata = be64_to_cpu(ateth->swap_data);
28377 e = &qp->s_ack_queue[qp->r_head_ack_queue];
28378 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
28379- (u64) atomic64_add_return(sdata, maddr) - sdata :
28380+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
28381 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
28382 be64_to_cpu(ateth->compare_data),
28383 sdata);
28384diff -urNp linux-3.1.1/drivers/infiniband/hw/ipath/ipath_ruc.c linux-3.1.1/drivers/infiniband/hw/ipath/ipath_ruc.c
28385--- linux-3.1.1/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-11-11 15:19:27.000000000 -0500
28386+++ linux-3.1.1/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-11-16 18:39:07.000000000 -0500
28387@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
28388 unsigned long flags;
28389 struct ib_wc wc;
28390 u64 sdata;
28391- atomic64_t *maddr;
28392+ atomic64_unchecked_t *maddr;
28393 enum ib_wc_status send_status;
28394
28395 /*
28396@@ -382,11 +382,11 @@ again:
28397 IB_ACCESS_REMOTE_ATOMIC)))
28398 goto acc_err;
28399 /* Perform atomic OP and save result. */
28400- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
28401+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
28402 sdata = wqe->wr.wr.atomic.compare_add;
28403 *(u64 *) sqp->s_sge.sge.vaddr =
28404 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
28405- (u64) atomic64_add_return(sdata, maddr) - sdata :
28406+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
28407 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
28408 sdata, wqe->wr.wr.atomic.swap);
28409 goto send_comp;
28410diff -urNp linux-3.1.1/drivers/infiniband/hw/nes/nes.c linux-3.1.1/drivers/infiniband/hw/nes/nes.c
28411--- linux-3.1.1/drivers/infiniband/hw/nes/nes.c 2011-11-11 15:19:27.000000000 -0500
28412+++ linux-3.1.1/drivers/infiniband/hw/nes/nes.c 2011-11-16 18:39:07.000000000 -0500
28413@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
28414 LIST_HEAD(nes_adapter_list);
28415 static LIST_HEAD(nes_dev_list);
28416
28417-atomic_t qps_destroyed;
28418+atomic_unchecked_t qps_destroyed;
28419
28420 static unsigned int ee_flsh_adapter;
28421 static unsigned int sysfs_nonidx_addr;
28422@@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
28423 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
28424 struct nes_adapter *nesadapter = nesdev->nesadapter;
28425
28426- atomic_inc(&qps_destroyed);
28427+ atomic_inc_unchecked(&qps_destroyed);
28428
28429 /* Free the control structures */
28430
28431diff -urNp linux-3.1.1/drivers/infiniband/hw/nes/nes_cm.c linux-3.1.1/drivers/infiniband/hw/nes/nes_cm.c
28432--- linux-3.1.1/drivers/infiniband/hw/nes/nes_cm.c 2011-11-11 15:19:27.000000000 -0500
28433+++ linux-3.1.1/drivers/infiniband/hw/nes/nes_cm.c 2011-11-16 18:39:07.000000000 -0500
28434@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
28435 u32 cm_packets_retrans;
28436 u32 cm_packets_created;
28437 u32 cm_packets_received;
28438-atomic_t cm_listens_created;
28439-atomic_t cm_listens_destroyed;
28440+atomic_unchecked_t cm_listens_created;
28441+atomic_unchecked_t cm_listens_destroyed;
28442 u32 cm_backlog_drops;
28443-atomic_t cm_loopbacks;
28444-atomic_t cm_nodes_created;
28445-atomic_t cm_nodes_destroyed;
28446-atomic_t cm_accel_dropped_pkts;
28447-atomic_t cm_resets_recvd;
28448+atomic_unchecked_t cm_loopbacks;
28449+atomic_unchecked_t cm_nodes_created;
28450+atomic_unchecked_t cm_nodes_destroyed;
28451+atomic_unchecked_t cm_accel_dropped_pkts;
28452+atomic_unchecked_t cm_resets_recvd;
28453
28454 static inline int mini_cm_accelerated(struct nes_cm_core *,
28455 struct nes_cm_node *);
28456@@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
28457
28458 static struct nes_cm_core *g_cm_core;
28459
28460-atomic_t cm_connects;
28461-atomic_t cm_accepts;
28462-atomic_t cm_disconnects;
28463-atomic_t cm_closes;
28464-atomic_t cm_connecteds;
28465-atomic_t cm_connect_reqs;
28466-atomic_t cm_rejects;
28467+atomic_unchecked_t cm_connects;
28468+atomic_unchecked_t cm_accepts;
28469+atomic_unchecked_t cm_disconnects;
28470+atomic_unchecked_t cm_closes;
28471+atomic_unchecked_t cm_connecteds;
28472+atomic_unchecked_t cm_connect_reqs;
28473+atomic_unchecked_t cm_rejects;
28474
28475
28476 /**
28477@@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
28478 kfree(listener);
28479 listener = NULL;
28480 ret = 0;
28481- atomic_inc(&cm_listens_destroyed);
28482+ atomic_inc_unchecked(&cm_listens_destroyed);
28483 } else {
28484 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
28485 }
28486@@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
28487 cm_node->rem_mac);
28488
28489 add_hte_node(cm_core, cm_node);
28490- atomic_inc(&cm_nodes_created);
28491+ atomic_inc_unchecked(&cm_nodes_created);
28492
28493 return cm_node;
28494 }
28495@@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
28496 }
28497
28498 atomic_dec(&cm_core->node_cnt);
28499- atomic_inc(&cm_nodes_destroyed);
28500+ atomic_inc_unchecked(&cm_nodes_destroyed);
28501 nesqp = cm_node->nesqp;
28502 if (nesqp) {
28503 nesqp->cm_node = NULL;
28504@@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
28505
28506 static void drop_packet(struct sk_buff *skb)
28507 {
28508- atomic_inc(&cm_accel_dropped_pkts);
28509+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
28510 dev_kfree_skb_any(skb);
28511 }
28512
28513@@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
28514 {
28515
28516 int reset = 0; /* whether to send reset in case of err.. */
28517- atomic_inc(&cm_resets_recvd);
28518+ atomic_inc_unchecked(&cm_resets_recvd);
28519 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
28520 " refcnt=%d\n", cm_node, cm_node->state,
28521 atomic_read(&cm_node->ref_count));
28522@@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
28523 rem_ref_cm_node(cm_node->cm_core, cm_node);
28524 return NULL;
28525 }
28526- atomic_inc(&cm_loopbacks);
28527+ atomic_inc_unchecked(&cm_loopbacks);
28528 loopbackremotenode->loopbackpartner = cm_node;
28529 loopbackremotenode->tcp_cntxt.rcv_wscale =
28530 NES_CM_DEFAULT_RCV_WND_SCALE;
28531@@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
28532 add_ref_cm_node(cm_node);
28533 } else if (cm_node->state == NES_CM_STATE_TSA) {
28534 rem_ref_cm_node(cm_core, cm_node);
28535- atomic_inc(&cm_accel_dropped_pkts);
28536+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
28537 dev_kfree_skb_any(skb);
28538 break;
28539 }
28540@@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
28541
28542 if ((cm_id) && (cm_id->event_handler)) {
28543 if (issue_disconn) {
28544- atomic_inc(&cm_disconnects);
28545+ atomic_inc_unchecked(&cm_disconnects);
28546 cm_event.event = IW_CM_EVENT_DISCONNECT;
28547 cm_event.status = disconn_status;
28548 cm_event.local_addr = cm_id->local_addr;
28549@@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
28550 }
28551
28552 if (issue_close) {
28553- atomic_inc(&cm_closes);
28554+ atomic_inc_unchecked(&cm_closes);
28555 nes_disconnect(nesqp, 1);
28556
28557 cm_id->provider_data = nesqp;
28558@@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
28559
28560 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
28561 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
28562- atomic_inc(&cm_accepts);
28563+ atomic_inc_unchecked(&cm_accepts);
28564
28565 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
28566 netdev_refcnt_read(nesvnic->netdev));
28567@@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
28568
28569 struct nes_cm_core *cm_core;
28570
28571- atomic_inc(&cm_rejects);
28572+ atomic_inc_unchecked(&cm_rejects);
28573 cm_node = (struct nes_cm_node *) cm_id->provider_data;
28574 loopback = cm_node->loopbackpartner;
28575 cm_core = cm_node->cm_core;
28576@@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id,
28577 ntohl(cm_id->local_addr.sin_addr.s_addr),
28578 ntohs(cm_id->local_addr.sin_port));
28579
28580- atomic_inc(&cm_connects);
28581+ atomic_inc_unchecked(&cm_connects);
28582 nesqp->active_conn = 1;
28583
28584 /* cache the cm_id in the qp */
28585@@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
28586 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
28587 return err;
28588 }
28589- atomic_inc(&cm_listens_created);
28590+ atomic_inc_unchecked(&cm_listens_created);
28591 }
28592
28593 cm_id->add_ref(cm_id);
28594@@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
28595 if (nesqp->destroyed) {
28596 return;
28597 }
28598- atomic_inc(&cm_connecteds);
28599+ atomic_inc_unchecked(&cm_connecteds);
28600 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
28601 " local port 0x%04X. jiffies = %lu.\n",
28602 nesqp->hwqp.qp_id,
28603@@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
28604
28605 cm_id->add_ref(cm_id);
28606 ret = cm_id->event_handler(cm_id, &cm_event);
28607- atomic_inc(&cm_closes);
28608+ atomic_inc_unchecked(&cm_closes);
28609 cm_event.event = IW_CM_EVENT_CLOSE;
28610 cm_event.status = 0;
28611 cm_event.provider_data = cm_id->provider_data;
28612@@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
28613 return;
28614 cm_id = cm_node->cm_id;
28615
28616- atomic_inc(&cm_connect_reqs);
28617+ atomic_inc_unchecked(&cm_connect_reqs);
28618 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
28619 cm_node, cm_id, jiffies);
28620
28621@@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
28622 return;
28623 cm_id = cm_node->cm_id;
28624
28625- atomic_inc(&cm_connect_reqs);
28626+ atomic_inc_unchecked(&cm_connect_reqs);
28627 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
28628 cm_node, cm_id, jiffies);
28629
28630diff -urNp linux-3.1.1/drivers/infiniband/hw/nes/nes.h linux-3.1.1/drivers/infiniband/hw/nes/nes.h
28631--- linux-3.1.1/drivers/infiniband/hw/nes/nes.h 2011-11-11 15:19:27.000000000 -0500
28632+++ linux-3.1.1/drivers/infiniband/hw/nes/nes.h 2011-11-16 18:39:07.000000000 -0500
28633@@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
28634 extern unsigned int wqm_quanta;
28635 extern struct list_head nes_adapter_list;
28636
28637-extern atomic_t cm_connects;
28638-extern atomic_t cm_accepts;
28639-extern atomic_t cm_disconnects;
28640-extern atomic_t cm_closes;
28641-extern atomic_t cm_connecteds;
28642-extern atomic_t cm_connect_reqs;
28643-extern atomic_t cm_rejects;
28644-extern atomic_t mod_qp_timouts;
28645-extern atomic_t qps_created;
28646-extern atomic_t qps_destroyed;
28647-extern atomic_t sw_qps_destroyed;
28648+extern atomic_unchecked_t cm_connects;
28649+extern atomic_unchecked_t cm_accepts;
28650+extern atomic_unchecked_t cm_disconnects;
28651+extern atomic_unchecked_t cm_closes;
28652+extern atomic_unchecked_t cm_connecteds;
28653+extern atomic_unchecked_t cm_connect_reqs;
28654+extern atomic_unchecked_t cm_rejects;
28655+extern atomic_unchecked_t mod_qp_timouts;
28656+extern atomic_unchecked_t qps_created;
28657+extern atomic_unchecked_t qps_destroyed;
28658+extern atomic_unchecked_t sw_qps_destroyed;
28659 extern u32 mh_detected;
28660 extern u32 mh_pauses_sent;
28661 extern u32 cm_packets_sent;
28662@@ -194,14 +194,14 @@ extern u32 cm_packets_created;
28663 extern u32 cm_packets_received;
28664 extern u32 cm_packets_dropped;
28665 extern u32 cm_packets_retrans;
28666-extern atomic_t cm_listens_created;
28667-extern atomic_t cm_listens_destroyed;
28668+extern atomic_unchecked_t cm_listens_created;
28669+extern atomic_unchecked_t cm_listens_destroyed;
28670 extern u32 cm_backlog_drops;
28671-extern atomic_t cm_loopbacks;
28672-extern atomic_t cm_nodes_created;
28673-extern atomic_t cm_nodes_destroyed;
28674-extern atomic_t cm_accel_dropped_pkts;
28675-extern atomic_t cm_resets_recvd;
28676+extern atomic_unchecked_t cm_loopbacks;
28677+extern atomic_unchecked_t cm_nodes_created;
28678+extern atomic_unchecked_t cm_nodes_destroyed;
28679+extern atomic_unchecked_t cm_accel_dropped_pkts;
28680+extern atomic_unchecked_t cm_resets_recvd;
28681
28682 extern u32 int_mod_timer_init;
28683 extern u32 int_mod_cq_depth_256;
28684diff -urNp linux-3.1.1/drivers/infiniband/hw/nes/nes_nic.c linux-3.1.1/drivers/infiniband/hw/nes/nes_nic.c
28685--- linux-3.1.1/drivers/infiniband/hw/nes/nes_nic.c 2011-11-11 15:19:27.000000000 -0500
28686+++ linux-3.1.1/drivers/infiniband/hw/nes/nes_nic.c 2011-11-16 18:39:07.000000000 -0500
28687@@ -1274,31 +1274,31 @@ static void nes_netdev_get_ethtool_stats
28688 target_stat_values[++index] = mh_detected;
28689 target_stat_values[++index] = mh_pauses_sent;
28690 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
28691- target_stat_values[++index] = atomic_read(&cm_connects);
28692- target_stat_values[++index] = atomic_read(&cm_accepts);
28693- target_stat_values[++index] = atomic_read(&cm_disconnects);
28694- target_stat_values[++index] = atomic_read(&cm_connecteds);
28695- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
28696- target_stat_values[++index] = atomic_read(&cm_rejects);
28697- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
28698- target_stat_values[++index] = atomic_read(&qps_created);
28699- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
28700- target_stat_values[++index] = atomic_read(&qps_destroyed);
28701- target_stat_values[++index] = atomic_read(&cm_closes);
28702+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
28703+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
28704+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
28705+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
28706+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
28707+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
28708+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
28709+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
28710+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
28711+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
28712+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
28713 target_stat_values[++index] = cm_packets_sent;
28714 target_stat_values[++index] = cm_packets_bounced;
28715 target_stat_values[++index] = cm_packets_created;
28716 target_stat_values[++index] = cm_packets_received;
28717 target_stat_values[++index] = cm_packets_dropped;
28718 target_stat_values[++index] = cm_packets_retrans;
28719- target_stat_values[++index] = atomic_read(&cm_listens_created);
28720- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
28721+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
28722+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
28723 target_stat_values[++index] = cm_backlog_drops;
28724- target_stat_values[++index] = atomic_read(&cm_loopbacks);
28725- target_stat_values[++index] = atomic_read(&cm_nodes_created);
28726- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
28727- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
28728- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
28729+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
28730+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
28731+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
28732+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
28733+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
28734 target_stat_values[++index] = nesadapter->free_4kpbl;
28735 target_stat_values[++index] = nesadapter->free_256pbl;
28736 target_stat_values[++index] = int_mod_timer_init;
28737diff -urNp linux-3.1.1/drivers/infiniband/hw/nes/nes_verbs.c linux-3.1.1/drivers/infiniband/hw/nes/nes_verbs.c
28738--- linux-3.1.1/drivers/infiniband/hw/nes/nes_verbs.c 2011-11-11 15:19:27.000000000 -0500
28739+++ linux-3.1.1/drivers/infiniband/hw/nes/nes_verbs.c 2011-11-16 18:39:07.000000000 -0500
28740@@ -46,9 +46,9 @@
28741
28742 #include <rdma/ib_umem.h>
28743
28744-atomic_t mod_qp_timouts;
28745-atomic_t qps_created;
28746-atomic_t sw_qps_destroyed;
28747+atomic_unchecked_t mod_qp_timouts;
28748+atomic_unchecked_t qps_created;
28749+atomic_unchecked_t sw_qps_destroyed;
28750
28751 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
28752
28753@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struc
28754 if (init_attr->create_flags)
28755 return ERR_PTR(-EINVAL);
28756
28757- atomic_inc(&qps_created);
28758+ atomic_inc_unchecked(&qps_created);
28759 switch (init_attr->qp_type) {
28760 case IB_QPT_RC:
28761 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
28762@@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *
28763 struct iw_cm_event cm_event;
28764 int ret;
28765
28766- atomic_inc(&sw_qps_destroyed);
28767+ atomic_inc_unchecked(&sw_qps_destroyed);
28768 nesqp->destroyed = 1;
28769
28770 /* Blow away the connection if it exists. */
28771diff -urNp linux-3.1.1/drivers/infiniband/hw/qib/qib.h linux-3.1.1/drivers/infiniband/hw/qib/qib.h
28772--- linux-3.1.1/drivers/infiniband/hw/qib/qib.h 2011-11-11 15:19:27.000000000 -0500
28773+++ linux-3.1.1/drivers/infiniband/hw/qib/qib.h 2011-11-16 18:39:07.000000000 -0500
28774@@ -51,6 +51,7 @@
28775 #include <linux/completion.h>
28776 #include <linux/kref.h>
28777 #include <linux/sched.h>
28778+#include <linux/slab.h>
28779
28780 #include "qib_common.h"
28781 #include "qib_verbs.h"
28782diff -urNp linux-3.1.1/drivers/input/gameport/gameport.c linux-3.1.1/drivers/input/gameport/gameport.c
28783--- linux-3.1.1/drivers/input/gameport/gameport.c 2011-11-11 15:19:27.000000000 -0500
28784+++ linux-3.1.1/drivers/input/gameport/gameport.c 2011-11-16 18:39:07.000000000 -0500
28785@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
28786 */
28787 static void gameport_init_port(struct gameport *gameport)
28788 {
28789- static atomic_t gameport_no = ATOMIC_INIT(0);
28790+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
28791
28792 __module_get(THIS_MODULE);
28793
28794 mutex_init(&gameport->drv_mutex);
28795 device_initialize(&gameport->dev);
28796 dev_set_name(&gameport->dev, "gameport%lu",
28797- (unsigned long)atomic_inc_return(&gameport_no) - 1);
28798+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
28799 gameport->dev.bus = &gameport_bus;
28800 gameport->dev.release = gameport_release_port;
28801 if (gameport->parent)
28802diff -urNp linux-3.1.1/drivers/input/input.c linux-3.1.1/drivers/input/input.c
28803--- linux-3.1.1/drivers/input/input.c 2011-11-11 15:19:27.000000000 -0500
28804+++ linux-3.1.1/drivers/input/input.c 2011-11-16 18:39:07.000000000 -0500
28805@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struc
28806 */
28807 int input_register_device(struct input_dev *dev)
28808 {
28809- static atomic_t input_no = ATOMIC_INIT(0);
28810+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
28811 struct input_handler *handler;
28812 const char *path;
28813 int error;
28814@@ -1851,7 +1851,7 @@ int input_register_device(struct input_d
28815 dev->setkeycode = input_default_setkeycode;
28816
28817 dev_set_name(&dev->dev, "input%ld",
28818- (unsigned long) atomic_inc_return(&input_no) - 1);
28819+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
28820
28821 error = device_add(&dev->dev);
28822 if (error)
28823diff -urNp linux-3.1.1/drivers/input/joystick/sidewinder.c linux-3.1.1/drivers/input/joystick/sidewinder.c
28824--- linux-3.1.1/drivers/input/joystick/sidewinder.c 2011-11-11 15:19:27.000000000 -0500
28825+++ linux-3.1.1/drivers/input/joystick/sidewinder.c 2011-11-16 18:40:10.000000000 -0500
28826@@ -30,6 +30,7 @@
28827 #include <linux/kernel.h>
28828 #include <linux/module.h>
28829 #include <linux/slab.h>
28830+#include <linux/sched.h>
28831 #include <linux/init.h>
28832 #include <linux/input.h>
28833 #include <linux/gameport.h>
28834@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
28835 unsigned char buf[SW_LENGTH];
28836 int i;
28837
28838+ pax_track_stack();
28839+
28840 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
28841
28842 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
28843diff -urNp linux-3.1.1/drivers/input/joystick/xpad.c linux-3.1.1/drivers/input/joystick/xpad.c
28844--- linux-3.1.1/drivers/input/joystick/xpad.c 2011-11-11 15:19:27.000000000 -0500
28845+++ linux-3.1.1/drivers/input/joystick/xpad.c 2011-11-16 18:39:07.000000000 -0500
28846@@ -710,7 +710,7 @@ static void xpad_led_set(struct led_clas
28847
28848 static int xpad_led_probe(struct usb_xpad *xpad)
28849 {
28850- static atomic_t led_seq = ATOMIC_INIT(0);
28851+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
28852 long led_no;
28853 struct xpad_led *led;
28854 struct led_classdev *led_cdev;
28855@@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpa
28856 if (!led)
28857 return -ENOMEM;
28858
28859- led_no = (long)atomic_inc_return(&led_seq) - 1;
28860+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
28861
28862 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
28863 led->xpad = xpad;
28864diff -urNp linux-3.1.1/drivers/input/mousedev.c linux-3.1.1/drivers/input/mousedev.c
28865--- linux-3.1.1/drivers/input/mousedev.c 2011-11-11 15:19:27.000000000 -0500
28866+++ linux-3.1.1/drivers/input/mousedev.c 2011-11-16 18:39:07.000000000 -0500
28867@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file
28868
28869 spin_unlock_irq(&client->packet_lock);
28870
28871- if (copy_to_user(buffer, data, count))
28872+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
28873 return -EFAULT;
28874
28875 return count;
28876diff -urNp linux-3.1.1/drivers/input/serio/serio.c linux-3.1.1/drivers/input/serio/serio.c
28877--- linux-3.1.1/drivers/input/serio/serio.c 2011-11-11 15:19:27.000000000 -0500
28878+++ linux-3.1.1/drivers/input/serio/serio.c 2011-11-16 18:39:07.000000000 -0500
28879@@ -497,7 +497,7 @@ static void serio_release_port(struct de
28880 */
28881 static void serio_init_port(struct serio *serio)
28882 {
28883- static atomic_t serio_no = ATOMIC_INIT(0);
28884+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
28885
28886 __module_get(THIS_MODULE);
28887
28888@@ -508,7 +508,7 @@ static void serio_init_port(struct serio
28889 mutex_init(&serio->drv_mutex);
28890 device_initialize(&serio->dev);
28891 dev_set_name(&serio->dev, "serio%ld",
28892- (long)atomic_inc_return(&serio_no) - 1);
28893+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
28894 serio->dev.bus = &serio_bus;
28895 serio->dev.release = serio_release_port;
28896 serio->dev.groups = serio_device_attr_groups;
28897diff -urNp linux-3.1.1/drivers/isdn/capi/capi.c linux-3.1.1/drivers/isdn/capi/capi.c
28898--- linux-3.1.1/drivers/isdn/capi/capi.c 2011-11-11 15:19:27.000000000 -0500
28899+++ linux-3.1.1/drivers/isdn/capi/capi.c 2011-11-16 18:39:07.000000000 -0500
28900@@ -83,8 +83,8 @@ struct capiminor {
28901
28902 struct capi20_appl *ap;
28903 u32 ncci;
28904- atomic_t datahandle;
28905- atomic_t msgid;
28906+ atomic_unchecked_t datahandle;
28907+ atomic_unchecked_t msgid;
28908
28909 struct tty_port port;
28910 int ttyinstop;
28911@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *m
28912 capimsg_setu16(s, 2, mp->ap->applid);
28913 capimsg_setu8 (s, 4, CAPI_DATA_B3);
28914 capimsg_setu8 (s, 5, CAPI_RESP);
28915- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
28916+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
28917 capimsg_setu32(s, 8, mp->ncci);
28918 capimsg_setu16(s, 12, datahandle);
28919 }
28920@@ -518,14 +518,14 @@ static void handle_minor_send(struct cap
28921 mp->outbytes -= len;
28922 spin_unlock_bh(&mp->outlock);
28923
28924- datahandle = atomic_inc_return(&mp->datahandle);
28925+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
28926 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
28927 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
28928 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
28929 capimsg_setu16(skb->data, 2, mp->ap->applid);
28930 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
28931 capimsg_setu8 (skb->data, 5, CAPI_REQ);
28932- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
28933+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
28934 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
28935 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
28936 capimsg_setu16(skb->data, 16, len); /* Data length */
28937diff -urNp linux-3.1.1/drivers/isdn/gigaset/common.c linux-3.1.1/drivers/isdn/gigaset/common.c
28938--- linux-3.1.1/drivers/isdn/gigaset/common.c 2011-11-11 15:19:27.000000000 -0500
28939+++ linux-3.1.1/drivers/isdn/gigaset/common.c 2011-11-16 18:39:07.000000000 -0500
28940@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
28941 cs->commands_pending = 0;
28942 cs->cur_at_seq = 0;
28943 cs->gotfwver = -1;
28944- cs->open_count = 0;
28945+ local_set(&cs->open_count, 0);
28946 cs->dev = NULL;
28947 cs->tty = NULL;
28948 cs->tty_dev = NULL;
28949diff -urNp linux-3.1.1/drivers/isdn/gigaset/gigaset.h linux-3.1.1/drivers/isdn/gigaset/gigaset.h
28950--- linux-3.1.1/drivers/isdn/gigaset/gigaset.h 2011-11-11 15:19:27.000000000 -0500
28951+++ linux-3.1.1/drivers/isdn/gigaset/gigaset.h 2011-11-16 18:39:07.000000000 -0500
28952@@ -35,6 +35,7 @@
28953 #include <linux/tty_driver.h>
28954 #include <linux/list.h>
28955 #include <linux/atomic.h>
28956+#include <asm/local.h>
28957
28958 #define GIG_VERSION {0, 5, 0, 0}
28959 #define GIG_COMPAT {0, 4, 0, 0}
28960@@ -433,7 +434,7 @@ struct cardstate {
28961 spinlock_t cmdlock;
28962 unsigned curlen, cmdbytes;
28963
28964- unsigned open_count;
28965+ local_t open_count;
28966 struct tty_struct *tty;
28967 struct tasklet_struct if_wake_tasklet;
28968 unsigned control_state;
28969diff -urNp linux-3.1.1/drivers/isdn/gigaset/interface.c linux-3.1.1/drivers/isdn/gigaset/interface.c
28970--- linux-3.1.1/drivers/isdn/gigaset/interface.c 2011-11-11 15:19:27.000000000 -0500
28971+++ linux-3.1.1/drivers/isdn/gigaset/interface.c 2011-11-16 18:39:07.000000000 -0500
28972@@ -162,9 +162,7 @@ static int if_open(struct tty_struct *tt
28973 }
28974 tty->driver_data = cs;
28975
28976- ++cs->open_count;
28977-
28978- if (cs->open_count == 1) {
28979+ if (local_inc_return(&cs->open_count) == 1) {
28980 spin_lock_irqsave(&cs->lock, flags);
28981 cs->tty = tty;
28982 spin_unlock_irqrestore(&cs->lock, flags);
28983@@ -192,10 +190,10 @@ static void if_close(struct tty_struct *
28984
28985 if (!cs->connected)
28986 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
28987- else if (!cs->open_count)
28988+ else if (!local_read(&cs->open_count))
28989 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28990 else {
28991- if (!--cs->open_count) {
28992+ if (!local_dec_return(&cs->open_count)) {
28993 spin_lock_irqsave(&cs->lock, flags);
28994 cs->tty = NULL;
28995 spin_unlock_irqrestore(&cs->lock, flags);
28996@@ -230,7 +228,7 @@ static int if_ioctl(struct tty_struct *t
28997 if (!cs->connected) {
28998 gig_dbg(DEBUG_IF, "not connected");
28999 retval = -ENODEV;
29000- } else if (!cs->open_count)
29001+ } else if (!local_read(&cs->open_count))
29002 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29003 else {
29004 retval = 0;
29005@@ -360,7 +358,7 @@ static int if_write(struct tty_struct *t
29006 retval = -ENODEV;
29007 goto done;
29008 }
29009- if (!cs->open_count) {
29010+ if (!local_read(&cs->open_count)) {
29011 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29012 retval = -ENODEV;
29013 goto done;
29014@@ -413,7 +411,7 @@ static int if_write_room(struct tty_stru
29015 if (!cs->connected) {
29016 gig_dbg(DEBUG_IF, "not connected");
29017 retval = -ENODEV;
29018- } else if (!cs->open_count)
29019+ } else if (!local_read(&cs->open_count))
29020 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29021 else if (cs->mstate != MS_LOCKED) {
29022 dev_warn(cs->dev, "can't write to unlocked device\n");
29023@@ -443,7 +441,7 @@ static int if_chars_in_buffer(struct tty
29024
29025 if (!cs->connected)
29026 gig_dbg(DEBUG_IF, "not connected");
29027- else if (!cs->open_count)
29028+ else if (!local_read(&cs->open_count))
29029 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29030 else if (cs->mstate != MS_LOCKED)
29031 dev_warn(cs->dev, "can't write to unlocked device\n");
29032@@ -471,7 +469,7 @@ static void if_throttle(struct tty_struc
29033
29034 if (!cs->connected)
29035 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29036- else if (!cs->open_count)
29037+ else if (!local_read(&cs->open_count))
29038 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29039 else
29040 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
29041@@ -495,7 +493,7 @@ static void if_unthrottle(struct tty_str
29042
29043 if (!cs->connected)
29044 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29045- else if (!cs->open_count)
29046+ else if (!local_read(&cs->open_count))
29047 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29048 else
29049 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
29050@@ -526,7 +524,7 @@ static void if_set_termios(struct tty_st
29051 goto out;
29052 }
29053
29054- if (!cs->open_count) {
29055+ if (!local_read(&cs->open_count)) {
29056 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29057 goto out;
29058 }
29059diff -urNp linux-3.1.1/drivers/isdn/hardware/avm/b1.c linux-3.1.1/drivers/isdn/hardware/avm/b1.c
29060--- linux-3.1.1/drivers/isdn/hardware/avm/b1.c 2011-11-11 15:19:27.000000000 -0500
29061+++ linux-3.1.1/drivers/isdn/hardware/avm/b1.c 2011-11-16 18:39:07.000000000 -0500
29062@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
29063 }
29064 if (left) {
29065 if (t4file->user) {
29066- if (copy_from_user(buf, dp, left))
29067+ if (left > sizeof buf || copy_from_user(buf, dp, left))
29068 return -EFAULT;
29069 } else {
29070 memcpy(buf, dp, left);
29071@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
29072 }
29073 if (left) {
29074 if (config->user) {
29075- if (copy_from_user(buf, dp, left))
29076+ if (left > sizeof buf || copy_from_user(buf, dp, left))
29077 return -EFAULT;
29078 } else {
29079 memcpy(buf, dp, left);
29080diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/capidtmf.c linux-3.1.1/drivers/isdn/hardware/eicon/capidtmf.c
29081--- linux-3.1.1/drivers/isdn/hardware/eicon/capidtmf.c 2011-11-11 15:19:27.000000000 -0500
29082+++ linux-3.1.1/drivers/isdn/hardware/eicon/capidtmf.c 2011-11-16 18:40:10.000000000 -0500
29083@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
29084 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
29085 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
29086
29087+ pax_track_stack();
29088
29089 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
29090 {
29091diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/capifunc.c linux-3.1.1/drivers/isdn/hardware/eicon/capifunc.c
29092--- linux-3.1.1/drivers/isdn/hardware/eicon/capifunc.c 2011-11-11 15:19:27.000000000 -0500
29093+++ linux-3.1.1/drivers/isdn/hardware/eicon/capifunc.c 2011-11-16 18:40:10.000000000 -0500
29094@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
29095 IDI_SYNC_REQ req;
29096 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29097
29098+ pax_track_stack();
29099+
29100 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29101
29102 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29103diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/diddfunc.c linux-3.1.1/drivers/isdn/hardware/eicon/diddfunc.c
29104--- linux-3.1.1/drivers/isdn/hardware/eicon/diddfunc.c 2011-11-11 15:19:27.000000000 -0500
29105+++ linux-3.1.1/drivers/isdn/hardware/eicon/diddfunc.c 2011-11-16 18:40:10.000000000 -0500
29106@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
29107 IDI_SYNC_REQ req;
29108 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29109
29110+ pax_track_stack();
29111+
29112 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29113
29114 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29115diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/divasfunc.c linux-3.1.1/drivers/isdn/hardware/eicon/divasfunc.c
29116--- linux-3.1.1/drivers/isdn/hardware/eicon/divasfunc.c 2011-11-11 15:19:27.000000000 -0500
29117+++ linux-3.1.1/drivers/isdn/hardware/eicon/divasfunc.c 2011-11-16 18:40:10.000000000 -0500
29118@@ -160,6 +160,8 @@ static int DIVA_INIT_FUNCTION connect_di
29119 IDI_SYNC_REQ req;
29120 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29121
29122+ pax_track_stack();
29123+
29124 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29125
29126 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29127diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/divasync.h linux-3.1.1/drivers/isdn/hardware/eicon/divasync.h
29128--- linux-3.1.1/drivers/isdn/hardware/eicon/divasync.h 2011-11-11 15:19:27.000000000 -0500
29129+++ linux-3.1.1/drivers/isdn/hardware/eicon/divasync.h 2011-11-16 18:39:07.000000000 -0500
29130@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
29131 } diva_didd_add_adapter_t;
29132 typedef struct _diva_didd_remove_adapter {
29133 IDI_CALL p_request;
29134-} diva_didd_remove_adapter_t;
29135+} __no_const diva_didd_remove_adapter_t;
29136 typedef struct _diva_didd_read_adapter_array {
29137 void * buffer;
29138 dword length;
29139diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/idifunc.c linux-3.1.1/drivers/isdn/hardware/eicon/idifunc.c
29140--- linux-3.1.1/drivers/isdn/hardware/eicon/idifunc.c 2011-11-11 15:19:27.000000000 -0500
29141+++ linux-3.1.1/drivers/isdn/hardware/eicon/idifunc.c 2011-11-16 18:40:10.000000000 -0500
29142@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
29143 IDI_SYNC_REQ req;
29144 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29145
29146+ pax_track_stack();
29147+
29148 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29149
29150 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29151diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/message.c linux-3.1.1/drivers/isdn/hardware/eicon/message.c
29152--- linux-3.1.1/drivers/isdn/hardware/eicon/message.c 2011-11-11 15:19:27.000000000 -0500
29153+++ linux-3.1.1/drivers/isdn/hardware/eicon/message.c 2011-11-16 18:40:10.000000000 -0500
29154@@ -4886,6 +4886,8 @@ static void sig_ind(PLCI *plci)
29155 dword d;
29156 word w;
29157
29158+ pax_track_stack();
29159+
29160 a = plci->adapter;
29161 Id = ((word)plci->Id<<8)|a->Id;
29162 PUT_WORD(&SS_Ind[4],0x0000);
29163@@ -7480,6 +7482,8 @@ static word add_b1(PLCI *plci, API_PARSE
29164 word j, n, w;
29165 dword d;
29166
29167+ pax_track_stack();
29168+
29169
29170 for(i=0;i<8;i++) bp_parms[i].length = 0;
29171 for(i=0;i<2;i++) global_config[i].length = 0;
29172@@ -7954,6 +7958,8 @@ static word add_b23(PLCI *plci, API_PARS
29173 const byte llc3[] = {4,3,2,2,6,6,0};
29174 const byte header[] = {0,2,3,3,0,0,0};
29175
29176+ pax_track_stack();
29177+
29178 for(i=0;i<8;i++) bp_parms[i].length = 0;
29179 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
29180 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
29181@@ -14741,6 +14747,8 @@ static void group_optimization(DIVA_CAPI
29182 word appl_number_group_type[MAX_APPL];
29183 PLCI *auxplci;
29184
29185+ pax_track_stack();
29186+
29187 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
29188
29189 if(!a->group_optimization_enabled)
29190diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/mntfunc.c linux-3.1.1/drivers/isdn/hardware/eicon/mntfunc.c
29191--- linux-3.1.1/drivers/isdn/hardware/eicon/mntfunc.c 2011-11-11 15:19:27.000000000 -0500
29192+++ linux-3.1.1/drivers/isdn/hardware/eicon/mntfunc.c 2011-11-16 18:40:10.000000000 -0500
29193@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
29194 IDI_SYNC_REQ req;
29195 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29196
29197+ pax_track_stack();
29198+
29199 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29200
29201 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29202diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/xdi_adapter.h linux-3.1.1/drivers/isdn/hardware/eicon/xdi_adapter.h
29203--- linux-3.1.1/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-11-11 15:19:27.000000000 -0500
29204+++ linux-3.1.1/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-11-16 18:39:07.000000000 -0500
29205@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
29206 typedef struct _diva_os_idi_adapter_interface {
29207 diva_init_card_proc_t cleanup_adapter_proc;
29208 diva_cmd_card_proc_t cmd_proc;
29209-} diva_os_idi_adapter_interface_t;
29210+} __no_const diva_os_idi_adapter_interface_t;
29211
29212 typedef struct _diva_os_xdi_adapter {
29213 struct list_head link;
29214diff -urNp linux-3.1.1/drivers/isdn/i4l/isdn_common.c linux-3.1.1/drivers/isdn/i4l/isdn_common.c
29215--- linux-3.1.1/drivers/isdn/i4l/isdn_common.c 2011-11-11 15:19:27.000000000 -0500
29216+++ linux-3.1.1/drivers/isdn/i4l/isdn_common.c 2011-11-16 18:40:10.000000000 -0500
29217@@ -1286,6 +1286,8 @@ isdn_ioctl(struct file *file, uint cmd,
29218 } iocpar;
29219 void __user *argp = (void __user *)arg;
29220
29221+ pax_track_stack();
29222+
29223 #define name iocpar.name
29224 #define bname iocpar.bname
29225 #define iocts iocpar.iocts
29226diff -urNp linux-3.1.1/drivers/isdn/icn/icn.c linux-3.1.1/drivers/isdn/icn/icn.c
29227--- linux-3.1.1/drivers/isdn/icn/icn.c 2011-11-11 15:19:27.000000000 -0500
29228+++ linux-3.1.1/drivers/isdn/icn/icn.c 2011-11-16 18:39:07.000000000 -0500
29229@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
29230 if (count > len)
29231 count = len;
29232 if (user) {
29233- if (copy_from_user(msg, buf, count))
29234+ if (count > sizeof msg || copy_from_user(msg, buf, count))
29235 return -EFAULT;
29236 } else
29237 memcpy(msg, buf, count);
29238diff -urNp linux-3.1.1/drivers/lguest/core.c linux-3.1.1/drivers/lguest/core.c
29239--- linux-3.1.1/drivers/lguest/core.c 2011-11-11 15:19:27.000000000 -0500
29240+++ linux-3.1.1/drivers/lguest/core.c 2011-11-16 18:39:07.000000000 -0500
29241@@ -92,9 +92,17 @@ static __init int map_switcher(void)
29242 * it's worked so far. The end address needs +1 because __get_vm_area
29243 * allocates an extra guard page, so we need space for that.
29244 */
29245+
29246+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
29247+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
29248+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
29249+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
29250+#else
29251 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
29252 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
29253 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
29254+#endif
29255+
29256 if (!switcher_vma) {
29257 err = -ENOMEM;
29258 printk("lguest: could not map switcher pages high\n");
29259@@ -119,7 +127,7 @@ static __init int map_switcher(void)
29260 * Now the Switcher is mapped at the right address, we can't fail!
29261 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
29262 */
29263- memcpy(switcher_vma->addr, start_switcher_text,
29264+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
29265 end_switcher_text - start_switcher_text);
29266
29267 printk(KERN_INFO "lguest: mapped switcher at %p\n",
29268diff -urNp linux-3.1.1/drivers/lguest/x86/core.c linux-3.1.1/drivers/lguest/x86/core.c
29269--- linux-3.1.1/drivers/lguest/x86/core.c 2011-11-11 15:19:27.000000000 -0500
29270+++ linux-3.1.1/drivers/lguest/x86/core.c 2011-11-16 18:39:07.000000000 -0500
29271@@ -59,7 +59,7 @@ static struct {
29272 /* Offset from where switcher.S was compiled to where we've copied it */
29273 static unsigned long switcher_offset(void)
29274 {
29275- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
29276+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
29277 }
29278
29279 /* This cpu's struct lguest_pages. */
29280@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
29281 * These copies are pretty cheap, so we do them unconditionally: */
29282 /* Save the current Host top-level page directory.
29283 */
29284+
29285+#ifdef CONFIG_PAX_PER_CPU_PGD
29286+ pages->state.host_cr3 = read_cr3();
29287+#else
29288 pages->state.host_cr3 = __pa(current->mm->pgd);
29289+#endif
29290+
29291 /*
29292 * Set up the Guest's page tables to see this CPU's pages (and no
29293 * other CPU's pages).
29294@@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
29295 * compiled-in switcher code and the high-mapped copy we just made.
29296 */
29297 for (i = 0; i < IDT_ENTRIES; i++)
29298- default_idt_entries[i] += switcher_offset();
29299+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
29300
29301 /*
29302 * Set up the Switcher's per-cpu areas.
29303@@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
29304 * it will be undisturbed when we switch. To change %cs and jump we
29305 * need this structure to feed to Intel's "lcall" instruction.
29306 */
29307- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
29308+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
29309 lguest_entry.segment = LGUEST_CS;
29310
29311 /*
29312diff -urNp linux-3.1.1/drivers/lguest/x86/switcher_32.S linux-3.1.1/drivers/lguest/x86/switcher_32.S
29313--- linux-3.1.1/drivers/lguest/x86/switcher_32.S 2011-11-11 15:19:27.000000000 -0500
29314+++ linux-3.1.1/drivers/lguest/x86/switcher_32.S 2011-11-16 18:39:07.000000000 -0500
29315@@ -87,6 +87,7 @@
29316 #include <asm/page.h>
29317 #include <asm/segment.h>
29318 #include <asm/lguest.h>
29319+#include <asm/processor-flags.h>
29320
29321 // We mark the start of the code to copy
29322 // It's placed in .text tho it's never run here
29323@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
29324 // Changes type when we load it: damn Intel!
29325 // For after we switch over our page tables
29326 // That entry will be read-only: we'd crash.
29327+
29328+#ifdef CONFIG_PAX_KERNEXEC
29329+ mov %cr0, %edx
29330+ xor $X86_CR0_WP, %edx
29331+ mov %edx, %cr0
29332+#endif
29333+
29334 movl $(GDT_ENTRY_TSS*8), %edx
29335 ltr %dx
29336
29337@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
29338 // Let's clear it again for our return.
29339 // The GDT descriptor of the Host
29340 // Points to the table after two "size" bytes
29341- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
29342+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
29343 // Clear "used" from type field (byte 5, bit 2)
29344- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
29345+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
29346+
29347+#ifdef CONFIG_PAX_KERNEXEC
29348+ mov %cr0, %eax
29349+ xor $X86_CR0_WP, %eax
29350+ mov %eax, %cr0
29351+#endif
29352
29353 // Once our page table's switched, the Guest is live!
29354 // The Host fades as we run this final step.
29355@@ -295,13 +309,12 @@ deliver_to_host:
29356 // I consulted gcc, and it gave
29357 // These instructions, which I gladly credit:
29358 leal (%edx,%ebx,8), %eax
29359- movzwl (%eax),%edx
29360- movl 4(%eax), %eax
29361- xorw %ax, %ax
29362- orl %eax, %edx
29363+ movl 4(%eax), %edx
29364+ movw (%eax), %dx
29365 // Now the address of the handler's in %edx
29366 // We call it now: its "iret" drops us home.
29367- jmp *%edx
29368+ ljmp $__KERNEL_CS, $1f
29369+1: jmp *%edx
29370
29371 // Every interrupt can come to us here
29372 // But we must truly tell each apart.
29373diff -urNp linux-3.1.1/drivers/macintosh/macio_asic.c linux-3.1.1/drivers/macintosh/macio_asic.c
29374--- linux-3.1.1/drivers/macintosh/macio_asic.c 2011-11-11 15:19:27.000000000 -0500
29375+++ linux-3.1.1/drivers/macintosh/macio_asic.c 2011-11-16 18:39:07.000000000 -0500
29376@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(s
29377 * MacIO is matched against any Apple ID, it's probe() function
29378 * will then decide wether it applies or not
29379 */
29380-static const struct pci_device_id __devinitdata pci_ids [] = { {
29381+static const struct pci_device_id __devinitconst pci_ids [] = { {
29382 .vendor = PCI_VENDOR_ID_APPLE,
29383 .device = PCI_ANY_ID,
29384 .subvendor = PCI_ANY_ID,
29385diff -urNp linux-3.1.1/drivers/md/dm.c linux-3.1.1/drivers/md/dm.c
29386--- linux-3.1.1/drivers/md/dm.c 2011-11-11 15:19:27.000000000 -0500
29387+++ linux-3.1.1/drivers/md/dm.c 2011-11-16 18:39:07.000000000 -0500
29388@@ -165,9 +165,9 @@ struct mapped_device {
29389 /*
29390 * Event handling.
29391 */
29392- atomic_t event_nr;
29393+ atomic_unchecked_t event_nr;
29394 wait_queue_head_t eventq;
29395- atomic_t uevent_seq;
29396+ atomic_unchecked_t uevent_seq;
29397 struct list_head uevent_list;
29398 spinlock_t uevent_lock; /* Protect access to uevent_list */
29399
29400@@ -1843,8 +1843,8 @@ static struct mapped_device *alloc_dev(i
29401 rwlock_init(&md->map_lock);
29402 atomic_set(&md->holders, 1);
29403 atomic_set(&md->open_count, 0);
29404- atomic_set(&md->event_nr, 0);
29405- atomic_set(&md->uevent_seq, 0);
29406+ atomic_set_unchecked(&md->event_nr, 0);
29407+ atomic_set_unchecked(&md->uevent_seq, 0);
29408 INIT_LIST_HEAD(&md->uevent_list);
29409 spin_lock_init(&md->uevent_lock);
29410
29411@@ -1978,7 +1978,7 @@ static void event_callback(void *context
29412
29413 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
29414
29415- atomic_inc(&md->event_nr);
29416+ atomic_inc_unchecked(&md->event_nr);
29417 wake_up(&md->eventq);
29418 }
29419
29420@@ -2614,18 +2614,18 @@ int dm_kobject_uevent(struct mapped_devi
29421
29422 uint32_t dm_next_uevent_seq(struct mapped_device *md)
29423 {
29424- return atomic_add_return(1, &md->uevent_seq);
29425+ return atomic_add_return_unchecked(1, &md->uevent_seq);
29426 }
29427
29428 uint32_t dm_get_event_nr(struct mapped_device *md)
29429 {
29430- return atomic_read(&md->event_nr);
29431+ return atomic_read_unchecked(&md->event_nr);
29432 }
29433
29434 int dm_wait_event(struct mapped_device *md, int event_nr)
29435 {
29436 return wait_event_interruptible(md->eventq,
29437- (event_nr != atomic_read(&md->event_nr)));
29438+ (event_nr != atomic_read_unchecked(&md->event_nr)));
29439 }
29440
29441 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
29442diff -urNp linux-3.1.1/drivers/md/dm-ioctl.c linux-3.1.1/drivers/md/dm-ioctl.c
29443--- linux-3.1.1/drivers/md/dm-ioctl.c 2011-11-11 15:19:27.000000000 -0500
29444+++ linux-3.1.1/drivers/md/dm-ioctl.c 2011-11-16 18:39:07.000000000 -0500
29445@@ -1578,7 +1578,7 @@ static int validate_params(uint cmd, str
29446 cmd == DM_LIST_VERSIONS_CMD)
29447 return 0;
29448
29449- if ((cmd == DM_DEV_CREATE_CMD)) {
29450+ if (cmd == DM_DEV_CREATE_CMD) {
29451 if (!*param->name) {
29452 DMWARN("name not supplied when creating device");
29453 return -EINVAL;
29454diff -urNp linux-3.1.1/drivers/md/dm-raid1.c linux-3.1.1/drivers/md/dm-raid1.c
29455--- linux-3.1.1/drivers/md/dm-raid1.c 2011-11-11 15:19:27.000000000 -0500
29456+++ linux-3.1.1/drivers/md/dm-raid1.c 2011-11-16 18:39:07.000000000 -0500
29457@@ -40,7 +40,7 @@ enum dm_raid1_error {
29458
29459 struct mirror {
29460 struct mirror_set *ms;
29461- atomic_t error_count;
29462+ atomic_unchecked_t error_count;
29463 unsigned long error_type;
29464 struct dm_dev *dev;
29465 sector_t offset;
29466@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(s
29467 struct mirror *m;
29468
29469 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
29470- if (!atomic_read(&m->error_count))
29471+ if (!atomic_read_unchecked(&m->error_count))
29472 return m;
29473
29474 return NULL;
29475@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m
29476 * simple way to tell if a device has encountered
29477 * errors.
29478 */
29479- atomic_inc(&m->error_count);
29480+ atomic_inc_unchecked(&m->error_count);
29481
29482 if (test_and_set_bit(error_type, &m->error_type))
29483 return;
29484@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(stru
29485 struct mirror *m = get_default_mirror(ms);
29486
29487 do {
29488- if (likely(!atomic_read(&m->error_count)))
29489+ if (likely(!atomic_read_unchecked(&m->error_count)))
29490 return m;
29491
29492 if (m-- == ms->mirror)
29493@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
29494 {
29495 struct mirror *default_mirror = get_default_mirror(m->ms);
29496
29497- return !atomic_read(&default_mirror->error_count);
29498+ return !atomic_read_unchecked(&default_mirror->error_count);
29499 }
29500
29501 static int mirror_available(struct mirror_set *ms, struct bio *bio)
29502@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *
29503 */
29504 if (likely(region_in_sync(ms, region, 1)))
29505 m = choose_mirror(ms, bio->bi_sector);
29506- else if (m && atomic_read(&m->error_count))
29507+ else if (m && atomic_read_unchecked(&m->error_count))
29508 m = NULL;
29509
29510 if (likely(m))
29511@@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set
29512 }
29513
29514 ms->mirror[mirror].ms = ms;
29515- atomic_set(&(ms->mirror[mirror].error_count), 0);
29516+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
29517 ms->mirror[mirror].error_type = 0;
29518 ms->mirror[mirror].offset = offset;
29519
29520@@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
29521 */
29522 static char device_status_char(struct mirror *m)
29523 {
29524- if (!atomic_read(&(m->error_count)))
29525+ if (!atomic_read_unchecked(&(m->error_count)))
29526 return 'A';
29527
29528 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
29529diff -urNp linux-3.1.1/drivers/md/dm-stripe.c linux-3.1.1/drivers/md/dm-stripe.c
29530--- linux-3.1.1/drivers/md/dm-stripe.c 2011-11-11 15:19:27.000000000 -0500
29531+++ linux-3.1.1/drivers/md/dm-stripe.c 2011-11-16 18:39:07.000000000 -0500
29532@@ -20,7 +20,7 @@ struct stripe {
29533 struct dm_dev *dev;
29534 sector_t physical_start;
29535
29536- atomic_t error_count;
29537+ atomic_unchecked_t error_count;
29538 };
29539
29540 struct stripe_c {
29541@@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
29542 kfree(sc);
29543 return r;
29544 }
29545- atomic_set(&(sc->stripe[i].error_count), 0);
29546+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
29547 }
29548
29549 ti->private = sc;
29550@@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
29551 DMEMIT("%d ", sc->stripes);
29552 for (i = 0; i < sc->stripes; i++) {
29553 DMEMIT("%s ", sc->stripe[i].dev->name);
29554- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
29555+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
29556 'D' : 'A';
29557 }
29558 buffer[i] = '\0';
29559@@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
29560 */
29561 for (i = 0; i < sc->stripes; i++)
29562 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
29563- atomic_inc(&(sc->stripe[i].error_count));
29564- if (atomic_read(&(sc->stripe[i].error_count)) <
29565+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
29566+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
29567 DM_IO_ERROR_THRESHOLD)
29568 schedule_work(&sc->trigger_event);
29569 }
29570diff -urNp linux-3.1.1/drivers/md/dm-table.c linux-3.1.1/drivers/md/dm-table.c
29571--- linux-3.1.1/drivers/md/dm-table.c 2011-11-11 15:19:27.000000000 -0500
29572+++ linux-3.1.1/drivers/md/dm-table.c 2011-11-16 18:39:07.000000000 -0500
29573@@ -389,7 +389,7 @@ static int device_area_is_invalid(struct
29574 if (!dev_size)
29575 return 0;
29576
29577- if ((start >= dev_size) || (start + len > dev_size)) {
29578+ if ((start >= dev_size) || (len > dev_size - start)) {
29579 DMWARN("%s: %s too small for target: "
29580 "start=%llu, len=%llu, dev_size=%llu",
29581 dm_device_name(ti->table->md), bdevname(bdev, b),
29582diff -urNp linux-3.1.1/drivers/md/md.c linux-3.1.1/drivers/md/md.c
29583--- linux-3.1.1/drivers/md/md.c 2011-11-11 15:19:27.000000000 -0500
29584+++ linux-3.1.1/drivers/md/md.c 2011-11-16 18:39:07.000000000 -0500
29585@@ -280,10 +280,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
29586 * start build, activate spare
29587 */
29588 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
29589-static atomic_t md_event_count;
29590+static atomic_unchecked_t md_event_count;
29591 void md_new_event(mddev_t *mddev)
29592 {
29593- atomic_inc(&md_event_count);
29594+ atomic_inc_unchecked(&md_event_count);
29595 wake_up(&md_event_waiters);
29596 }
29597 EXPORT_SYMBOL_GPL(md_new_event);
29598@@ -293,7 +293,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
29599 */
29600 static void md_new_event_inintr(mddev_t *mddev)
29601 {
29602- atomic_inc(&md_event_count);
29603+ atomic_inc_unchecked(&md_event_count);
29604 wake_up(&md_event_waiters);
29605 }
29606
29607@@ -1531,7 +1531,7 @@ static int super_1_load(mdk_rdev_t *rdev
29608
29609 rdev->preferred_minor = 0xffff;
29610 rdev->data_offset = le64_to_cpu(sb->data_offset);
29611- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
29612+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
29613
29614 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
29615 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
29616@@ -1748,7 +1748,7 @@ static void super_1_sync(mddev_t *mddev,
29617 else
29618 sb->resync_offset = cpu_to_le64(0);
29619
29620- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
29621+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
29622
29623 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
29624 sb->size = cpu_to_le64(mddev->dev_sectors);
29625@@ -2643,7 +2643,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
29626 static ssize_t
29627 errors_show(mdk_rdev_t *rdev, char *page)
29628 {
29629- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
29630+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
29631 }
29632
29633 static ssize_t
29634@@ -2652,7 +2652,7 @@ errors_store(mdk_rdev_t *rdev, const cha
29635 char *e;
29636 unsigned long n = simple_strtoul(buf, &e, 10);
29637 if (*buf && (*e == 0 || *e == '\n')) {
29638- atomic_set(&rdev->corrected_errors, n);
29639+ atomic_set_unchecked(&rdev->corrected_errors, n);
29640 return len;
29641 }
29642 return -EINVAL;
29643@@ -3042,8 +3042,8 @@ int md_rdev_init(mdk_rdev_t *rdev)
29644 rdev->sb_loaded = 0;
29645 rdev->bb_page = NULL;
29646 atomic_set(&rdev->nr_pending, 0);
29647- atomic_set(&rdev->read_errors, 0);
29648- atomic_set(&rdev->corrected_errors, 0);
29649+ atomic_set_unchecked(&rdev->read_errors, 0);
29650+ atomic_set_unchecked(&rdev->corrected_errors, 0);
29651
29652 INIT_LIST_HEAD(&rdev->same_set);
29653 init_waitqueue_head(&rdev->blocked_wait);
29654@@ -6667,7 +6667,7 @@ static int md_seq_show(struct seq_file *
29655
29656 spin_unlock(&pers_lock);
29657 seq_printf(seq, "\n");
29658- seq->poll_event = atomic_read(&md_event_count);
29659+ seq->poll_event = atomic_read_unchecked(&md_event_count);
29660 return 0;
29661 }
29662 if (v == (void*)2) {
29663@@ -6756,7 +6756,7 @@ static int md_seq_show(struct seq_file *
29664 chunk_kb ? "KB" : "B");
29665 if (bitmap->file) {
29666 seq_printf(seq, ", file: ");
29667- seq_path(seq, &bitmap->file->f_path, " \t\n");
29668+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
29669 }
29670
29671 seq_printf(seq, "\n");
29672@@ -6787,7 +6787,7 @@ static int md_seq_open(struct inode *ino
29673 return error;
29674
29675 seq = file->private_data;
29676- seq->poll_event = atomic_read(&md_event_count);
29677+ seq->poll_event = atomic_read_unchecked(&md_event_count);
29678 return error;
29679 }
29680
29681@@ -6801,7 +6801,7 @@ static unsigned int mdstat_poll(struct f
29682 /* always allow read */
29683 mask = POLLIN | POLLRDNORM;
29684
29685- if (seq->poll_event != atomic_read(&md_event_count))
29686+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
29687 mask |= POLLERR | POLLPRI;
29688 return mask;
29689 }
29690@@ -6845,7 +6845,7 @@ static int is_mddev_idle(mddev_t *mddev,
29691 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
29692 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
29693 (int)part_stat_read(&disk->part0, sectors[1]) -
29694- atomic_read(&disk->sync_io);
29695+ atomic_read_unchecked(&disk->sync_io);
29696 /* sync IO will cause sync_io to increase before the disk_stats
29697 * as sync_io is counted when a request starts, and
29698 * disk_stats is counted when it completes.
29699diff -urNp linux-3.1.1/drivers/md/md.h linux-3.1.1/drivers/md/md.h
29700--- linux-3.1.1/drivers/md/md.h 2011-11-11 15:19:27.000000000 -0500
29701+++ linux-3.1.1/drivers/md/md.h 2011-11-16 18:39:07.000000000 -0500
29702@@ -124,13 +124,13 @@ struct mdk_rdev_s
29703 * only maintained for arrays that
29704 * support hot removal
29705 */
29706- atomic_t read_errors; /* number of consecutive read errors that
29707+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
29708 * we have tried to ignore.
29709 */
29710 struct timespec last_read_error; /* monotonic time since our
29711 * last read error
29712 */
29713- atomic_t corrected_errors; /* number of corrected read errors,
29714+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
29715 * for reporting to userspace and storing
29716 * in superblock.
29717 */
29718@@ -415,7 +415,7 @@ static inline void rdev_dec_pending(mdk_
29719
29720 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
29721 {
29722- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
29723+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
29724 }
29725
29726 struct mdk_personality
29727diff -urNp linux-3.1.1/drivers/md/raid10.c linux-3.1.1/drivers/md/raid10.c
29728--- linux-3.1.1/drivers/md/raid10.c 2011-11-11 15:19:27.000000000 -0500
29729+++ linux-3.1.1/drivers/md/raid10.c 2011-11-16 18:39:07.000000000 -0500
29730@@ -1423,7 +1423,7 @@ static void end_sync_read(struct bio *bi
29731 /* The write handler will notice the lack of
29732 * R10BIO_Uptodate and record any errors etc
29733 */
29734- atomic_add(r10_bio->sectors,
29735+ atomic_add_unchecked(r10_bio->sectors,
29736 &conf->mirrors[d].rdev->corrected_errors);
29737
29738 /* for reconstruct, we always reschedule after a read.
29739@@ -1723,7 +1723,7 @@ static void check_decay_read_errors(mdde
29740 {
29741 struct timespec cur_time_mon;
29742 unsigned long hours_since_last;
29743- unsigned int read_errors = atomic_read(&rdev->read_errors);
29744+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
29745
29746 ktime_get_ts(&cur_time_mon);
29747
29748@@ -1745,9 +1745,9 @@ static void check_decay_read_errors(mdde
29749 * overflowing the shift of read_errors by hours_since_last.
29750 */
29751 if (hours_since_last >= 8 * sizeof(read_errors))
29752- atomic_set(&rdev->read_errors, 0);
29753+ atomic_set_unchecked(&rdev->read_errors, 0);
29754 else
29755- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
29756+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
29757 }
29758
29759 static int r10_sync_page_io(mdk_rdev_t *rdev, sector_t sector,
29760@@ -1797,8 +1797,8 @@ static void fix_read_error(conf_t *conf,
29761 return;
29762
29763 check_decay_read_errors(mddev, rdev);
29764- atomic_inc(&rdev->read_errors);
29765- if (atomic_read(&rdev->read_errors) > max_read_errors) {
29766+ atomic_inc_unchecked(&rdev->read_errors);
29767+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
29768 char b[BDEVNAME_SIZE];
29769 bdevname(rdev->bdev, b);
29770
29771@@ -1806,7 +1806,7 @@ static void fix_read_error(conf_t *conf,
29772 "md/raid10:%s: %s: Raid device exceeded "
29773 "read_error threshold [cur %d:max %d]\n",
29774 mdname(mddev), b,
29775- atomic_read(&rdev->read_errors), max_read_errors);
29776+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
29777 printk(KERN_NOTICE
29778 "md/raid10:%s: %s: Failing raid device\n",
29779 mdname(mddev), b);
29780@@ -1951,7 +1951,7 @@ static void fix_read_error(conf_t *conf,
29781 (unsigned long long)(
29782 sect + rdev->data_offset),
29783 bdevname(rdev->bdev, b));
29784- atomic_add(s, &rdev->corrected_errors);
29785+ atomic_add_unchecked(s, &rdev->corrected_errors);
29786 }
29787
29788 rdev_dec_pending(rdev, mddev);
29789diff -urNp linux-3.1.1/drivers/md/raid1.c linux-3.1.1/drivers/md/raid1.c
29790--- linux-3.1.1/drivers/md/raid1.c 2011-11-11 15:19:27.000000000 -0500
29791+++ linux-3.1.1/drivers/md/raid1.c 2011-11-16 18:39:07.000000000 -0500
29792@@ -1541,7 +1541,7 @@ static int fix_sync_read_error(r1bio_t *
29793 if (r1_sync_page_io(rdev, sect, s,
29794 bio->bi_io_vec[idx].bv_page,
29795 READ) != 0)
29796- atomic_add(s, &rdev->corrected_errors);
29797+ atomic_add_unchecked(s, &rdev->corrected_errors);
29798 }
29799 sectors -= s;
29800 sect += s;
29801@@ -1754,7 +1754,7 @@ static void fix_read_error(conf_t *conf,
29802 test_bit(In_sync, &rdev->flags)) {
29803 if (r1_sync_page_io(rdev, sect, s,
29804 conf->tmppage, READ)) {
29805- atomic_add(s, &rdev->corrected_errors);
29806+ atomic_add_unchecked(s, &rdev->corrected_errors);
29807 printk(KERN_INFO
29808 "md/raid1:%s: read error corrected "
29809 "(%d sectors at %llu on %s)\n",
29810diff -urNp linux-3.1.1/drivers/md/raid5.c linux-3.1.1/drivers/md/raid5.c
29811--- linux-3.1.1/drivers/md/raid5.c 2011-11-11 15:19:27.000000000 -0500
29812+++ linux-3.1.1/drivers/md/raid5.c 2011-11-16 18:40:10.000000000 -0500
29813@@ -1616,19 +1616,19 @@ static void raid5_end_read_request(struc
29814 (unsigned long long)(sh->sector
29815 + rdev->data_offset),
29816 bdevname(rdev->bdev, b));
29817- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
29818+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
29819 clear_bit(R5_ReadError, &sh->dev[i].flags);
29820 clear_bit(R5_ReWrite, &sh->dev[i].flags);
29821 }
29822- if (atomic_read(&conf->disks[i].rdev->read_errors))
29823- atomic_set(&conf->disks[i].rdev->read_errors, 0);
29824+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
29825+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
29826 } else {
29827 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
29828 int retry = 0;
29829 rdev = conf->disks[i].rdev;
29830
29831 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
29832- atomic_inc(&rdev->read_errors);
29833+ atomic_inc_unchecked(&rdev->read_errors);
29834 if (conf->mddev->degraded >= conf->max_degraded)
29835 printk_ratelimited(
29836 KERN_WARNING
29837@@ -1648,7 +1648,7 @@ static void raid5_end_read_request(struc
29838 (unsigned long long)(sh->sector
29839 + rdev->data_offset),
29840 bdn);
29841- else if (atomic_read(&rdev->read_errors)
29842+ else if (atomic_read_unchecked(&rdev->read_errors)
29843 > conf->max_nr_stripes)
29844 printk(KERN_WARNING
29845 "md/raid:%s: Too many read errors, failing device %s.\n",
29846@@ -1978,6 +1978,7 @@ static sector_t compute_blocknr(struct s
29847 sector_t r_sector;
29848 struct stripe_head sh2;
29849
29850+ pax_track_stack();
29851
29852 chunk_offset = sector_div(new_sector, sectors_per_chunk);
29853 stripe = new_sector;
29854diff -urNp linux-3.1.1/drivers/media/common/saa7146_hlp.c linux-3.1.1/drivers/media/common/saa7146_hlp.c
29855--- linux-3.1.1/drivers/media/common/saa7146_hlp.c 2011-11-11 15:19:27.000000000 -0500
29856+++ linux-3.1.1/drivers/media/common/saa7146_hlp.c 2011-11-16 18:40:10.000000000 -0500
29857@@ -353,6 +353,8 @@ static void calculate_clipping_registers
29858
29859 int x[32], y[32], w[32], h[32];
29860
29861+ pax_track_stack();
29862+
29863 /* clear out memory */
29864 memset(&line_list[0], 0x00, sizeof(u32)*32);
29865 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
29866diff -urNp linux-3.1.1/drivers/media/dvb/ddbridge/ddbridge-core.c linux-3.1.1/drivers/media/dvb/ddbridge/ddbridge-core.c
29867--- linux-3.1.1/drivers/media/dvb/ddbridge/ddbridge-core.c 2011-11-11 15:19:27.000000000 -0500
29868+++ linux-3.1.1/drivers/media/dvb/ddbridge/ddbridge-core.c 2011-11-16 18:39:07.000000000 -0500
29869@@ -1675,7 +1675,7 @@ static struct ddb_info ddb_v6 = {
29870 .subvendor = _subvend, .subdevice = _subdev, \
29871 .driver_data = (unsigned long)&_driverdata }
29872
29873-static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
29874+static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
29875 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
29876 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
29877 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
29878diff -urNp linux-3.1.1/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-3.1.1/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
29879--- linux-3.1.1/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-11-11 15:19:27.000000000 -0500
29880+++ linux-3.1.1/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-11-16 18:40:10.000000000 -0500
29881@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
29882 u8 buf[HOST_LINK_BUF_SIZE];
29883 int i;
29884
29885+ pax_track_stack();
29886+
29887 dprintk("%s\n", __func__);
29888
29889 /* check if we have space for a link buf in the rx_buffer */
29890@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
29891 unsigned long timeout;
29892 int written;
29893
29894+ pax_track_stack();
29895+
29896 dprintk("%s\n", __func__);
29897
29898 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
29899diff -urNp linux-3.1.1/drivers/media/dvb/dvb-core/dvb_demux.h linux-3.1.1/drivers/media/dvb/dvb-core/dvb_demux.h
29900--- linux-3.1.1/drivers/media/dvb/dvb-core/dvb_demux.h 2011-11-11 15:19:27.000000000 -0500
29901+++ linux-3.1.1/drivers/media/dvb/dvb-core/dvb_demux.h 2011-11-16 18:40:10.000000000 -0500
29902@@ -68,12 +68,12 @@ struct dvb_demux_feed {
29903 union {
29904 struct dmx_ts_feed ts;
29905 struct dmx_section_feed sec;
29906- } feed;
29907+ } __no_const feed;
29908
29909 union {
29910 dmx_ts_cb ts;
29911 dmx_section_cb sec;
29912- } cb;
29913+ } __no_const cb;
29914
29915 struct dvb_demux *demux;
29916 void *priv;
29917diff -urNp linux-3.1.1/drivers/media/dvb/dvb-core/dvbdev.c linux-3.1.1/drivers/media/dvb/dvb-core/dvbdev.c
29918--- linux-3.1.1/drivers/media/dvb/dvb-core/dvbdev.c 2011-11-11 15:19:27.000000000 -0500
29919+++ linux-3.1.1/drivers/media/dvb/dvb-core/dvbdev.c 2011-11-16 18:39:07.000000000 -0500
29920@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
29921 const struct dvb_device *template, void *priv, int type)
29922 {
29923 struct dvb_device *dvbdev;
29924- struct file_operations *dvbdevfops;
29925+ file_operations_no_const *dvbdevfops;
29926 struct device *clsdev;
29927 int minor;
29928 int id;
29929diff -urNp linux-3.1.1/drivers/media/dvb/dvb-usb/cxusb.c linux-3.1.1/drivers/media/dvb/dvb-usb/cxusb.c
29930--- linux-3.1.1/drivers/media/dvb/dvb-usb/cxusb.c 2011-11-11 15:19:27.000000000 -0500
29931+++ linux-3.1.1/drivers/media/dvb/dvb-usb/cxusb.c 2011-11-16 18:39:07.000000000 -0500
29932@@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_di
29933 struct dib0700_adapter_state {
29934 int (*set_param_save) (struct dvb_frontend *,
29935 struct dvb_frontend_parameters *);
29936-};
29937+} __no_const;
29938
29939 static int dib7070_set_param_override(struct dvb_frontend *fe,
29940 struct dvb_frontend_parameters *fep)
29941diff -urNp linux-3.1.1/drivers/media/dvb/dvb-usb/dib0700_core.c linux-3.1.1/drivers/media/dvb/dvb-usb/dib0700_core.c
29942--- linux-3.1.1/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-11-11 15:19:27.000000000 -0500
29943+++ linux-3.1.1/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-11-16 18:40:10.000000000 -0500
29944@@ -478,6 +478,8 @@ int dib0700_download_firmware(struct usb
29945 if (!buf)
29946 return -ENOMEM;
29947
29948+ pax_track_stack();
29949+
29950 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
29951 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
29952 hx.addr, hx.len, hx.chk);
29953diff -urNp linux-3.1.1/drivers/media/dvb/dvb-usb/dibusb.h linux-3.1.1/drivers/media/dvb/dvb-usb/dibusb.h
29954--- linux-3.1.1/drivers/media/dvb/dvb-usb/dibusb.h 2011-11-11 15:19:27.000000000 -0500
29955+++ linux-3.1.1/drivers/media/dvb/dvb-usb/dibusb.h 2011-11-16 18:40:10.000000000 -0500
29956@@ -97,7 +97,7 @@
29957 #define DIBUSB_IOCTL_CMD_DISABLE_STREAM 0x02
29958
29959 struct dibusb_state {
29960- struct dib_fe_xfer_ops ops;
29961+ dib_fe_xfer_ops_no_const ops;
29962 int mt2060_present;
29963 u8 tuner_addr;
29964 };
29965diff -urNp linux-3.1.1/drivers/media/dvb/dvb-usb/dw2102.c linux-3.1.1/drivers/media/dvb/dvb-usb/dw2102.c
29966--- linux-3.1.1/drivers/media/dvb/dvb-usb/dw2102.c 2011-11-11 15:19:27.000000000 -0500
29967+++ linux-3.1.1/drivers/media/dvb/dvb-usb/dw2102.c 2011-11-16 18:39:07.000000000 -0500
29968@@ -95,7 +95,7 @@ struct su3000_state {
29969
29970 struct s6x0_state {
29971 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
29972-};
29973+} __no_const;
29974
29975 /* debug */
29976 static int dvb_usb_dw2102_debug;
29977diff -urNp linux-3.1.1/drivers/media/dvb/dvb-usb/lmedm04.c linux-3.1.1/drivers/media/dvb/dvb-usb/lmedm04.c
29978--- linux-3.1.1/drivers/media/dvb/dvb-usb/lmedm04.c 2011-11-11 15:19:27.000000000 -0500
29979+++ linux-3.1.1/drivers/media/dvb/dvb-usb/lmedm04.c 2011-11-16 18:40:10.000000000 -0500
29980@@ -742,6 +742,7 @@ static int lme2510_download_firmware(str
29981 usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
29982 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);
29983
29984+ pax_track_stack();
29985
29986 data[0] = 0x8a;
29987 len_in = 1;
29988@@ -764,6 +765,8 @@ static void lme_coldreset(struct usb_dev
29989 int ret = 0, len_in;
29990 u8 data[512] = {0};
29991
29992+ pax_track_stack();
29993+
29994 data[0] = 0x0a;
29995 len_in = 1;
29996 info("FRM Firmware Cold Reset");
29997diff -urNp linux-3.1.1/drivers/media/dvb/frontends/dib3000.h linux-3.1.1/drivers/media/dvb/frontends/dib3000.h
29998--- linux-3.1.1/drivers/media/dvb/frontends/dib3000.h 2011-11-11 15:19:27.000000000 -0500
29999+++ linux-3.1.1/drivers/media/dvb/frontends/dib3000.h 2011-11-16 18:40:10.000000000 -0500
30000@@ -40,10 +40,11 @@ struct dib_fe_xfer_ops
30001 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
30002 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
30003 };
30004+typedef struct dib_fe_xfer_ops __no_const dib_fe_xfer_ops_no_const;
30005
30006 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
30007 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
30008- struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops);
30009+ struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops);
30010 #else
30011 static inline struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
30012 struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
30013diff -urNp linux-3.1.1/drivers/media/dvb/frontends/dib3000mb.c linux-3.1.1/drivers/media/dvb/frontends/dib3000mb.c
30014--- linux-3.1.1/drivers/media/dvb/frontends/dib3000mb.c 2011-11-11 15:19:27.000000000 -0500
30015+++ linux-3.1.1/drivers/media/dvb/frontends/dib3000mb.c 2011-11-16 18:40:10.000000000 -0500
30016@@ -756,7 +756,7 @@ static int dib3000mb_tuner_pass_ctrl(str
30017 static struct dvb_frontend_ops dib3000mb_ops;
30018
30019 struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
30020- struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
30021+ struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops)
30022 {
30023 struct dib3000_state* state = NULL;
30024
30025diff -urNp linux-3.1.1/drivers/media/dvb/frontends/mb86a16.c linux-3.1.1/drivers/media/dvb/frontends/mb86a16.c
30026--- linux-3.1.1/drivers/media/dvb/frontends/mb86a16.c 2011-11-11 15:19:27.000000000 -0500
30027+++ linux-3.1.1/drivers/media/dvb/frontends/mb86a16.c 2011-11-16 18:40:10.000000000 -0500
30028@@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
30029 int ret = -1;
30030 int sync;
30031
30032+ pax_track_stack();
30033+
30034 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
30035
30036 fcp = 3000;
30037diff -urNp linux-3.1.1/drivers/media/dvb/frontends/or51211.c linux-3.1.1/drivers/media/dvb/frontends/or51211.c
30038--- linux-3.1.1/drivers/media/dvb/frontends/or51211.c 2011-11-11 15:19:27.000000000 -0500
30039+++ linux-3.1.1/drivers/media/dvb/frontends/or51211.c 2011-11-16 18:40:10.000000000 -0500
30040@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
30041 u8 tudata[585];
30042 int i;
30043
30044+ pax_track_stack();
30045+
30046 dprintk("Firmware is %zd bytes\n",fw->size);
30047
30048 /* Get eprom data */
30049diff -urNp linux-3.1.1/drivers/media/dvb/ngene/ngene-cards.c linux-3.1.1/drivers/media/dvb/ngene/ngene-cards.c
30050--- linux-3.1.1/drivers/media/dvb/ngene/ngene-cards.c 2011-11-11 15:19:27.000000000 -0500
30051+++ linux-3.1.1/drivers/media/dvb/ngene/ngene-cards.c 2011-11-16 18:39:07.000000000 -0500
30052@@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780
30053
30054 /****************************************************************************/
30055
30056-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
30057+static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
30058 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
30059 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
30060 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
30061diff -urNp linux-3.1.1/drivers/media/radio/radio-cadet.c linux-3.1.1/drivers/media/radio/radio-cadet.c
30062--- linux-3.1.1/drivers/media/radio/radio-cadet.c 2011-11-11 15:19:27.000000000 -0500
30063+++ linux-3.1.1/drivers/media/radio/radio-cadet.c 2011-11-16 18:39:07.000000000 -0500
30064@@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *f
30065 unsigned char readbuf[RDS_BUFFER];
30066 int i = 0;
30067
30068+ if (count > RDS_BUFFER)
30069+ return -EFAULT;
30070 mutex_lock(&dev->lock);
30071 if (dev->rdsstat == 0) {
30072 dev->rdsstat = 1;
30073diff -urNp linux-3.1.1/drivers/media/video/au0828/au0828.h linux-3.1.1/drivers/media/video/au0828/au0828.h
30074--- linux-3.1.1/drivers/media/video/au0828/au0828.h 2011-11-11 15:19:27.000000000 -0500
30075+++ linux-3.1.1/drivers/media/video/au0828/au0828.h 2011-11-16 18:39:07.000000000 -0500
30076@@ -191,7 +191,7 @@ struct au0828_dev {
30077
30078 /* I2C */
30079 struct i2c_adapter i2c_adap;
30080- struct i2c_algorithm i2c_algo;
30081+ i2c_algorithm_no_const i2c_algo;
30082 struct i2c_client i2c_client;
30083 u32 i2c_rc;
30084
30085diff -urNp linux-3.1.1/drivers/media/video/cx18/cx18-driver.c linux-3.1.1/drivers/media/video/cx18/cx18-driver.c
30086--- linux-3.1.1/drivers/media/video/cx18/cx18-driver.c 2011-11-11 15:19:27.000000000 -0500
30087+++ linux-3.1.1/drivers/media/video/cx18/cx18-driver.c 2011-11-16 18:40:10.000000000 -0500
30088@@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
30089 struct i2c_client c;
30090 u8 eedata[256];
30091
30092+ pax_track_stack();
30093+
30094 memset(&c, 0, sizeof(c));
30095 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
30096 c.adapter = &cx->i2c_adap[0];
30097diff -urNp linux-3.1.1/drivers/media/video/cx23885/cx23885-input.c linux-3.1.1/drivers/media/video/cx23885/cx23885-input.c
30098--- linux-3.1.1/drivers/media/video/cx23885/cx23885-input.c 2011-11-11 15:19:27.000000000 -0500
30099+++ linux-3.1.1/drivers/media/video/cx23885/cx23885-input.c 2011-11-16 18:40:10.000000000 -0500
30100@@ -53,6 +53,8 @@ static void cx23885_input_process_measur
30101 bool handle = false;
30102 struct ir_raw_event ir_core_event[64];
30103
30104+ pax_track_stack();
30105+
30106 do {
30107 num = 0;
30108 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
30109diff -urNp linux-3.1.1/drivers/media/video/cx88/cx88-alsa.c linux-3.1.1/drivers/media/video/cx88/cx88-alsa.c
30110--- linux-3.1.1/drivers/media/video/cx88/cx88-alsa.c 2011-11-11 15:19:27.000000000 -0500
30111+++ linux-3.1.1/drivers/media/video/cx88/cx88-alsa.c 2011-11-16 18:39:07.000000000 -0500
30112@@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_
30113 * Only boards with eeprom and byte 1 at eeprom=1 have it
30114 */
30115
30116-static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
30117+static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
30118 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
30119 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
30120 {0, }
30121diff -urNp linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
30122--- linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-11-11 15:19:27.000000000 -0500
30123+++ linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-11-16 18:40:10.000000000 -0500
30124@@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
30125 u8 *eeprom;
30126 struct tveeprom tvdata;
30127
30128+ pax_track_stack();
30129+
30130 memset(&tvdata,0,sizeof(tvdata));
30131
30132 eeprom = pvr2_eeprom_fetch(hdw);
30133diff -urNp linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
30134--- linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-11-11 15:19:27.000000000 -0500
30135+++ linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-11-16 18:39:07.000000000 -0500
30136@@ -196,7 +196,7 @@ struct pvr2_hdw {
30137
30138 /* I2C stuff */
30139 struct i2c_adapter i2c_adap;
30140- struct i2c_algorithm i2c_algo;
30141+ i2c_algorithm_no_const i2c_algo;
30142 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
30143 int i2c_cx25840_hack_state;
30144 int i2c_linked;
30145diff -urNp linux-3.1.1/drivers/media/video/saa7134/saa6752hs.c linux-3.1.1/drivers/media/video/saa7134/saa6752hs.c
30146--- linux-3.1.1/drivers/media/video/saa7134/saa6752hs.c 2011-11-11 15:19:27.000000000 -0500
30147+++ linux-3.1.1/drivers/media/video/saa7134/saa6752hs.c 2011-11-16 18:40:10.000000000 -0500
30148@@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
30149 unsigned char localPAT[256];
30150 unsigned char localPMT[256];
30151
30152+ pax_track_stack();
30153+
30154 /* Set video format - must be done first as it resets other settings */
30155 set_reg8(client, 0x41, h->video_format);
30156
30157diff -urNp linux-3.1.1/drivers/media/video/saa7164/saa7164-cmd.c linux-3.1.1/drivers/media/video/saa7164/saa7164-cmd.c
30158--- linux-3.1.1/drivers/media/video/saa7164/saa7164-cmd.c 2011-11-11 15:19:27.000000000 -0500
30159+++ linux-3.1.1/drivers/media/video/saa7164/saa7164-cmd.c 2011-11-16 18:40:10.000000000 -0500
30160@@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
30161 u8 tmp[512];
30162 dprintk(DBGLVL_CMD, "%s()\n", __func__);
30163
30164+ pax_track_stack();
30165+
30166 /* While any outstand message on the bus exists... */
30167 do {
30168
30169@@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
30170 u8 tmp[512];
30171 dprintk(DBGLVL_CMD, "%s()\n", __func__);
30172
30173+ pax_track_stack();
30174+
30175 while (loop) {
30176
30177 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
30178diff -urNp linux-3.1.1/drivers/media/video/timblogiw.c linux-3.1.1/drivers/media/video/timblogiw.c
30179--- linux-3.1.1/drivers/media/video/timblogiw.c 2011-11-11 15:19:27.000000000 -0500
30180+++ linux-3.1.1/drivers/media/video/timblogiw.c 2011-11-16 18:40:10.000000000 -0500
30181@@ -744,7 +744,7 @@ static int timblogiw_mmap(struct file *f
30182
30183 /* Platform device functions */
30184
30185-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
30186+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
30187 .vidioc_querycap = timblogiw_querycap,
30188 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
30189 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
30190diff -urNp linux-3.1.1/drivers/media/video/usbvision/usbvision-core.c linux-3.1.1/drivers/media/video/usbvision/usbvision-core.c
30191--- linux-3.1.1/drivers/media/video/usbvision/usbvision-core.c 2011-11-11 15:19:27.000000000 -0500
30192+++ linux-3.1.1/drivers/media/video/usbvision/usbvision-core.c 2011-11-16 18:40:10.000000000 -0500
30193@@ -707,6 +707,8 @@ static enum parse_state usbvision_parse_
30194 unsigned char rv, gv, bv;
30195 static unsigned char *Y, *U, *V;
30196
30197+ pax_track_stack();
30198+
30199 frame = usbvision->cur_frame;
30200 image_size = frame->frmwidth * frame->frmheight;
30201 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
30202diff -urNp linux-3.1.1/drivers/media/video/videobuf-dma-sg.c linux-3.1.1/drivers/media/video/videobuf-dma-sg.c
30203--- linux-3.1.1/drivers/media/video/videobuf-dma-sg.c 2011-11-11 15:19:27.000000000 -0500
30204+++ linux-3.1.1/drivers/media/video/videobuf-dma-sg.c 2011-11-16 18:40:10.000000000 -0500
30205@@ -607,6 +607,8 @@ void *videobuf_sg_alloc(size_t size)
30206 {
30207 struct videobuf_queue q;
30208
30209+ pax_track_stack();
30210+
30211 /* Required to make generic handler to call __videobuf_alloc */
30212 q.int_ops = &sg_ops;
30213
30214diff -urNp linux-3.1.1/drivers/message/fusion/mptbase.c linux-3.1.1/drivers/message/fusion/mptbase.c
30215--- linux-3.1.1/drivers/message/fusion/mptbase.c 2011-11-11 15:19:27.000000000 -0500
30216+++ linux-3.1.1/drivers/message/fusion/mptbase.c 2011-11-16 18:40:10.000000000 -0500
30217@@ -6681,8 +6681,13 @@ static int mpt_iocinfo_proc_show(struct
30218 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
30219 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
30220
30221+#ifdef CONFIG_GRKERNSEC_HIDESYM
30222+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
30223+#else
30224 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
30225 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
30226+#endif
30227+
30228 /*
30229 * Rounding UP to nearest 4-kB boundary here...
30230 */
30231diff -urNp linux-3.1.1/drivers/message/fusion/mptsas.c linux-3.1.1/drivers/message/fusion/mptsas.c
30232--- linux-3.1.1/drivers/message/fusion/mptsas.c 2011-11-11 15:19:27.000000000 -0500
30233+++ linux-3.1.1/drivers/message/fusion/mptsas.c 2011-11-16 18:39:07.000000000 -0500
30234@@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
30235 return 0;
30236 }
30237
30238+static inline void
30239+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
30240+{
30241+ if (phy_info->port_details) {
30242+ phy_info->port_details->rphy = rphy;
30243+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
30244+ ioc->name, rphy));
30245+ }
30246+
30247+ if (rphy) {
30248+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
30249+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
30250+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
30251+ ioc->name, rphy, rphy->dev.release));
30252+ }
30253+}
30254+
30255 /* no mutex */
30256 static void
30257 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
30258@@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
30259 return NULL;
30260 }
30261
30262-static inline void
30263-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
30264-{
30265- if (phy_info->port_details) {
30266- phy_info->port_details->rphy = rphy;
30267- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
30268- ioc->name, rphy));
30269- }
30270-
30271- if (rphy) {
30272- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
30273- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
30274- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
30275- ioc->name, rphy, rphy->dev.release));
30276- }
30277-}
30278-
30279 static inline struct sas_port *
30280 mptsas_get_port(struct mptsas_phyinfo *phy_info)
30281 {
30282diff -urNp linux-3.1.1/drivers/message/fusion/mptscsih.c linux-3.1.1/drivers/message/fusion/mptscsih.c
30283--- linux-3.1.1/drivers/message/fusion/mptscsih.c 2011-11-11 15:19:27.000000000 -0500
30284+++ linux-3.1.1/drivers/message/fusion/mptscsih.c 2011-11-16 18:39:07.000000000 -0500
30285@@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
30286
30287 h = shost_priv(SChost);
30288
30289- if (h) {
30290- if (h->info_kbuf == NULL)
30291- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
30292- return h->info_kbuf;
30293- h->info_kbuf[0] = '\0';
30294+ if (!h)
30295+ return NULL;
30296
30297- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
30298- h->info_kbuf[size-1] = '\0';
30299- }
30300+ if (h->info_kbuf == NULL)
30301+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
30302+ return h->info_kbuf;
30303+ h->info_kbuf[0] = '\0';
30304+
30305+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
30306+ h->info_kbuf[size-1] = '\0';
30307
30308 return h->info_kbuf;
30309 }
30310diff -urNp linux-3.1.1/drivers/message/i2o/i2o_config.c linux-3.1.1/drivers/message/i2o/i2o_config.c
30311--- linux-3.1.1/drivers/message/i2o/i2o_config.c 2011-11-11 15:19:27.000000000 -0500
30312+++ linux-3.1.1/drivers/message/i2o/i2o_config.c 2011-11-16 18:40:10.000000000 -0500
30313@@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
30314 struct i2o_message *msg;
30315 unsigned int iop;
30316
30317+ pax_track_stack();
30318+
30319 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
30320 return -EFAULT;
30321
30322diff -urNp linux-3.1.1/drivers/message/i2o/i2o_proc.c linux-3.1.1/drivers/message/i2o/i2o_proc.c
30323--- linux-3.1.1/drivers/message/i2o/i2o_proc.c 2011-11-11 15:19:27.000000000 -0500
30324+++ linux-3.1.1/drivers/message/i2o/i2o_proc.c 2011-11-16 18:39:07.000000000 -0500
30325@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
30326 "Array Controller Device"
30327 };
30328
30329-static char *chtostr(u8 * chars, int n)
30330-{
30331- char tmp[256];
30332- tmp[0] = 0;
30333- return strncat(tmp, (char *)chars, n);
30334-}
30335-
30336 static int i2o_report_query_status(struct seq_file *seq, int block_status,
30337 char *group)
30338 {
30339@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
30340
30341 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
30342 seq_printf(seq, "%-#8x", ddm_table.module_id);
30343- seq_printf(seq, "%-29s",
30344- chtostr(ddm_table.module_name_version, 28));
30345+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
30346 seq_printf(seq, "%9d ", ddm_table.data_size);
30347 seq_printf(seq, "%8d", ddm_table.code_size);
30348
30349@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
30350
30351 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
30352 seq_printf(seq, "%-#8x", dst->module_id);
30353- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
30354- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
30355+ seq_printf(seq, "%-.28s", dst->module_name_version);
30356+ seq_printf(seq, "%-.8s", dst->date);
30357 seq_printf(seq, "%8d ", dst->module_size);
30358 seq_printf(seq, "%8d ", dst->mpb_size);
30359 seq_printf(seq, "0x%04x", dst->module_flags);
30360@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
30361 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
30362 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
30363 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
30364- seq_printf(seq, "Vendor info : %s\n",
30365- chtostr((u8 *) (work32 + 2), 16));
30366- seq_printf(seq, "Product info : %s\n",
30367- chtostr((u8 *) (work32 + 6), 16));
30368- seq_printf(seq, "Description : %s\n",
30369- chtostr((u8 *) (work32 + 10), 16));
30370- seq_printf(seq, "Product rev. : %s\n",
30371- chtostr((u8 *) (work32 + 14), 8));
30372+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
30373+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
30374+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
30375+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
30376
30377 seq_printf(seq, "Serial number : ");
30378 print_serial_number(seq, (u8 *) (work32 + 16),
30379@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
30380 }
30381
30382 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
30383- seq_printf(seq, "Module name : %s\n",
30384- chtostr(result.module_name, 24));
30385- seq_printf(seq, "Module revision : %s\n",
30386- chtostr(result.module_rev, 8));
30387+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
30388+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
30389
30390 seq_printf(seq, "Serial number : ");
30391 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
30392@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
30393 return 0;
30394 }
30395
30396- seq_printf(seq, "Device name : %s\n",
30397- chtostr(result.device_name, 64));
30398- seq_printf(seq, "Service name : %s\n",
30399- chtostr(result.service_name, 64));
30400- seq_printf(seq, "Physical name : %s\n",
30401- chtostr(result.physical_location, 64));
30402- seq_printf(seq, "Instance number : %s\n",
30403- chtostr(result.instance_number, 4));
30404+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
30405+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
30406+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
30407+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
30408
30409 return 0;
30410 }
30411diff -urNp linux-3.1.1/drivers/message/i2o/iop.c linux-3.1.1/drivers/message/i2o/iop.c
30412--- linux-3.1.1/drivers/message/i2o/iop.c 2011-11-11 15:19:27.000000000 -0500
30413+++ linux-3.1.1/drivers/message/i2o/iop.c 2011-11-16 18:39:07.000000000 -0500
30414@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
30415
30416 spin_lock_irqsave(&c->context_list_lock, flags);
30417
30418- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
30419- atomic_inc(&c->context_list_counter);
30420+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
30421+ atomic_inc_unchecked(&c->context_list_counter);
30422
30423- entry->context = atomic_read(&c->context_list_counter);
30424+ entry->context = atomic_read_unchecked(&c->context_list_counter);
30425
30426 list_add(&entry->list, &c->context_list);
30427
30428@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
30429
30430 #if BITS_PER_LONG == 64
30431 spin_lock_init(&c->context_list_lock);
30432- atomic_set(&c->context_list_counter, 0);
30433+ atomic_set_unchecked(&c->context_list_counter, 0);
30434 INIT_LIST_HEAD(&c->context_list);
30435 #endif
30436
30437diff -urNp linux-3.1.1/drivers/mfd/ab3100-core.c linux-3.1.1/drivers/mfd/ab3100-core.c
30438--- linux-3.1.1/drivers/mfd/ab3100-core.c 2011-11-11 15:19:27.000000000 -0500
30439+++ linux-3.1.1/drivers/mfd/ab3100-core.c 2011-11-16 18:39:07.000000000 -0500
30440@@ -809,7 +809,7 @@ struct ab_family_id {
30441 char *name;
30442 };
30443
30444-static const struct ab_family_id ids[] __devinitdata = {
30445+static const struct ab_family_id ids[] __devinitconst = {
30446 /* AB3100 */
30447 {
30448 .id = 0xc0,
30449diff -urNp linux-3.1.1/drivers/mfd/abx500-core.c linux-3.1.1/drivers/mfd/abx500-core.c
30450--- linux-3.1.1/drivers/mfd/abx500-core.c 2011-11-11 15:19:27.000000000 -0500
30451+++ linux-3.1.1/drivers/mfd/abx500-core.c 2011-11-16 18:39:07.000000000 -0500
30452@@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
30453
30454 struct abx500_device_entry {
30455 struct list_head list;
30456- struct abx500_ops ops;
30457+ abx500_ops_no_const ops;
30458 struct device *dev;
30459 };
30460
30461diff -urNp linux-3.1.1/drivers/mfd/janz-cmodio.c linux-3.1.1/drivers/mfd/janz-cmodio.c
30462--- linux-3.1.1/drivers/mfd/janz-cmodio.c 2011-11-11 15:19:27.000000000 -0500
30463+++ linux-3.1.1/drivers/mfd/janz-cmodio.c 2011-11-16 18:39:07.000000000 -0500
30464@@ -13,6 +13,7 @@
30465
30466 #include <linux/kernel.h>
30467 #include <linux/module.h>
30468+#include <linux/slab.h>
30469 #include <linux/init.h>
30470 #include <linux/pci.h>
30471 #include <linux/interrupt.h>
30472diff -urNp linux-3.1.1/drivers/mfd/wm8350-i2c.c linux-3.1.1/drivers/mfd/wm8350-i2c.c
30473--- linux-3.1.1/drivers/mfd/wm8350-i2c.c 2011-11-11 15:19:27.000000000 -0500
30474+++ linux-3.1.1/drivers/mfd/wm8350-i2c.c 2011-11-16 18:40:10.000000000 -0500
30475@@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
30476 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
30477 int ret;
30478
30479+ pax_track_stack();
30480+
30481 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
30482 return -EINVAL;
30483
30484diff -urNp linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.c linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.c
30485--- linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.c 2011-11-11 15:19:27.000000000 -0500
30486+++ linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.c 2011-11-16 18:39:07.000000000 -0500
30487@@ -437,7 +437,7 @@ static irqreturn_t lis302dl_interrupt(in
30488 * the lid is closed. This leads to interrupts as soon as a little move
30489 * is done.
30490 */
30491- atomic_inc(&lis3_dev.count);
30492+ atomic_inc_unchecked(&lis3_dev.count);
30493
30494 wake_up_interruptible(&lis3_dev.misc_wait);
30495 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
30496@@ -520,7 +520,7 @@ static int lis3lv02d_misc_open(struct in
30497 if (lis3_dev.pm_dev)
30498 pm_runtime_get_sync(lis3_dev.pm_dev);
30499
30500- atomic_set(&lis3_dev.count, 0);
30501+ atomic_set_unchecked(&lis3_dev.count, 0);
30502 return 0;
30503 }
30504
30505@@ -547,7 +547,7 @@ static ssize_t lis3lv02d_misc_read(struc
30506 add_wait_queue(&lis3_dev.misc_wait, &wait);
30507 while (true) {
30508 set_current_state(TASK_INTERRUPTIBLE);
30509- data = atomic_xchg(&lis3_dev.count, 0);
30510+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
30511 if (data)
30512 break;
30513
30514@@ -585,7 +585,7 @@ out:
30515 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
30516 {
30517 poll_wait(file, &lis3_dev.misc_wait, wait);
30518- if (atomic_read(&lis3_dev.count))
30519+ if (atomic_read_unchecked(&lis3_dev.count))
30520 return POLLIN | POLLRDNORM;
30521 return 0;
30522 }
30523diff -urNp linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.h linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.h
30524--- linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.h 2011-11-11 15:19:27.000000000 -0500
30525+++ linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.h 2011-11-16 18:39:07.000000000 -0500
30526@@ -265,7 +265,7 @@ struct lis3lv02d {
30527 struct input_polled_dev *idev; /* input device */
30528 struct platform_device *pdev; /* platform device */
30529 struct regulator_bulk_data regulators[2];
30530- atomic_t count; /* interrupt count after last read */
30531+ atomic_unchecked_t count; /* interrupt count after last read */
30532 union axis_conversion ac; /* hw -> logical axis */
30533 int mapped_btns[3];
30534
30535diff -urNp linux-3.1.1/drivers/misc/sgi-gru/gruhandles.c linux-3.1.1/drivers/misc/sgi-gru/gruhandles.c
30536--- linux-3.1.1/drivers/misc/sgi-gru/gruhandles.c 2011-11-11 15:19:27.000000000 -0500
30537+++ linux-3.1.1/drivers/misc/sgi-gru/gruhandles.c 2011-11-16 18:39:07.000000000 -0500
30538@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
30539 unsigned long nsec;
30540
30541 nsec = CLKS2NSEC(clks);
30542- atomic_long_inc(&mcs_op_statistics[op].count);
30543- atomic_long_add(nsec, &mcs_op_statistics[op].total);
30544+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
30545+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
30546 if (mcs_op_statistics[op].max < nsec)
30547 mcs_op_statistics[op].max = nsec;
30548 }
30549diff -urNp linux-3.1.1/drivers/misc/sgi-gru/gruprocfs.c linux-3.1.1/drivers/misc/sgi-gru/gruprocfs.c
30550--- linux-3.1.1/drivers/misc/sgi-gru/gruprocfs.c 2011-11-11 15:19:27.000000000 -0500
30551+++ linux-3.1.1/drivers/misc/sgi-gru/gruprocfs.c 2011-11-16 18:39:07.000000000 -0500
30552@@ -32,9 +32,9 @@
30553
30554 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
30555
30556-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
30557+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
30558 {
30559- unsigned long val = atomic_long_read(v);
30560+ unsigned long val = atomic_long_read_unchecked(v);
30561
30562 seq_printf(s, "%16lu %s\n", val, id);
30563 }
30564@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
30565
30566 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
30567 for (op = 0; op < mcsop_last; op++) {
30568- count = atomic_long_read(&mcs_op_statistics[op].count);
30569- total = atomic_long_read(&mcs_op_statistics[op].total);
30570+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
30571+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
30572 max = mcs_op_statistics[op].max;
30573 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
30574 count ? total / count : 0, max);
30575diff -urNp linux-3.1.1/drivers/misc/sgi-gru/grutables.h linux-3.1.1/drivers/misc/sgi-gru/grutables.h
30576--- linux-3.1.1/drivers/misc/sgi-gru/grutables.h 2011-11-11 15:19:27.000000000 -0500
30577+++ linux-3.1.1/drivers/misc/sgi-gru/grutables.h 2011-11-16 18:39:07.000000000 -0500
30578@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
30579 * GRU statistics.
30580 */
30581 struct gru_stats_s {
30582- atomic_long_t vdata_alloc;
30583- atomic_long_t vdata_free;
30584- atomic_long_t gts_alloc;
30585- atomic_long_t gts_free;
30586- atomic_long_t gms_alloc;
30587- atomic_long_t gms_free;
30588- atomic_long_t gts_double_allocate;
30589- atomic_long_t assign_context;
30590- atomic_long_t assign_context_failed;
30591- atomic_long_t free_context;
30592- atomic_long_t load_user_context;
30593- atomic_long_t load_kernel_context;
30594- atomic_long_t lock_kernel_context;
30595- atomic_long_t unlock_kernel_context;
30596- atomic_long_t steal_user_context;
30597- atomic_long_t steal_kernel_context;
30598- atomic_long_t steal_context_failed;
30599- atomic_long_t nopfn;
30600- atomic_long_t asid_new;
30601- atomic_long_t asid_next;
30602- atomic_long_t asid_wrap;
30603- atomic_long_t asid_reuse;
30604- atomic_long_t intr;
30605- atomic_long_t intr_cbr;
30606- atomic_long_t intr_tfh;
30607- atomic_long_t intr_spurious;
30608- atomic_long_t intr_mm_lock_failed;
30609- atomic_long_t call_os;
30610- atomic_long_t call_os_wait_queue;
30611- atomic_long_t user_flush_tlb;
30612- atomic_long_t user_unload_context;
30613- atomic_long_t user_exception;
30614- atomic_long_t set_context_option;
30615- atomic_long_t check_context_retarget_intr;
30616- atomic_long_t check_context_unload;
30617- atomic_long_t tlb_dropin;
30618- atomic_long_t tlb_preload_page;
30619- atomic_long_t tlb_dropin_fail_no_asid;
30620- atomic_long_t tlb_dropin_fail_upm;
30621- atomic_long_t tlb_dropin_fail_invalid;
30622- atomic_long_t tlb_dropin_fail_range_active;
30623- atomic_long_t tlb_dropin_fail_idle;
30624- atomic_long_t tlb_dropin_fail_fmm;
30625- atomic_long_t tlb_dropin_fail_no_exception;
30626- atomic_long_t tfh_stale_on_fault;
30627- atomic_long_t mmu_invalidate_range;
30628- atomic_long_t mmu_invalidate_page;
30629- atomic_long_t flush_tlb;
30630- atomic_long_t flush_tlb_gru;
30631- atomic_long_t flush_tlb_gru_tgh;
30632- atomic_long_t flush_tlb_gru_zero_asid;
30633-
30634- atomic_long_t copy_gpa;
30635- atomic_long_t read_gpa;
30636-
30637- atomic_long_t mesq_receive;
30638- atomic_long_t mesq_receive_none;
30639- atomic_long_t mesq_send;
30640- atomic_long_t mesq_send_failed;
30641- atomic_long_t mesq_noop;
30642- atomic_long_t mesq_send_unexpected_error;
30643- atomic_long_t mesq_send_lb_overflow;
30644- atomic_long_t mesq_send_qlimit_reached;
30645- atomic_long_t mesq_send_amo_nacked;
30646- atomic_long_t mesq_send_put_nacked;
30647- atomic_long_t mesq_page_overflow;
30648- atomic_long_t mesq_qf_locked;
30649- atomic_long_t mesq_qf_noop_not_full;
30650- atomic_long_t mesq_qf_switch_head_failed;
30651- atomic_long_t mesq_qf_unexpected_error;
30652- atomic_long_t mesq_noop_unexpected_error;
30653- atomic_long_t mesq_noop_lb_overflow;
30654- atomic_long_t mesq_noop_qlimit_reached;
30655- atomic_long_t mesq_noop_amo_nacked;
30656- atomic_long_t mesq_noop_put_nacked;
30657- atomic_long_t mesq_noop_page_overflow;
30658+ atomic_long_unchecked_t vdata_alloc;
30659+ atomic_long_unchecked_t vdata_free;
30660+ atomic_long_unchecked_t gts_alloc;
30661+ atomic_long_unchecked_t gts_free;
30662+ atomic_long_unchecked_t gms_alloc;
30663+ atomic_long_unchecked_t gms_free;
30664+ atomic_long_unchecked_t gts_double_allocate;
30665+ atomic_long_unchecked_t assign_context;
30666+ atomic_long_unchecked_t assign_context_failed;
30667+ atomic_long_unchecked_t free_context;
30668+ atomic_long_unchecked_t load_user_context;
30669+ atomic_long_unchecked_t load_kernel_context;
30670+ atomic_long_unchecked_t lock_kernel_context;
30671+ atomic_long_unchecked_t unlock_kernel_context;
30672+ atomic_long_unchecked_t steal_user_context;
30673+ atomic_long_unchecked_t steal_kernel_context;
30674+ atomic_long_unchecked_t steal_context_failed;
30675+ atomic_long_unchecked_t nopfn;
30676+ atomic_long_unchecked_t asid_new;
30677+ atomic_long_unchecked_t asid_next;
30678+ atomic_long_unchecked_t asid_wrap;
30679+ atomic_long_unchecked_t asid_reuse;
30680+ atomic_long_unchecked_t intr;
30681+ atomic_long_unchecked_t intr_cbr;
30682+ atomic_long_unchecked_t intr_tfh;
30683+ atomic_long_unchecked_t intr_spurious;
30684+ atomic_long_unchecked_t intr_mm_lock_failed;
30685+ atomic_long_unchecked_t call_os;
30686+ atomic_long_unchecked_t call_os_wait_queue;
30687+ atomic_long_unchecked_t user_flush_tlb;
30688+ atomic_long_unchecked_t user_unload_context;
30689+ atomic_long_unchecked_t user_exception;
30690+ atomic_long_unchecked_t set_context_option;
30691+ atomic_long_unchecked_t check_context_retarget_intr;
30692+ atomic_long_unchecked_t check_context_unload;
30693+ atomic_long_unchecked_t tlb_dropin;
30694+ atomic_long_unchecked_t tlb_preload_page;
30695+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
30696+ atomic_long_unchecked_t tlb_dropin_fail_upm;
30697+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
30698+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
30699+ atomic_long_unchecked_t tlb_dropin_fail_idle;
30700+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
30701+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
30702+ atomic_long_unchecked_t tfh_stale_on_fault;
30703+ atomic_long_unchecked_t mmu_invalidate_range;
30704+ atomic_long_unchecked_t mmu_invalidate_page;
30705+ atomic_long_unchecked_t flush_tlb;
30706+ atomic_long_unchecked_t flush_tlb_gru;
30707+ atomic_long_unchecked_t flush_tlb_gru_tgh;
30708+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
30709+
30710+ atomic_long_unchecked_t copy_gpa;
30711+ atomic_long_unchecked_t read_gpa;
30712+
30713+ atomic_long_unchecked_t mesq_receive;
30714+ atomic_long_unchecked_t mesq_receive_none;
30715+ atomic_long_unchecked_t mesq_send;
30716+ atomic_long_unchecked_t mesq_send_failed;
30717+ atomic_long_unchecked_t mesq_noop;
30718+ atomic_long_unchecked_t mesq_send_unexpected_error;
30719+ atomic_long_unchecked_t mesq_send_lb_overflow;
30720+ atomic_long_unchecked_t mesq_send_qlimit_reached;
30721+ atomic_long_unchecked_t mesq_send_amo_nacked;
30722+ atomic_long_unchecked_t mesq_send_put_nacked;
30723+ atomic_long_unchecked_t mesq_page_overflow;
30724+ atomic_long_unchecked_t mesq_qf_locked;
30725+ atomic_long_unchecked_t mesq_qf_noop_not_full;
30726+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
30727+ atomic_long_unchecked_t mesq_qf_unexpected_error;
30728+ atomic_long_unchecked_t mesq_noop_unexpected_error;
30729+ atomic_long_unchecked_t mesq_noop_lb_overflow;
30730+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
30731+ atomic_long_unchecked_t mesq_noop_amo_nacked;
30732+ atomic_long_unchecked_t mesq_noop_put_nacked;
30733+ atomic_long_unchecked_t mesq_noop_page_overflow;
30734
30735 };
30736
30737@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
30738 tghop_invalidate, mcsop_last};
30739
30740 struct mcs_op_statistic {
30741- atomic_long_t count;
30742- atomic_long_t total;
30743+ atomic_long_unchecked_t count;
30744+ atomic_long_unchecked_t total;
30745 unsigned long max;
30746 };
30747
30748@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
30749
30750 #define STAT(id) do { \
30751 if (gru_options & OPT_STATS) \
30752- atomic_long_inc(&gru_stats.id); \
30753+ atomic_long_inc_unchecked(&gru_stats.id); \
30754 } while (0)
30755
30756 #ifdef CONFIG_SGI_GRU_DEBUG
30757diff -urNp linux-3.1.1/drivers/misc/sgi-xp/xpc.h linux-3.1.1/drivers/misc/sgi-xp/xpc.h
30758--- linux-3.1.1/drivers/misc/sgi-xp/xpc.h 2011-11-11 15:19:27.000000000 -0500
30759+++ linux-3.1.1/drivers/misc/sgi-xp/xpc.h 2011-11-16 18:39:07.000000000 -0500
30760@@ -835,6 +835,7 @@ struct xpc_arch_operations {
30761 void (*received_payload) (struct xpc_channel *, void *);
30762 void (*notify_senders_of_disconnect) (struct xpc_channel *);
30763 };
30764+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
30765
30766 /* struct xpc_partition act_state values (for XPC HB) */
30767
30768@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_regis
30769 /* found in xpc_main.c */
30770 extern struct device *xpc_part;
30771 extern struct device *xpc_chan;
30772-extern struct xpc_arch_operations xpc_arch_ops;
30773+extern xpc_arch_operations_no_const xpc_arch_ops;
30774 extern int xpc_disengage_timelimit;
30775 extern int xpc_disengage_timedout;
30776 extern int xpc_activate_IRQ_rcvd;
30777diff -urNp linux-3.1.1/drivers/misc/sgi-xp/xpc_main.c linux-3.1.1/drivers/misc/sgi-xp/xpc_main.c
30778--- linux-3.1.1/drivers/misc/sgi-xp/xpc_main.c 2011-11-11 15:19:27.000000000 -0500
30779+++ linux-3.1.1/drivers/misc/sgi-xp/xpc_main.c 2011-11-16 18:39:07.000000000 -0500
30780@@ -162,7 +162,7 @@ static struct notifier_block xpc_die_not
30781 .notifier_call = xpc_system_die,
30782 };
30783
30784-struct xpc_arch_operations xpc_arch_ops;
30785+xpc_arch_operations_no_const xpc_arch_ops;
30786
30787 /*
30788 * Timer function to enforce the timelimit on the partition disengage.
30789diff -urNp linux-3.1.1/drivers/misc/sgi-xp/xp.h linux-3.1.1/drivers/misc/sgi-xp/xp.h
30790--- linux-3.1.1/drivers/misc/sgi-xp/xp.h 2011-11-11 15:19:27.000000000 -0500
30791+++ linux-3.1.1/drivers/misc/sgi-xp/xp.h 2011-11-16 18:39:07.000000000 -0500
30792@@ -289,7 +289,7 @@ struct xpc_interface {
30793 xpc_notify_func, void *);
30794 void (*received) (short, int, void *);
30795 enum xp_retval (*partid_to_nasids) (short, void *);
30796-};
30797+} __no_const;
30798
30799 extern struct xpc_interface xpc_interface;
30800
30801diff -urNp linux-3.1.1/drivers/mmc/host/sdhci-pci.c linux-3.1.1/drivers/mmc/host/sdhci-pci.c
30802--- linux-3.1.1/drivers/mmc/host/sdhci-pci.c 2011-11-11 15:19:27.000000000 -0500
30803+++ linux-3.1.1/drivers/mmc/host/sdhci-pci.c 2011-11-16 18:39:07.000000000 -0500
30804@@ -542,7 +542,7 @@ static const struct sdhci_pci_fixes sdhc
30805 .probe = via_probe,
30806 };
30807
30808-static const struct pci_device_id pci_ids[] __devinitdata = {
30809+static const struct pci_device_id pci_ids[] __devinitconst = {
30810 {
30811 .vendor = PCI_VENDOR_ID_RICOH,
30812 .device = PCI_DEVICE_ID_RICOH_R5C822,
30813diff -urNp linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0001.c linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0001.c
30814--- linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0001.c 2011-11-11 15:19:27.000000000 -0500
30815+++ linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0001.c 2011-11-16 18:40:10.000000000 -0500
30816@@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
30817 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
30818 unsigned long timeo = jiffies + HZ;
30819
30820+ pax_track_stack();
30821+
30822 /* Prevent setting state FL_SYNCING for chip in suspended state. */
30823 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
30824 goto sleep;
30825@@ -1653,6 +1655,8 @@ static int __xipram do_write_buffer(stru
30826 unsigned long initial_adr;
30827 int initial_len = len;
30828
30829+ pax_track_stack();
30830+
30831 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
30832 adr += chip->start;
30833 initial_adr = adr;
30834@@ -1871,6 +1875,8 @@ static int __xipram do_erase_oneblock(st
30835 int retries = 3;
30836 int ret;
30837
30838+ pax_track_stack();
30839+
30840 adr += chip->start;
30841
30842 retry:
30843diff -urNp linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0020.c linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0020.c
30844--- linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0020.c 2011-11-11 15:19:27.000000000 -0500
30845+++ linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0020.c 2011-11-16 18:40:10.000000000 -0500
30846@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
30847 unsigned long cmd_addr;
30848 struct cfi_private *cfi = map->fldrv_priv;
30849
30850+ pax_track_stack();
30851+
30852 adr += chip->start;
30853
30854 /* Ensure cmd read/writes are aligned. */
30855@@ -429,6 +431,8 @@ static inline int do_write_buffer(struct
30856 DECLARE_WAITQUEUE(wait, current);
30857 int wbufsize, z;
30858
30859+ pax_track_stack();
30860+
30861 /* M58LW064A requires bus alignment for buffer wriets -- saw */
30862 if (adr & (map_bankwidth(map)-1))
30863 return -EINVAL;
30864@@ -743,6 +747,8 @@ static inline int do_erase_oneblock(stru
30865 DECLARE_WAITQUEUE(wait, current);
30866 int ret = 0;
30867
30868+ pax_track_stack();
30869+
30870 adr += chip->start;
30871
30872 /* Let's determine this according to the interleave only once */
30873@@ -1048,6 +1054,8 @@ static inline int do_lock_oneblock(struc
30874 unsigned long timeo = jiffies + HZ;
30875 DECLARE_WAITQUEUE(wait, current);
30876
30877+ pax_track_stack();
30878+
30879 adr += chip->start;
30880
30881 /* Let's determine this according to the interleave only once */
30882@@ -1197,6 +1205,8 @@ static inline int do_unlock_oneblock(str
30883 unsigned long timeo = jiffies + HZ;
30884 DECLARE_WAITQUEUE(wait, current);
30885
30886+ pax_track_stack();
30887+
30888 adr += chip->start;
30889
30890 /* Let's determine this according to the interleave only once */
30891diff -urNp linux-3.1.1/drivers/mtd/devices/doc2000.c linux-3.1.1/drivers/mtd/devices/doc2000.c
30892--- linux-3.1.1/drivers/mtd/devices/doc2000.c 2011-11-11 15:19:27.000000000 -0500
30893+++ linux-3.1.1/drivers/mtd/devices/doc2000.c 2011-11-16 18:39:07.000000000 -0500
30894@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
30895
30896 /* The ECC will not be calculated correctly if less than 512 is written */
30897 /* DBB-
30898- if (len != 0x200 && eccbuf)
30899+ if (len != 0x200)
30900 printk(KERN_WARNING
30901 "ECC needs a full sector write (adr: %lx size %lx)\n",
30902 (long) to, (long) len);
30903diff -urNp linux-3.1.1/drivers/mtd/devices/doc2001.c linux-3.1.1/drivers/mtd/devices/doc2001.c
30904--- linux-3.1.1/drivers/mtd/devices/doc2001.c 2011-11-11 15:19:27.000000000 -0500
30905+++ linux-3.1.1/drivers/mtd/devices/doc2001.c 2011-11-16 18:39:07.000000000 -0500
30906@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
30907 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
30908
30909 /* Don't allow read past end of device */
30910- if (from >= this->totlen)
30911+ if (from >= this->totlen || !len)
30912 return -EINVAL;
30913
30914 /* Don't allow a single read to cross a 512-byte block boundary */
30915diff -urNp linux-3.1.1/drivers/mtd/ftl.c linux-3.1.1/drivers/mtd/ftl.c
30916--- linux-3.1.1/drivers/mtd/ftl.c 2011-11-11 15:19:27.000000000 -0500
30917+++ linux-3.1.1/drivers/mtd/ftl.c 2011-11-16 18:40:10.000000000 -0500
30918@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
30919 loff_t offset;
30920 uint16_t srcunitswap = cpu_to_le16(srcunit);
30921
30922+ pax_track_stack();
30923+
30924 eun = &part->EUNInfo[srcunit];
30925 xfer = &part->XferInfo[xferunit];
30926 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
30927diff -urNp linux-3.1.1/drivers/mtd/inftlcore.c linux-3.1.1/drivers/mtd/inftlcore.c
30928--- linux-3.1.1/drivers/mtd/inftlcore.c 2011-11-11 15:19:27.000000000 -0500
30929+++ linux-3.1.1/drivers/mtd/inftlcore.c 2011-11-16 18:40:10.000000000 -0500
30930@@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
30931 struct inftl_oob oob;
30932 size_t retlen;
30933
30934+ pax_track_stack();
30935+
30936 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
30937 "pending=%d)\n", inftl, thisVUC, pendingblock);
30938
30939diff -urNp linux-3.1.1/drivers/mtd/inftlmount.c linux-3.1.1/drivers/mtd/inftlmount.c
30940--- linux-3.1.1/drivers/mtd/inftlmount.c 2011-11-11 15:19:27.000000000 -0500
30941+++ linux-3.1.1/drivers/mtd/inftlmount.c 2011-11-16 18:40:10.000000000 -0500
30942@@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
30943 struct INFTLPartition *ip;
30944 size_t retlen;
30945
30946+ pax_track_stack();
30947+
30948 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
30949
30950 /*
30951diff -urNp linux-3.1.1/drivers/mtd/lpddr/qinfo_probe.c linux-3.1.1/drivers/mtd/lpddr/qinfo_probe.c
30952--- linux-3.1.1/drivers/mtd/lpddr/qinfo_probe.c 2011-11-11 15:19:27.000000000 -0500
30953+++ linux-3.1.1/drivers/mtd/lpddr/qinfo_probe.c 2011-11-16 18:40:10.000000000 -0500
30954@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
30955 {
30956 map_word pfow_val[4];
30957
30958+ pax_track_stack();
30959+
30960 /* Check identification string */
30961 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
30962 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
30963diff -urNp linux-3.1.1/drivers/mtd/mtdchar.c linux-3.1.1/drivers/mtd/mtdchar.c
30964--- linux-3.1.1/drivers/mtd/mtdchar.c 2011-11-11 15:19:27.000000000 -0500
30965+++ linux-3.1.1/drivers/mtd/mtdchar.c 2011-11-16 18:40:10.000000000 -0500
30966@@ -554,6 +554,8 @@ static int mtd_ioctl(struct file *file,
30967 u_long size;
30968 struct mtd_info_user info;
30969
30970+ pax_track_stack();
30971+
30972 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
30973
30974 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
30975diff -urNp linux-3.1.1/drivers/mtd/nand/denali.c linux-3.1.1/drivers/mtd/nand/denali.c
30976--- linux-3.1.1/drivers/mtd/nand/denali.c 2011-11-11 15:19:27.000000000 -0500
30977+++ linux-3.1.1/drivers/mtd/nand/denali.c 2011-11-16 18:39:07.000000000 -0500
30978@@ -26,6 +26,7 @@
30979 #include <linux/pci.h>
30980 #include <linux/mtd/mtd.h>
30981 #include <linux/module.h>
30982+#include <linux/slab.h>
30983
30984 #include "denali.h"
30985
30986diff -urNp linux-3.1.1/drivers/mtd/nftlcore.c linux-3.1.1/drivers/mtd/nftlcore.c
30987--- linux-3.1.1/drivers/mtd/nftlcore.c 2011-11-11 15:19:27.000000000 -0500
30988+++ linux-3.1.1/drivers/mtd/nftlcore.c 2011-11-16 18:40:10.000000000 -0500
30989@@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
30990 int inplace = 1;
30991 size_t retlen;
30992
30993+ pax_track_stack();
30994+
30995 memset(BlockMap, 0xff, sizeof(BlockMap));
30996 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
30997
30998diff -urNp linux-3.1.1/drivers/mtd/nftlmount.c linux-3.1.1/drivers/mtd/nftlmount.c
30999--- linux-3.1.1/drivers/mtd/nftlmount.c 2011-11-11 15:19:27.000000000 -0500
31000+++ linux-3.1.1/drivers/mtd/nftlmount.c 2011-11-16 18:40:10.000000000 -0500
31001@@ -24,6 +24,7 @@
31002 #include <asm/errno.h>
31003 #include <linux/delay.h>
31004 #include <linux/slab.h>
31005+#include <linux/sched.h>
31006 #include <linux/mtd/mtd.h>
31007 #include <linux/mtd/nand.h>
31008 #include <linux/mtd/nftl.h>
31009@@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
31010 struct mtd_info *mtd = nftl->mbd.mtd;
31011 unsigned int i;
31012
31013+ pax_track_stack();
31014+
31015 /* Assume logical EraseSize == physical erasesize for starting the scan.
31016 We'll sort it out later if we find a MediaHeader which says otherwise */
31017 /* Actually, we won't. The new DiskOnChip driver has already scanned
31018diff -urNp linux-3.1.1/drivers/mtd/ubi/build.c linux-3.1.1/drivers/mtd/ubi/build.c
31019--- linux-3.1.1/drivers/mtd/ubi/build.c 2011-11-11 15:19:27.000000000 -0500
31020+++ linux-3.1.1/drivers/mtd/ubi/build.c 2011-11-16 18:39:07.000000000 -0500
31021@@ -1311,7 +1311,7 @@ module_exit(ubi_exit);
31022 static int __init bytes_str_to_int(const char *str)
31023 {
31024 char *endp;
31025- unsigned long result;
31026+ unsigned long result, scale = 1;
31027
31028 result = simple_strtoul(str, &endp, 0);
31029 if (str == endp || result >= INT_MAX) {
31030@@ -1322,11 +1322,11 @@ static int __init bytes_str_to_int(const
31031
31032 switch (*endp) {
31033 case 'G':
31034- result *= 1024;
31035+ scale *= 1024;
31036 case 'M':
31037- result *= 1024;
31038+ scale *= 1024;
31039 case 'K':
31040- result *= 1024;
31041+ scale *= 1024;
31042 if (endp[1] == 'i' && endp[2] == 'B')
31043 endp += 2;
31044 case '\0':
31045@@ -1337,7 +1337,13 @@ static int __init bytes_str_to_int(const
31046 return -EINVAL;
31047 }
31048
31049- return result;
31050+ if ((intoverflow_t)result*scale >= INT_MAX) {
31051+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
31052+ str);
31053+ return -EINVAL;
31054+ }
31055+
31056+ return result*scale;
31057 }
31058
31059 /**
31060diff -urNp linux-3.1.1/drivers/net/atlx/atl2.c linux-3.1.1/drivers/net/atlx/atl2.c
31061--- linux-3.1.1/drivers/net/atlx/atl2.c 2011-11-11 15:19:27.000000000 -0500
31062+++ linux-3.1.1/drivers/net/atlx/atl2.c 2011-11-16 18:39:07.000000000 -0500
31063@@ -2857,7 +2857,7 @@ static void atl2_force_ps(struct atl2_hw
31064 */
31065
31066 #define ATL2_PARAM(X, desc) \
31067- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
31068+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
31069 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
31070 MODULE_PARM_DESC(X, desc);
31071 #else
31072diff -urNp linux-3.1.1/drivers/net/bna/bfa_ioc_ct.c linux-3.1.1/drivers/net/bna/bfa_ioc_ct.c
31073--- linux-3.1.1/drivers/net/bna/bfa_ioc_ct.c 2011-11-11 15:19:27.000000000 -0500
31074+++ linux-3.1.1/drivers/net/bna/bfa_ioc_ct.c 2011-11-16 18:39:07.000000000 -0500
31075@@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct b
31076 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
31077 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
31078
31079-static struct bfa_ioc_hwif nw_hwif_ct;
31080+static struct bfa_ioc_hwif nw_hwif_ct = {
31081+ .ioc_pll_init = bfa_ioc_ct_pll_init,
31082+ .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
31083+ .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
31084+ .ioc_reg_init = bfa_ioc_ct_reg_init,
31085+ .ioc_map_port = bfa_ioc_ct_map_port,
31086+ .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
31087+ .ioc_notify_fail = bfa_ioc_ct_notify_fail,
31088+ .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
31089+ .ioc_sync_start = bfa_ioc_ct_sync_start,
31090+ .ioc_sync_join = bfa_ioc_ct_sync_join,
31091+ .ioc_sync_leave = bfa_ioc_ct_sync_leave,
31092+ .ioc_sync_ack = bfa_ioc_ct_sync_ack,
31093+ .ioc_sync_complete = bfa_ioc_ct_sync_complete
31094+};
31095
31096 /**
31097 * Called from bfa_ioc_attach() to map asic specific calls.
31098@@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
31099 void
31100 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
31101 {
31102- nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
31103- nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
31104- nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
31105- nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
31106- nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
31107- nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
31108- nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
31109- nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
31110- nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
31111- nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
31112- nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
31113- nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
31114- nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
31115-
31116 ioc->ioc_hwif = &nw_hwif_ct;
31117 }
31118
31119diff -urNp linux-3.1.1/drivers/net/bna/bnad.c linux-3.1.1/drivers/net/bna/bnad.c
31120--- linux-3.1.1/drivers/net/bna/bnad.c 2011-11-11 15:19:27.000000000 -0500
31121+++ linux-3.1.1/drivers/net/bna/bnad.c 2011-11-16 18:39:07.000000000 -0500
31122@@ -1673,7 +1673,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx
31123 struct bna_intr_info *intr_info =
31124 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
31125 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
31126- struct bna_tx_event_cbfn tx_cbfn;
31127+ static struct bna_tx_event_cbfn tx_cbfn = {
31128+ /* Initialize the tx event handlers */
31129+ .tcb_setup_cbfn = bnad_cb_tcb_setup,
31130+ .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
31131+ .tx_stall_cbfn = bnad_cb_tx_stall,
31132+ .tx_resume_cbfn = bnad_cb_tx_resume,
31133+ .tx_cleanup_cbfn = bnad_cb_tx_cleanup
31134+ };
31135 struct bna_tx *tx;
31136 unsigned long flags;
31137
31138@@ -1682,13 +1689,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx
31139 tx_config->txq_depth = bnad->txq_depth;
31140 tx_config->tx_type = BNA_TX_T_REGULAR;
31141
31142- /* Initialize the tx event handlers */
31143- tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
31144- tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
31145- tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
31146- tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
31147- tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
31148-
31149 /* Get BNA's resource requirement for one tx object */
31150 spin_lock_irqsave(&bnad->bna_lock, flags);
31151 bna_tx_res_req(bnad->num_txq_per_tx,
31152@@ -1819,21 +1819,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx
31153 struct bna_intr_info *intr_info =
31154 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
31155 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
31156- struct bna_rx_event_cbfn rx_cbfn;
31157+ static struct bna_rx_event_cbfn rx_cbfn = {
31158+ /* Initialize the Rx event handlers */
31159+ .rcb_setup_cbfn = bnad_cb_rcb_setup,
31160+ .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
31161+ .ccb_setup_cbfn = bnad_cb_ccb_setup,
31162+ .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
31163+ .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
31164+ .rx_post_cbfn = bnad_cb_rx_post
31165+ };
31166 struct bna_rx *rx;
31167 unsigned long flags;
31168
31169 /* Initialize the Rx object configuration */
31170 bnad_init_rx_config(bnad, rx_config);
31171
31172- /* Initialize the Rx event handlers */
31173- rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
31174- rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
31175- rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
31176- rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
31177- rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
31178- rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
31179-
31180 /* Get BNA's resource requirement for one Rx object */
31181 spin_lock_irqsave(&bnad->bna_lock, flags);
31182 bna_rx_res_req(rx_config, res_info);
31183diff -urNp linux-3.1.1/drivers/net/bnx2.c linux-3.1.1/drivers/net/bnx2.c
31184--- linux-3.1.1/drivers/net/bnx2.c 2011-11-11 15:19:27.000000000 -0500
31185+++ linux-3.1.1/drivers/net/bnx2.c 2011-11-16 18:40:11.000000000 -0500
31186@@ -5877,6 +5877,8 @@ bnx2_test_nvram(struct bnx2 *bp)
31187 int rc = 0;
31188 u32 magic, csum;
31189
31190+ pax_track_stack();
31191+
31192 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
31193 goto test_nvram_done;
31194
31195diff -urNp linux-3.1.1/drivers/net/bnx2x/bnx2x_ethtool.c linux-3.1.1/drivers/net/bnx2x/bnx2x_ethtool.c
31196--- linux-3.1.1/drivers/net/bnx2x/bnx2x_ethtool.c 2011-11-11 15:19:27.000000000 -0500
31197+++ linux-3.1.1/drivers/net/bnx2x/bnx2x_ethtool.c 2011-11-16 18:40:11.000000000 -0500
31198@@ -1943,6 +1943,8 @@ static int bnx2x_test_nvram(struct bnx2x
31199 int i, rc;
31200 u32 magic, crc;
31201
31202+ pax_track_stack();
31203+
31204 if (BP_NOMCP(bp))
31205 return 0;
31206
31207diff -urNp linux-3.1.1/drivers/net/bnx2x/bnx2x_sp.h linux-3.1.1/drivers/net/bnx2x/bnx2x_sp.h
31208--- linux-3.1.1/drivers/net/bnx2x/bnx2x_sp.h 2011-11-11 15:19:27.000000000 -0500
31209+++ linux-3.1.1/drivers/net/bnx2x/bnx2x_sp.h 2011-11-16 18:39:07.000000000 -0500
31210@@ -449,7 +449,7 @@ struct bnx2x_rx_mode_obj {
31211
31212 int (*wait_comp)(struct bnx2x *bp,
31213 struct bnx2x_rx_mode_ramrod_params *p);
31214-};
31215+} __no_const;
31216
31217 /********************** Set multicast group ***********************************/
31218
31219diff -urNp linux-3.1.1/drivers/net/cxgb3/l2t.h linux-3.1.1/drivers/net/cxgb3/l2t.h
31220--- linux-3.1.1/drivers/net/cxgb3/l2t.h 2011-11-11 15:19:27.000000000 -0500
31221+++ linux-3.1.1/drivers/net/cxgb3/l2t.h 2011-11-16 18:39:07.000000000 -0500
31222@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)
31223 */
31224 struct l2t_skb_cb {
31225 arp_failure_handler_func arp_failure_handler;
31226-};
31227+} __no_const;
31228
31229 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
31230
31231diff -urNp linux-3.1.1/drivers/net/cxgb4/cxgb4_main.c linux-3.1.1/drivers/net/cxgb4/cxgb4_main.c
31232--- linux-3.1.1/drivers/net/cxgb4/cxgb4_main.c 2011-11-11 15:19:27.000000000 -0500
31233+++ linux-3.1.1/drivers/net/cxgb4/cxgb4_main.c 2011-11-16 18:40:22.000000000 -0500
31234@@ -3396,6 +3396,8 @@ static int __devinit enable_msix(struct
31235 unsigned int nchan = adap->params.nports;
31236 struct msix_entry entries[MAX_INGQ + 1];
31237
31238+ pax_track_stack();
31239+
31240 for (i = 0; i < ARRAY_SIZE(entries); ++i)
31241 entries[i].entry = i;
31242
31243diff -urNp linux-3.1.1/drivers/net/cxgb4/t4_hw.c linux-3.1.1/drivers/net/cxgb4/t4_hw.c
31244--- linux-3.1.1/drivers/net/cxgb4/t4_hw.c 2011-11-11 15:19:27.000000000 -0500
31245+++ linux-3.1.1/drivers/net/cxgb4/t4_hw.c 2011-11-16 18:40:22.000000000 -0500
31246@@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
31247 u8 vpd[VPD_LEN], csum;
31248 unsigned int vpdr_len, kw_offset, id_len;
31249
31250+ pax_track_stack();
31251+
31252 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
31253 if (ret < 0)
31254 return ret;
31255diff -urNp linux-3.1.1/drivers/net/e1000e/82571.c linux-3.1.1/drivers/net/e1000e/82571.c
31256--- linux-3.1.1/drivers/net/e1000e/82571.c 2011-11-11 15:19:27.000000000 -0500
31257+++ linux-3.1.1/drivers/net/e1000e/82571.c 2011-11-16 18:39:07.000000000 -0500
31258@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
31259 {
31260 struct e1000_hw *hw = &adapter->hw;
31261 struct e1000_mac_info *mac = &hw->mac;
31262- struct e1000_mac_operations *func = &mac->ops;
31263+ e1000_mac_operations_no_const *func = &mac->ops;
31264 u32 swsm = 0;
31265 u32 swsm2 = 0;
31266 bool force_clear_smbi = false;
31267diff -urNp linux-3.1.1/drivers/net/e1000e/es2lan.c linux-3.1.1/drivers/net/e1000e/es2lan.c
31268--- linux-3.1.1/drivers/net/e1000e/es2lan.c 2011-11-11 15:19:27.000000000 -0500
31269+++ linux-3.1.1/drivers/net/e1000e/es2lan.c 2011-11-16 18:39:07.000000000 -0500
31270@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
31271 {
31272 struct e1000_hw *hw = &adapter->hw;
31273 struct e1000_mac_info *mac = &hw->mac;
31274- struct e1000_mac_operations *func = &mac->ops;
31275+ e1000_mac_operations_no_const *func = &mac->ops;
31276
31277 /* Set media type */
31278 switch (adapter->pdev->device) {
31279diff -urNp linux-3.1.1/drivers/net/e1000e/hw.h linux-3.1.1/drivers/net/e1000e/hw.h
31280--- linux-3.1.1/drivers/net/e1000e/hw.h 2011-11-11 15:19:27.000000000 -0500
31281+++ linux-3.1.1/drivers/net/e1000e/hw.h 2011-11-16 18:39:07.000000000 -0500
31282@@ -778,6 +778,7 @@ struct e1000_mac_operations {
31283 void (*write_vfta)(struct e1000_hw *, u32, u32);
31284 s32 (*read_mac_addr)(struct e1000_hw *);
31285 };
31286+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
31287
31288 /*
31289 * When to use various PHY register access functions:
31290@@ -818,6 +819,7 @@ struct e1000_phy_operations {
31291 void (*power_up)(struct e1000_hw *);
31292 void (*power_down)(struct e1000_hw *);
31293 };
31294+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
31295
31296 /* Function pointers for the NVM. */
31297 struct e1000_nvm_operations {
31298@@ -829,9 +831,10 @@ struct e1000_nvm_operations {
31299 s32 (*validate)(struct e1000_hw *);
31300 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
31301 };
31302+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
31303
31304 struct e1000_mac_info {
31305- struct e1000_mac_operations ops;
31306+ e1000_mac_operations_no_const ops;
31307 u8 addr[ETH_ALEN];
31308 u8 perm_addr[ETH_ALEN];
31309
31310@@ -872,7 +875,7 @@ struct e1000_mac_info {
31311 };
31312
31313 struct e1000_phy_info {
31314- struct e1000_phy_operations ops;
31315+ e1000_phy_operations_no_const ops;
31316
31317 enum e1000_phy_type type;
31318
31319@@ -906,7 +909,7 @@ struct e1000_phy_info {
31320 };
31321
31322 struct e1000_nvm_info {
31323- struct e1000_nvm_operations ops;
31324+ e1000_nvm_operations_no_const ops;
31325
31326 enum e1000_nvm_type type;
31327 enum e1000_nvm_override override;
31328diff -urNp linux-3.1.1/drivers/net/fealnx.c linux-3.1.1/drivers/net/fealnx.c
31329--- linux-3.1.1/drivers/net/fealnx.c 2011-11-11 15:19:27.000000000 -0500
31330+++ linux-3.1.1/drivers/net/fealnx.c 2011-11-16 18:39:07.000000000 -0500
31331@@ -150,7 +150,7 @@ struct chip_info {
31332 int flags;
31333 };
31334
31335-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
31336+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
31337 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
31338 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
31339 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
31340diff -urNp linux-3.1.1/drivers/net/hamradio/6pack.c linux-3.1.1/drivers/net/hamradio/6pack.c
31341--- linux-3.1.1/drivers/net/hamradio/6pack.c 2011-11-11 15:19:27.000000000 -0500
31342+++ linux-3.1.1/drivers/net/hamradio/6pack.c 2011-11-16 18:40:22.000000000 -0500
31343@@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
31344 unsigned char buf[512];
31345 int count1;
31346
31347+ pax_track_stack();
31348+
31349 if (!count)
31350 return;
31351
31352diff -urNp linux-3.1.1/drivers/net/igb/e1000_hw.h linux-3.1.1/drivers/net/igb/e1000_hw.h
31353--- linux-3.1.1/drivers/net/igb/e1000_hw.h 2011-11-11 15:19:27.000000000 -0500
31354+++ linux-3.1.1/drivers/net/igb/e1000_hw.h 2011-11-16 18:39:07.000000000 -0500
31355@@ -314,6 +314,7 @@ struct e1000_mac_operations {
31356 s32 (*read_mac_addr)(struct e1000_hw *);
31357 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
31358 };
31359+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
31360
31361 struct e1000_phy_operations {
31362 s32 (*acquire)(struct e1000_hw *);
31363@@ -330,6 +331,7 @@ struct e1000_phy_operations {
31364 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
31365 s32 (*write_reg)(struct e1000_hw *, u32, u16);
31366 };
31367+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
31368
31369 struct e1000_nvm_operations {
31370 s32 (*acquire)(struct e1000_hw *);
31371@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
31372 s32 (*update)(struct e1000_hw *);
31373 s32 (*validate)(struct e1000_hw *);
31374 };
31375+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
31376
31377 struct e1000_info {
31378 s32 (*get_invariants)(struct e1000_hw *);
31379@@ -350,7 +353,7 @@ struct e1000_info {
31380 extern const struct e1000_info e1000_82575_info;
31381
31382 struct e1000_mac_info {
31383- struct e1000_mac_operations ops;
31384+ e1000_mac_operations_no_const ops;
31385
31386 u8 addr[6];
31387 u8 perm_addr[6];
31388@@ -388,7 +391,7 @@ struct e1000_mac_info {
31389 };
31390
31391 struct e1000_phy_info {
31392- struct e1000_phy_operations ops;
31393+ e1000_phy_operations_no_const ops;
31394
31395 enum e1000_phy_type type;
31396
31397@@ -423,7 +426,7 @@ struct e1000_phy_info {
31398 };
31399
31400 struct e1000_nvm_info {
31401- struct e1000_nvm_operations ops;
31402+ e1000_nvm_operations_no_const ops;
31403 enum e1000_nvm_type type;
31404 enum e1000_nvm_override override;
31405
31406@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
31407 s32 (*check_for_ack)(struct e1000_hw *, u16);
31408 s32 (*check_for_rst)(struct e1000_hw *, u16);
31409 };
31410+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
31411
31412 struct e1000_mbx_stats {
31413 u32 msgs_tx;
31414@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
31415 };
31416
31417 struct e1000_mbx_info {
31418- struct e1000_mbx_operations ops;
31419+ e1000_mbx_operations_no_const ops;
31420 struct e1000_mbx_stats stats;
31421 u32 timeout;
31422 u32 usec_delay;
31423diff -urNp linux-3.1.1/drivers/net/igbvf/vf.h linux-3.1.1/drivers/net/igbvf/vf.h
31424--- linux-3.1.1/drivers/net/igbvf/vf.h 2011-11-11 15:19:27.000000000 -0500
31425+++ linux-3.1.1/drivers/net/igbvf/vf.h 2011-11-16 18:39:07.000000000 -0500
31426@@ -189,9 +189,10 @@ struct e1000_mac_operations {
31427 s32 (*read_mac_addr)(struct e1000_hw *);
31428 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
31429 };
31430+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
31431
31432 struct e1000_mac_info {
31433- struct e1000_mac_operations ops;
31434+ e1000_mac_operations_no_const ops;
31435 u8 addr[6];
31436 u8 perm_addr[6];
31437
31438@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
31439 s32 (*check_for_ack)(struct e1000_hw *);
31440 s32 (*check_for_rst)(struct e1000_hw *);
31441 };
31442+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
31443
31444 struct e1000_mbx_stats {
31445 u32 msgs_tx;
31446@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
31447 };
31448
31449 struct e1000_mbx_info {
31450- struct e1000_mbx_operations ops;
31451+ e1000_mbx_operations_no_const ops;
31452 struct e1000_mbx_stats stats;
31453 u32 timeout;
31454 u32 usec_delay;
31455diff -urNp linux-3.1.1/drivers/net/ixgb/ixgb_main.c linux-3.1.1/drivers/net/ixgb/ixgb_main.c
31456--- linux-3.1.1/drivers/net/ixgb/ixgb_main.c 2011-11-11 15:19:27.000000000 -0500
31457+++ linux-3.1.1/drivers/net/ixgb/ixgb_main.c 2011-11-16 18:40:22.000000000 -0500
31458@@ -1070,6 +1070,8 @@ ixgb_set_multi(struct net_device *netdev
31459 u32 rctl;
31460 int i;
31461
31462+ pax_track_stack();
31463+
31464 /* Check for Promiscuous and All Multicast modes */
31465
31466 rctl = IXGB_READ_REG(hw, RCTL);
31467diff -urNp linux-3.1.1/drivers/net/ixgb/ixgb_param.c linux-3.1.1/drivers/net/ixgb/ixgb_param.c
31468--- linux-3.1.1/drivers/net/ixgb/ixgb_param.c 2011-11-11 15:19:27.000000000 -0500
31469+++ linux-3.1.1/drivers/net/ixgb/ixgb_param.c 2011-11-16 18:40:22.000000000 -0500
31470@@ -261,6 +261,9 @@ void __devinit
31471 ixgb_check_options(struct ixgb_adapter *adapter)
31472 {
31473 int bd = adapter->bd_number;
31474+
31475+ pax_track_stack();
31476+
31477 if (bd >= IXGB_MAX_NIC) {
31478 pr_notice("Warning: no configuration for board #%i\n", bd);
31479 pr_notice("Using defaults for all values\n");
31480diff -urNp linux-3.1.1/drivers/net/ixgbe/ixgbe_type.h linux-3.1.1/drivers/net/ixgbe/ixgbe_type.h
31481--- linux-3.1.1/drivers/net/ixgbe/ixgbe_type.h 2011-11-11 15:19:27.000000000 -0500
31482+++ linux-3.1.1/drivers/net/ixgbe/ixgbe_type.h 2011-11-16 18:39:07.000000000 -0500
31483@@ -2642,6 +2642,7 @@ struct ixgbe_eeprom_operations {
31484 s32 (*update_checksum)(struct ixgbe_hw *);
31485 u16 (*calc_checksum)(struct ixgbe_hw *);
31486 };
31487+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
31488
31489 struct ixgbe_mac_operations {
31490 s32 (*init_hw)(struct ixgbe_hw *);
31491@@ -2703,6 +2704,7 @@ struct ixgbe_mac_operations {
31492 /* Manageability interface */
31493 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
31494 };
31495+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
31496
31497 struct ixgbe_phy_operations {
31498 s32 (*identify)(struct ixgbe_hw *);
31499@@ -2722,9 +2724,10 @@ struct ixgbe_phy_operations {
31500 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
31501 s32 (*check_overtemp)(struct ixgbe_hw *);
31502 };
31503+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
31504
31505 struct ixgbe_eeprom_info {
31506- struct ixgbe_eeprom_operations ops;
31507+ ixgbe_eeprom_operations_no_const ops;
31508 enum ixgbe_eeprom_type type;
31509 u32 semaphore_delay;
31510 u16 word_size;
31511@@ -2734,7 +2737,7 @@ struct ixgbe_eeprom_info {
31512
31513 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
31514 struct ixgbe_mac_info {
31515- struct ixgbe_mac_operations ops;
31516+ ixgbe_mac_operations_no_const ops;
31517 enum ixgbe_mac_type type;
31518 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
31519 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
31520@@ -2762,7 +2765,7 @@ struct ixgbe_mac_info {
31521 };
31522
31523 struct ixgbe_phy_info {
31524- struct ixgbe_phy_operations ops;
31525+ ixgbe_phy_operations_no_const ops;
31526 struct mdio_if_info mdio;
31527 enum ixgbe_phy_type type;
31528 u32 id;
31529@@ -2790,6 +2793,7 @@ struct ixgbe_mbx_operations {
31530 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
31531 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
31532 };
31533+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
31534
31535 struct ixgbe_mbx_stats {
31536 u32 msgs_tx;
31537@@ -2801,7 +2805,7 @@ struct ixgbe_mbx_stats {
31538 };
31539
31540 struct ixgbe_mbx_info {
31541- struct ixgbe_mbx_operations ops;
31542+ ixgbe_mbx_operations_no_const ops;
31543 struct ixgbe_mbx_stats stats;
31544 u32 timeout;
31545 u32 usec_delay;
31546diff -urNp linux-3.1.1/drivers/net/ixgbevf/vf.h linux-3.1.1/drivers/net/ixgbevf/vf.h
31547--- linux-3.1.1/drivers/net/ixgbevf/vf.h 2011-11-11 15:19:27.000000000 -0500
31548+++ linux-3.1.1/drivers/net/ixgbevf/vf.h 2011-11-16 18:39:07.000000000 -0500
31549@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
31550 s32 (*clear_vfta)(struct ixgbe_hw *);
31551 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
31552 };
31553+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
31554
31555 enum ixgbe_mac_type {
31556 ixgbe_mac_unknown = 0,
31557@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
31558 };
31559
31560 struct ixgbe_mac_info {
31561- struct ixgbe_mac_operations ops;
31562+ ixgbe_mac_operations_no_const ops;
31563 u8 addr[6];
31564 u8 perm_addr[6];
31565
31566@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
31567 s32 (*check_for_ack)(struct ixgbe_hw *);
31568 s32 (*check_for_rst)(struct ixgbe_hw *);
31569 };
31570+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
31571
31572 struct ixgbe_mbx_stats {
31573 u32 msgs_tx;
31574@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
31575 };
31576
31577 struct ixgbe_mbx_info {
31578- struct ixgbe_mbx_operations ops;
31579+ ixgbe_mbx_operations_no_const ops;
31580 struct ixgbe_mbx_stats stats;
31581 u32 timeout;
31582 u32 udelay;
31583diff -urNp linux-3.1.1/drivers/net/ksz884x.c linux-3.1.1/drivers/net/ksz884x.c
31584--- linux-3.1.1/drivers/net/ksz884x.c 2011-11-11 15:19:27.000000000 -0500
31585+++ linux-3.1.1/drivers/net/ksz884x.c 2011-11-16 18:40:22.000000000 -0500
31586@@ -6533,6 +6533,8 @@ static void netdev_get_ethtool_stats(str
31587 int rc;
31588 u64 counter[TOTAL_PORT_COUNTER_NUM];
31589
31590+ pax_track_stack();
31591+
31592 mutex_lock(&hw_priv->lock);
31593 n = SWITCH_PORT_NUM;
31594 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
31595diff -urNp linux-3.1.1/drivers/net/mlx4/main.c linux-3.1.1/drivers/net/mlx4/main.c
31596--- linux-3.1.1/drivers/net/mlx4/main.c 2011-11-11 15:19:27.000000000 -0500
31597+++ linux-3.1.1/drivers/net/mlx4/main.c 2011-11-16 18:40:22.000000000 -0500
31598@@ -40,6 +40,7 @@
31599 #include <linux/dma-mapping.h>
31600 #include <linux/slab.h>
31601 #include <linux/io-mapping.h>
31602+#include <linux/sched.h>
31603
31604 #include <linux/mlx4/device.h>
31605 #include <linux/mlx4/doorbell.h>
31606@@ -762,6 +763,8 @@ static int mlx4_init_hca(struct mlx4_dev
31607 u64 icm_size;
31608 int err;
31609
31610+ pax_track_stack();
31611+
31612 err = mlx4_QUERY_FW(dev);
31613 if (err) {
31614 if (err == -EACCES)
31615diff -urNp linux-3.1.1/drivers/net/niu.c linux-3.1.1/drivers/net/niu.c
31616--- linux-3.1.1/drivers/net/niu.c 2011-11-11 15:19:27.000000000 -0500
31617+++ linux-3.1.1/drivers/net/niu.c 2011-11-16 18:40:22.000000000 -0500
31618@@ -9061,6 +9061,8 @@ static void __devinit niu_try_msix(struc
31619 int i, num_irqs, err;
31620 u8 first_ldg;
31621
31622+ pax_track_stack();
31623+
31624 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
31625 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
31626 ldg_num_map[i] = first_ldg + i;
31627diff -urNp linux-3.1.1/drivers/net/pcnet32.c linux-3.1.1/drivers/net/pcnet32.c
31628--- linux-3.1.1/drivers/net/pcnet32.c 2011-11-11 15:19:27.000000000 -0500
31629+++ linux-3.1.1/drivers/net/pcnet32.c 2011-11-16 18:39:07.000000000 -0500
31630@@ -270,7 +270,7 @@ struct pcnet32_private {
31631 struct sk_buff **rx_skbuff;
31632 dma_addr_t *tx_dma_addr;
31633 dma_addr_t *rx_dma_addr;
31634- struct pcnet32_access a;
31635+ struct pcnet32_access *a;
31636 spinlock_t lock; /* Guard lock */
31637 unsigned int cur_rx, cur_tx; /* The next free ring entry */
31638 unsigned int rx_ring_size; /* current rx ring size */
31639@@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct n
31640 u16 val;
31641
31642 netif_wake_queue(dev);
31643- val = lp->a.read_csr(ioaddr, CSR3);
31644+ val = lp->a->read_csr(ioaddr, CSR3);
31645 val &= 0x00ff;
31646- lp->a.write_csr(ioaddr, CSR3, val);
31647+ lp->a->write_csr(ioaddr, CSR3, val);
31648 napi_enable(&lp->napi);
31649 }
31650
31651@@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_d
31652 r = mii_link_ok(&lp->mii_if);
31653 } else if (lp->chip_version >= PCNET32_79C970A) {
31654 ulong ioaddr = dev->base_addr; /* card base I/O address */
31655- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
31656+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
31657 } else { /* can not detect link on really old chips */
31658 r = 1;
31659 }
31660@@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct
31661 pcnet32_netif_stop(dev);
31662
31663 spin_lock_irqsave(&lp->lock, flags);
31664- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31665+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31666
31667 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
31668
31669@@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct
31670 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
31671 {
31672 struct pcnet32_private *lp = netdev_priv(dev);
31673- struct pcnet32_access *a = &lp->a; /* access to registers */
31674+ struct pcnet32_access *a = lp->a; /* access to registers */
31675 ulong ioaddr = dev->base_addr; /* card base I/O address */
31676 struct sk_buff *skb; /* sk buff */
31677 int x, i; /* counters */
31678@@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct
31679 pcnet32_netif_stop(dev);
31680
31681 spin_lock_irqsave(&lp->lock, flags);
31682- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31683+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31684
31685 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
31686
31687 /* Reset the PCNET32 */
31688- lp->a.reset(ioaddr);
31689- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31690+ lp->a->reset(ioaddr);
31691+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31692
31693 /* switch pcnet32 to 32bit mode */
31694- lp->a.write_bcr(ioaddr, 20, 2);
31695+ lp->a->write_bcr(ioaddr, 20, 2);
31696
31697 /* purge & init rings but don't actually restart */
31698 pcnet32_restart(dev, 0x0000);
31699
31700- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31701+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31702
31703 /* Initialize Transmit buffers. */
31704 size = data_len + 15;
31705@@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct
31706
31707 /* set int loopback in CSR15 */
31708 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
31709- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
31710+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
31711
31712 teststatus = cpu_to_le16(0x8000);
31713- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
31714+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
31715
31716 /* Check status of descriptors */
31717 for (x = 0; x < numbuffs; x++) {
31718@@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct
31719 }
31720 }
31721
31722- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31723+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31724 wmb();
31725 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
31726 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
31727@@ -1015,7 +1015,7 @@ clean_up:
31728 pcnet32_restart(dev, CSR0_NORMAL);
31729 } else {
31730 pcnet32_purge_rx_ring(dev);
31731- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
31732+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
31733 }
31734 spin_unlock_irqrestore(&lp->lock, flags);
31735
31736@@ -1026,7 +1026,7 @@ static int pcnet32_set_phys_id(struct ne
31737 enum ethtool_phys_id_state state)
31738 {
31739 struct pcnet32_private *lp = netdev_priv(dev);
31740- struct pcnet32_access *a = &lp->a;
31741+ struct pcnet32_access *a = lp->a;
31742 ulong ioaddr = dev->base_addr;
31743 unsigned long flags;
31744 int i;
31745@@ -1067,7 +1067,7 @@ static int pcnet32_suspend(struct net_de
31746 {
31747 int csr5;
31748 struct pcnet32_private *lp = netdev_priv(dev);
31749- struct pcnet32_access *a = &lp->a;
31750+ struct pcnet32_access *a = lp->a;
31751 ulong ioaddr = dev->base_addr;
31752 int ticks;
31753
31754@@ -1324,8 +1324,8 @@ static int pcnet32_poll(struct napi_stru
31755 spin_lock_irqsave(&lp->lock, flags);
31756 if (pcnet32_tx(dev)) {
31757 /* reset the chip to clear the error condition, then restart */
31758- lp->a.reset(ioaddr);
31759- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31760+ lp->a->reset(ioaddr);
31761+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31762 pcnet32_restart(dev, CSR0_START);
31763 netif_wake_queue(dev);
31764 }
31765@@ -1337,12 +1337,12 @@ static int pcnet32_poll(struct napi_stru
31766 __napi_complete(napi);
31767
31768 /* clear interrupt masks */
31769- val = lp->a.read_csr(ioaddr, CSR3);
31770+ val = lp->a->read_csr(ioaddr, CSR3);
31771 val &= 0x00ff;
31772- lp->a.write_csr(ioaddr, CSR3, val);
31773+ lp->a->write_csr(ioaddr, CSR3, val);
31774
31775 /* Set interrupt enable. */
31776- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
31777+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
31778
31779 spin_unlock_irqrestore(&lp->lock, flags);
31780 }
31781@@ -1365,7 +1365,7 @@ static void pcnet32_get_regs(struct net_
31782 int i, csr0;
31783 u16 *buff = ptr;
31784 struct pcnet32_private *lp = netdev_priv(dev);
31785- struct pcnet32_access *a = &lp->a;
31786+ struct pcnet32_access *a = lp->a;
31787 ulong ioaddr = dev->base_addr;
31788 unsigned long flags;
31789
31790@@ -1401,9 +1401,9 @@ static void pcnet32_get_regs(struct net_
31791 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
31792 if (lp->phymask & (1 << j)) {
31793 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
31794- lp->a.write_bcr(ioaddr, 33,
31795+ lp->a->write_bcr(ioaddr, 33,
31796 (j << 5) | i);
31797- *buff++ = lp->a.read_bcr(ioaddr, 34);
31798+ *buff++ = lp->a->read_bcr(ioaddr, 34);
31799 }
31800 }
31801 }
31802@@ -1785,7 +1785,7 @@ pcnet32_probe1(unsigned long ioaddr, int
31803 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
31804 lp->options |= PCNET32_PORT_FD;
31805
31806- lp->a = *a;
31807+ lp->a = a;
31808
31809 /* prior to register_netdev, dev->name is not yet correct */
31810 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
31811@@ -1844,7 +1844,7 @@ pcnet32_probe1(unsigned long ioaddr, int
31812 if (lp->mii) {
31813 /* lp->phycount and lp->phymask are set to 0 by memset above */
31814
31815- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
31816+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
31817 /* scan for PHYs */
31818 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
31819 unsigned short id1, id2;
31820@@ -1864,7 +1864,7 @@ pcnet32_probe1(unsigned long ioaddr, int
31821 pr_info("Found PHY %04x:%04x at address %d\n",
31822 id1, id2, i);
31823 }
31824- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
31825+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
31826 if (lp->phycount > 1)
31827 lp->options |= PCNET32_PORT_MII;
31828 }
31829@@ -2020,10 +2020,10 @@ static int pcnet32_open(struct net_devic
31830 }
31831
31832 /* Reset the PCNET32 */
31833- lp->a.reset(ioaddr);
31834+ lp->a->reset(ioaddr);
31835
31836 /* switch pcnet32 to 32bit mode */
31837- lp->a.write_bcr(ioaddr, 20, 2);
31838+ lp->a->write_bcr(ioaddr, 20, 2);
31839
31840 netif_printk(lp, ifup, KERN_DEBUG, dev,
31841 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
31842@@ -2032,14 +2032,14 @@ static int pcnet32_open(struct net_devic
31843 (u32) (lp->init_dma_addr));
31844
31845 /* set/reset autoselect bit */
31846- val = lp->a.read_bcr(ioaddr, 2) & ~2;
31847+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
31848 if (lp->options & PCNET32_PORT_ASEL)
31849 val |= 2;
31850- lp->a.write_bcr(ioaddr, 2, val);
31851+ lp->a->write_bcr(ioaddr, 2, val);
31852
31853 /* handle full duplex setting */
31854 if (lp->mii_if.full_duplex) {
31855- val = lp->a.read_bcr(ioaddr, 9) & ~3;
31856+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
31857 if (lp->options & PCNET32_PORT_FD) {
31858 val |= 1;
31859 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
31860@@ -2049,14 +2049,14 @@ static int pcnet32_open(struct net_devic
31861 if (lp->chip_version == 0x2627)
31862 val |= 3;
31863 }
31864- lp->a.write_bcr(ioaddr, 9, val);
31865+ lp->a->write_bcr(ioaddr, 9, val);
31866 }
31867
31868 /* set/reset GPSI bit in test register */
31869- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
31870+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
31871 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
31872 val |= 0x10;
31873- lp->a.write_csr(ioaddr, 124, val);
31874+ lp->a->write_csr(ioaddr, 124, val);
31875
31876 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
31877 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
31878@@ -2075,24 +2075,24 @@ static int pcnet32_open(struct net_devic
31879 * duplex, and/or enable auto negotiation, and clear DANAS
31880 */
31881 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
31882- lp->a.write_bcr(ioaddr, 32,
31883- lp->a.read_bcr(ioaddr, 32) | 0x0080);
31884+ lp->a->write_bcr(ioaddr, 32,
31885+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
31886 /* disable Auto Negotiation, set 10Mpbs, HD */
31887- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
31888+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
31889 if (lp->options & PCNET32_PORT_FD)
31890 val |= 0x10;
31891 if (lp->options & PCNET32_PORT_100)
31892 val |= 0x08;
31893- lp->a.write_bcr(ioaddr, 32, val);
31894+ lp->a->write_bcr(ioaddr, 32, val);
31895 } else {
31896 if (lp->options & PCNET32_PORT_ASEL) {
31897- lp->a.write_bcr(ioaddr, 32,
31898- lp->a.read_bcr(ioaddr,
31899+ lp->a->write_bcr(ioaddr, 32,
31900+ lp->a->read_bcr(ioaddr,
31901 32) | 0x0080);
31902 /* enable auto negotiate, setup, disable fd */
31903- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
31904+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
31905 val |= 0x20;
31906- lp->a.write_bcr(ioaddr, 32, val);
31907+ lp->a->write_bcr(ioaddr, 32, val);
31908 }
31909 }
31910 } else {
31911@@ -2105,10 +2105,10 @@ static int pcnet32_open(struct net_devic
31912 * There is really no good other way to handle multiple PHYs
31913 * other than turning off all automatics
31914 */
31915- val = lp->a.read_bcr(ioaddr, 2);
31916- lp->a.write_bcr(ioaddr, 2, val & ~2);
31917- val = lp->a.read_bcr(ioaddr, 32);
31918- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
31919+ val = lp->a->read_bcr(ioaddr, 2);
31920+ lp->a->write_bcr(ioaddr, 2, val & ~2);
31921+ val = lp->a->read_bcr(ioaddr, 32);
31922+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
31923
31924 if (!(lp->options & PCNET32_PORT_ASEL)) {
31925 /* setup ecmd */
31926@@ -2118,7 +2118,7 @@ static int pcnet32_open(struct net_devic
31927 ethtool_cmd_speed_set(&ecmd,
31928 (lp->options & PCNET32_PORT_100) ?
31929 SPEED_100 : SPEED_10);
31930- bcr9 = lp->a.read_bcr(ioaddr, 9);
31931+ bcr9 = lp->a->read_bcr(ioaddr, 9);
31932
31933 if (lp->options & PCNET32_PORT_FD) {
31934 ecmd.duplex = DUPLEX_FULL;
31935@@ -2127,7 +2127,7 @@ static int pcnet32_open(struct net_devic
31936 ecmd.duplex = DUPLEX_HALF;
31937 bcr9 |= ~(1 << 0);
31938 }
31939- lp->a.write_bcr(ioaddr, 9, bcr9);
31940+ lp->a->write_bcr(ioaddr, 9, bcr9);
31941 }
31942
31943 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
31944@@ -2158,9 +2158,9 @@ static int pcnet32_open(struct net_devic
31945
31946 #ifdef DO_DXSUFLO
31947 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
31948- val = lp->a.read_csr(ioaddr, CSR3);
31949+ val = lp->a->read_csr(ioaddr, CSR3);
31950 val |= 0x40;
31951- lp->a.write_csr(ioaddr, CSR3, val);
31952+ lp->a->write_csr(ioaddr, CSR3, val);
31953 }
31954 #endif
31955
31956@@ -2176,11 +2176,11 @@ static int pcnet32_open(struct net_devic
31957 napi_enable(&lp->napi);
31958
31959 /* Re-initialize the PCNET32, and start it when done. */
31960- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
31961- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
31962+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
31963+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
31964
31965- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31966- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
31967+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31968+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
31969
31970 netif_start_queue(dev);
31971
31972@@ -2192,19 +2192,19 @@ static int pcnet32_open(struct net_devic
31973
31974 i = 0;
31975 while (i++ < 100)
31976- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
31977+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
31978 break;
31979 /*
31980 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
31981 * reports that doing so triggers a bug in the '974.
31982 */
31983- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
31984+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
31985
31986 netif_printk(lp, ifup, KERN_DEBUG, dev,
31987 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
31988 i,
31989 (u32) (lp->init_dma_addr),
31990- lp->a.read_csr(ioaddr, CSR0));
31991+ lp->a->read_csr(ioaddr, CSR0));
31992
31993 spin_unlock_irqrestore(&lp->lock, flags);
31994
31995@@ -2218,7 +2218,7 @@ err_free_ring:
31996 * Switch back to 16bit mode to avoid problems with dumb
31997 * DOS packet driver after a warm reboot
31998 */
31999- lp->a.write_bcr(ioaddr, 20, 4);
32000+ lp->a->write_bcr(ioaddr, 20, 4);
32001
32002 err_free_irq:
32003 spin_unlock_irqrestore(&lp->lock, flags);
32004@@ -2323,7 +2323,7 @@ static void pcnet32_restart(struct net_d
32005
32006 /* wait for stop */
32007 for (i = 0; i < 100; i++)
32008- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
32009+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
32010 break;
32011
32012 if (i >= 100)
32013@@ -2335,13 +2335,13 @@ static void pcnet32_restart(struct net_d
32014 return;
32015
32016 /* ReInit Ring */
32017- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
32018+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
32019 i = 0;
32020 while (i++ < 1000)
32021- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
32022+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
32023 break;
32024
32025- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
32026+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
32027 }
32028
32029 static void pcnet32_tx_timeout(struct net_device *dev)
32030@@ -2353,8 +2353,8 @@ static void pcnet32_tx_timeout(struct ne
32031 /* Transmitter timeout, serious problems. */
32032 if (pcnet32_debug & NETIF_MSG_DRV)
32033 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
32034- dev->name, lp->a.read_csr(ioaddr, CSR0));
32035- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
32036+ dev->name, lp->a->read_csr(ioaddr, CSR0));
32037+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
32038 dev->stats.tx_errors++;
32039 if (netif_msg_tx_err(lp)) {
32040 int i;
32041@@ -2397,7 +2397,7 @@ static netdev_tx_t pcnet32_start_xmit(st
32042
32043 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
32044 "%s() called, csr0 %4.4x\n",
32045- __func__, lp->a.read_csr(ioaddr, CSR0));
32046+ __func__, lp->a->read_csr(ioaddr, CSR0));
32047
32048 /* Default status -- will not enable Successful-TxDone
32049 * interrupt when that option is available to us.
32050@@ -2427,7 +2427,7 @@ static netdev_tx_t pcnet32_start_xmit(st
32051 dev->stats.tx_bytes += skb->len;
32052
32053 /* Trigger an immediate send poll. */
32054- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
32055+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
32056
32057 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
32058 lp->tx_full = 1;
32059@@ -2452,16 +2452,16 @@ pcnet32_interrupt(int irq, void *dev_id)
32060
32061 spin_lock(&lp->lock);
32062
32063- csr0 = lp->a.read_csr(ioaddr, CSR0);
32064+ csr0 = lp->a->read_csr(ioaddr, CSR0);
32065 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
32066 if (csr0 == 0xffff)
32067 break; /* PCMCIA remove happened */
32068 /* Acknowledge all of the current interrupt sources ASAP. */
32069- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
32070+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
32071
32072 netif_printk(lp, intr, KERN_DEBUG, dev,
32073 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
32074- csr0, lp->a.read_csr(ioaddr, CSR0));
32075+ csr0, lp->a->read_csr(ioaddr, CSR0));
32076
32077 /* Log misc errors. */
32078 if (csr0 & 0x4000)
32079@@ -2488,19 +2488,19 @@ pcnet32_interrupt(int irq, void *dev_id)
32080 if (napi_schedule_prep(&lp->napi)) {
32081 u16 val;
32082 /* set interrupt masks */
32083- val = lp->a.read_csr(ioaddr, CSR3);
32084+ val = lp->a->read_csr(ioaddr, CSR3);
32085 val |= 0x5f00;
32086- lp->a.write_csr(ioaddr, CSR3, val);
32087+ lp->a->write_csr(ioaddr, CSR3, val);
32088
32089 __napi_schedule(&lp->napi);
32090 break;
32091 }
32092- csr0 = lp->a.read_csr(ioaddr, CSR0);
32093+ csr0 = lp->a->read_csr(ioaddr, CSR0);
32094 }
32095
32096 netif_printk(lp, intr, KERN_DEBUG, dev,
32097 "exiting interrupt, csr0=%#4.4x\n",
32098- lp->a.read_csr(ioaddr, CSR0));
32099+ lp->a->read_csr(ioaddr, CSR0));
32100
32101 spin_unlock(&lp->lock);
32102
32103@@ -2520,20 +2520,20 @@ static int pcnet32_close(struct net_devi
32104
32105 spin_lock_irqsave(&lp->lock, flags);
32106
32107- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
32108+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
32109
32110 netif_printk(lp, ifdown, KERN_DEBUG, dev,
32111 "Shutting down ethercard, status was %2.2x\n",
32112- lp->a.read_csr(ioaddr, CSR0));
32113+ lp->a->read_csr(ioaddr, CSR0));
32114
32115 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
32116- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
32117+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
32118
32119 /*
32120 * Switch back to 16bit mode to avoid problems with dumb
32121 * DOS packet driver after a warm reboot
32122 */
32123- lp->a.write_bcr(ioaddr, 20, 4);
32124+ lp->a->write_bcr(ioaddr, 20, 4);
32125
32126 spin_unlock_irqrestore(&lp->lock, flags);
32127
32128@@ -2556,7 +2556,7 @@ static struct net_device_stats *pcnet32_
32129 unsigned long flags;
32130
32131 spin_lock_irqsave(&lp->lock, flags);
32132- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
32133+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
32134 spin_unlock_irqrestore(&lp->lock, flags);
32135
32136 return &dev->stats;
32137@@ -2577,10 +2577,10 @@ static void pcnet32_load_multicast(struc
32138 if (dev->flags & IFF_ALLMULTI) {
32139 ib->filter[0] = cpu_to_le32(~0U);
32140 ib->filter[1] = cpu_to_le32(~0U);
32141- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
32142- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
32143- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
32144- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
32145+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
32146+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
32147+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
32148+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
32149 return;
32150 }
32151 /* clear the multicast filter */
32152@@ -2594,7 +2594,7 @@ static void pcnet32_load_multicast(struc
32153 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
32154 }
32155 for (i = 0; i < 4; i++)
32156- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
32157+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
32158 le16_to_cpu(mcast_table[i]));
32159 }
32160
32161@@ -2609,28 +2609,28 @@ static void pcnet32_set_multicast_list(s
32162
32163 spin_lock_irqsave(&lp->lock, flags);
32164 suspended = pcnet32_suspend(dev, &flags, 0);
32165- csr15 = lp->a.read_csr(ioaddr, CSR15);
32166+ csr15 = lp->a->read_csr(ioaddr, CSR15);
32167 if (dev->flags & IFF_PROMISC) {
32168 /* Log any net taps. */
32169 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
32170 lp->init_block->mode =
32171 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
32172 7);
32173- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
32174+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
32175 } else {
32176 lp->init_block->mode =
32177 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
32178- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
32179+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
32180 pcnet32_load_multicast(dev);
32181 }
32182
32183 if (suspended) {
32184 int csr5;
32185 /* clear SUSPEND (SPND) - CSR5 bit 0 */
32186- csr5 = lp->a.read_csr(ioaddr, CSR5);
32187- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
32188+ csr5 = lp->a->read_csr(ioaddr, CSR5);
32189+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
32190 } else {
32191- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
32192+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
32193 pcnet32_restart(dev, CSR0_NORMAL);
32194 netif_wake_queue(dev);
32195 }
32196@@ -2648,8 +2648,8 @@ static int mdio_read(struct net_device *
32197 if (!lp->mii)
32198 return 0;
32199
32200- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
32201- val_out = lp->a.read_bcr(ioaddr, 34);
32202+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
32203+ val_out = lp->a->read_bcr(ioaddr, 34);
32204
32205 return val_out;
32206 }
32207@@ -2663,8 +2663,8 @@ static void mdio_write(struct net_device
32208 if (!lp->mii)
32209 return;
32210
32211- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
32212- lp->a.write_bcr(ioaddr, 34, val);
32213+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
32214+ lp->a->write_bcr(ioaddr, 34, val);
32215 }
32216
32217 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32218@@ -2741,7 +2741,7 @@ static void pcnet32_check_media(struct n
32219 curr_link = mii_link_ok(&lp->mii_if);
32220 } else {
32221 ulong ioaddr = dev->base_addr; /* card base I/O address */
32222- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
32223+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
32224 }
32225 if (!curr_link) {
32226 if (prev_link || verbose) {
32227@@ -2764,13 +2764,13 @@ static void pcnet32_check_media(struct n
32228 (ecmd.duplex == DUPLEX_FULL)
32229 ? "full" : "half");
32230 }
32231- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
32232+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
32233 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
32234 if (lp->mii_if.full_duplex)
32235 bcr9 |= (1 << 0);
32236 else
32237 bcr9 &= ~(1 << 0);
32238- lp->a.write_bcr(dev->base_addr, 9, bcr9);
32239+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
32240 }
32241 } else {
32242 netif_info(lp, link, dev, "link up\n");
32243diff -urNp linux-3.1.1/drivers/net/ppp_generic.c linux-3.1.1/drivers/net/ppp_generic.c
32244--- linux-3.1.1/drivers/net/ppp_generic.c 2011-11-11 15:19:27.000000000 -0500
32245+++ linux-3.1.1/drivers/net/ppp_generic.c 2011-11-16 18:39:07.000000000 -0500
32246@@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
32247 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
32248 struct ppp_stats stats;
32249 struct ppp_comp_stats cstats;
32250- char *vers;
32251
32252 switch (cmd) {
32253 case SIOCGPPPSTATS:
32254@@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
32255 break;
32256
32257 case SIOCGPPPVER:
32258- vers = PPP_VERSION;
32259- if (copy_to_user(addr, vers, strlen(vers) + 1))
32260+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
32261 break;
32262 err = 0;
32263 break;
32264diff -urNp linux-3.1.1/drivers/net/r8169.c linux-3.1.1/drivers/net/r8169.c
32265--- linux-3.1.1/drivers/net/r8169.c 2011-11-11 15:19:27.000000000 -0500
32266+++ linux-3.1.1/drivers/net/r8169.c 2011-11-16 18:39:07.000000000 -0500
32267@@ -663,12 +663,12 @@ struct rtl8169_private {
32268 struct mdio_ops {
32269 void (*write)(void __iomem *, int, int);
32270 int (*read)(void __iomem *, int);
32271- } mdio_ops;
32272+ } __no_const mdio_ops;
32273
32274 struct pll_power_ops {
32275 void (*down)(struct rtl8169_private *);
32276 void (*up)(struct rtl8169_private *);
32277- } pll_power_ops;
32278+ } __no_const pll_power_ops;
32279
32280 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
32281 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
32282diff -urNp linux-3.1.1/drivers/net/sis190.c linux-3.1.1/drivers/net/sis190.c
32283--- linux-3.1.1/drivers/net/sis190.c 2011-11-11 15:19:27.000000000 -0500
32284+++ linux-3.1.1/drivers/net/sis190.c 2011-11-16 18:39:07.000000000 -0500
32285@@ -1624,7 +1624,7 @@ static int __devinit sis190_get_mac_addr
32286 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
32287 struct net_device *dev)
32288 {
32289- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
32290+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
32291 struct sis190_private *tp = netdev_priv(dev);
32292 struct pci_dev *isa_bridge;
32293 u8 reg, tmp8;
32294diff -urNp linux-3.1.1/drivers/net/sundance.c linux-3.1.1/drivers/net/sundance.c
32295--- linux-3.1.1/drivers/net/sundance.c 2011-11-11 15:19:27.000000000 -0500
32296+++ linux-3.1.1/drivers/net/sundance.c 2011-11-16 18:39:07.000000000 -0500
32297@@ -218,7 +218,7 @@ enum {
32298 struct pci_id_info {
32299 const char *name;
32300 };
32301-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32302+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32303 {"D-Link DFE-550TX FAST Ethernet Adapter"},
32304 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
32305 {"D-Link DFE-580TX 4 port Server Adapter"},
32306diff -urNp linux-3.1.1/drivers/net/tg3.h linux-3.1.1/drivers/net/tg3.h
32307--- linux-3.1.1/drivers/net/tg3.h 2011-11-11 15:19:27.000000000 -0500
32308+++ linux-3.1.1/drivers/net/tg3.h 2011-11-16 18:39:07.000000000 -0500
32309@@ -134,6 +134,7 @@
32310 #define CHIPREV_ID_5750_A0 0x4000
32311 #define CHIPREV_ID_5750_A1 0x4001
32312 #define CHIPREV_ID_5750_A3 0x4003
32313+#define CHIPREV_ID_5750_C1 0x4201
32314 #define CHIPREV_ID_5750_C2 0x4202
32315 #define CHIPREV_ID_5752_A0_HW 0x5000
32316 #define CHIPREV_ID_5752_A0 0x6000
32317diff -urNp linux-3.1.1/drivers/net/tokenring/abyss.c linux-3.1.1/drivers/net/tokenring/abyss.c
32318--- linux-3.1.1/drivers/net/tokenring/abyss.c 2011-11-11 15:19:27.000000000 -0500
32319+++ linux-3.1.1/drivers/net/tokenring/abyss.c 2011-11-16 18:39:07.000000000 -0500
32320@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
32321
32322 static int __init abyss_init (void)
32323 {
32324- abyss_netdev_ops = tms380tr_netdev_ops;
32325+ pax_open_kernel();
32326+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32327
32328- abyss_netdev_ops.ndo_open = abyss_open;
32329- abyss_netdev_ops.ndo_stop = abyss_close;
32330+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
32331+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
32332+ pax_close_kernel();
32333
32334 return pci_register_driver(&abyss_driver);
32335 }
32336diff -urNp linux-3.1.1/drivers/net/tokenring/madgemc.c linux-3.1.1/drivers/net/tokenring/madgemc.c
32337--- linux-3.1.1/drivers/net/tokenring/madgemc.c 2011-11-11 15:19:27.000000000 -0500
32338+++ linux-3.1.1/drivers/net/tokenring/madgemc.c 2011-11-16 18:39:07.000000000 -0500
32339@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver
32340
32341 static int __init madgemc_init (void)
32342 {
32343- madgemc_netdev_ops = tms380tr_netdev_ops;
32344- madgemc_netdev_ops.ndo_open = madgemc_open;
32345- madgemc_netdev_ops.ndo_stop = madgemc_close;
32346+ pax_open_kernel();
32347+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32348+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
32349+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
32350+ pax_close_kernel();
32351
32352 return mca_register_driver (&madgemc_driver);
32353 }
32354diff -urNp linux-3.1.1/drivers/net/tokenring/proteon.c linux-3.1.1/drivers/net/tokenring/proteon.c
32355--- linux-3.1.1/drivers/net/tokenring/proteon.c 2011-11-11 15:19:27.000000000 -0500
32356+++ linux-3.1.1/drivers/net/tokenring/proteon.c 2011-11-16 18:39:07.000000000 -0500
32357@@ -353,9 +353,11 @@ static int __init proteon_init(void)
32358 struct platform_device *pdev;
32359 int i, num = 0, err = 0;
32360
32361- proteon_netdev_ops = tms380tr_netdev_ops;
32362- proteon_netdev_ops.ndo_open = proteon_open;
32363- proteon_netdev_ops.ndo_stop = tms380tr_close;
32364+ pax_open_kernel();
32365+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32366+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
32367+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
32368+ pax_close_kernel();
32369
32370 err = platform_driver_register(&proteon_driver);
32371 if (err)
32372diff -urNp linux-3.1.1/drivers/net/tokenring/skisa.c linux-3.1.1/drivers/net/tokenring/skisa.c
32373--- linux-3.1.1/drivers/net/tokenring/skisa.c 2011-11-11 15:19:27.000000000 -0500
32374+++ linux-3.1.1/drivers/net/tokenring/skisa.c 2011-11-16 18:39:07.000000000 -0500
32375@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
32376 struct platform_device *pdev;
32377 int i, num = 0, err = 0;
32378
32379- sk_isa_netdev_ops = tms380tr_netdev_ops;
32380- sk_isa_netdev_ops.ndo_open = sk_isa_open;
32381- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
32382+ pax_open_kernel();
32383+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32384+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
32385+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
32386+ pax_close_kernel();
32387
32388 err = platform_driver_register(&sk_isa_driver);
32389 if (err)
32390diff -urNp linux-3.1.1/drivers/net/tulip/de2104x.c linux-3.1.1/drivers/net/tulip/de2104x.c
32391--- linux-3.1.1/drivers/net/tulip/de2104x.c 2011-11-11 15:19:27.000000000 -0500
32392+++ linux-3.1.1/drivers/net/tulip/de2104x.c 2011-11-16 18:40:22.000000000 -0500
32393@@ -1795,6 +1795,8 @@ static void __devinit de21041_get_srom_i
32394 struct de_srom_info_leaf *il;
32395 void *bufp;
32396
32397+ pax_track_stack();
32398+
32399 /* download entire eeprom */
32400 for (i = 0; i < DE_EEPROM_WORDS; i++)
32401 ((__le16 *)ee_data)[i] =
32402diff -urNp linux-3.1.1/drivers/net/tulip/de4x5.c linux-3.1.1/drivers/net/tulip/de4x5.c
32403--- linux-3.1.1/drivers/net/tulip/de4x5.c 2011-11-11 15:19:27.000000000 -0500
32404+++ linux-3.1.1/drivers/net/tulip/de4x5.c 2011-11-16 18:39:07.000000000 -0500
32405@@ -5397,7 +5397,7 @@ de4x5_ioctl(struct net_device *dev, stru
32406 for (i=0; i<ETH_ALEN; i++) {
32407 tmp.addr[i] = dev->dev_addr[i];
32408 }
32409- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32410+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32411 break;
32412
32413 case DE4X5_SET_HWADDR: /* Set the hardware address */
32414@@ -5437,7 +5437,7 @@ de4x5_ioctl(struct net_device *dev, stru
32415 spin_lock_irqsave(&lp->lock, flags);
32416 memcpy(&statbuf, &lp->pktStats, ioc->len);
32417 spin_unlock_irqrestore(&lp->lock, flags);
32418- if (copy_to_user(ioc->data, &statbuf, ioc->len))
32419+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
32420 return -EFAULT;
32421 break;
32422 }
32423diff -urNp linux-3.1.1/drivers/net/tulip/eeprom.c linux-3.1.1/drivers/net/tulip/eeprom.c
32424--- linux-3.1.1/drivers/net/tulip/eeprom.c 2011-11-11 15:19:27.000000000 -0500
32425+++ linux-3.1.1/drivers/net/tulip/eeprom.c 2011-11-16 18:39:07.000000000 -0500
32426@@ -81,7 +81,7 @@ static struct eeprom_fixup eeprom_fixups
32427 {NULL}};
32428
32429
32430-static const char *block_name[] __devinitdata = {
32431+static const char *block_name[] __devinitconst = {
32432 "21140 non-MII",
32433 "21140 MII PHY",
32434 "21142 Serial PHY",
32435diff -urNp linux-3.1.1/drivers/net/tulip/winbond-840.c linux-3.1.1/drivers/net/tulip/winbond-840.c
32436--- linux-3.1.1/drivers/net/tulip/winbond-840.c 2011-11-11 15:19:27.000000000 -0500
32437+++ linux-3.1.1/drivers/net/tulip/winbond-840.c 2011-11-16 18:39:07.000000000 -0500
32438@@ -236,7 +236,7 @@ struct pci_id_info {
32439 int drv_flags; /* Driver use, intended as capability flags. */
32440 };
32441
32442-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32443+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32444 { /* Sometime a Level-One switch card. */
32445 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
32446 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
32447diff -urNp linux-3.1.1/drivers/net/usb/hso.c linux-3.1.1/drivers/net/usb/hso.c
32448--- linux-3.1.1/drivers/net/usb/hso.c 2011-11-11 15:19:27.000000000 -0500
32449+++ linux-3.1.1/drivers/net/usb/hso.c 2011-11-16 18:39:07.000000000 -0500
32450@@ -71,7 +71,7 @@
32451 #include <asm/byteorder.h>
32452 #include <linux/serial_core.h>
32453 #include <linux/serial.h>
32454-
32455+#include <asm/local.h>
32456
32457 #define MOD_AUTHOR "Option Wireless"
32458 #define MOD_DESCRIPTION "USB High Speed Option driver"
32459@@ -257,7 +257,7 @@ struct hso_serial {
32460
32461 /* from usb_serial_port */
32462 struct tty_struct *tty;
32463- int open_count;
32464+ local_t open_count;
32465 spinlock_t serial_lock;
32466
32467 int (*write_data) (struct hso_serial *serial);
32468@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
32469 struct urb *urb;
32470
32471 urb = serial->rx_urb[0];
32472- if (serial->open_count > 0) {
32473+ if (local_read(&serial->open_count) > 0) {
32474 count = put_rxbuf_data(urb, serial);
32475 if (count == -1)
32476 return;
32477@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
32478 DUMP1(urb->transfer_buffer, urb->actual_length);
32479
32480 /* Anyone listening? */
32481- if (serial->open_count == 0)
32482+ if (local_read(&serial->open_count) == 0)
32483 return;
32484
32485 if (status == 0) {
32486@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
32487 spin_unlock_irq(&serial->serial_lock);
32488
32489 /* check for port already opened, if not set the termios */
32490- serial->open_count++;
32491- if (serial->open_count == 1) {
32492+ if (local_inc_return(&serial->open_count) == 1) {
32493 serial->rx_state = RX_IDLE;
32494 /* Force default termio settings */
32495 _hso_serial_set_termios(tty, NULL);
32496@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
32497 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
32498 if (result) {
32499 hso_stop_serial_device(serial->parent);
32500- serial->open_count--;
32501+ local_dec(&serial->open_count);
32502 kref_put(&serial->parent->ref, hso_serial_ref_free);
32503 }
32504 } else {
32505@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
32506
32507 /* reset the rts and dtr */
32508 /* do the actual close */
32509- serial->open_count--;
32510+ local_dec(&serial->open_count);
32511
32512- if (serial->open_count <= 0) {
32513- serial->open_count = 0;
32514+ if (local_read(&serial->open_count) <= 0) {
32515+ local_set(&serial->open_count, 0);
32516 spin_lock_irq(&serial->serial_lock);
32517 if (serial->tty == tty) {
32518 serial->tty->driver_data = NULL;
32519@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
32520
32521 /* the actual setup */
32522 spin_lock_irqsave(&serial->serial_lock, flags);
32523- if (serial->open_count)
32524+ if (local_read(&serial->open_count))
32525 _hso_serial_set_termios(tty, old);
32526 else
32527 tty->termios = old;
32528@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
32529 D1("Pending read interrupt on port %d\n", i);
32530 spin_lock(&serial->serial_lock);
32531 if (serial->rx_state == RX_IDLE &&
32532- serial->open_count > 0) {
32533+ local_read(&serial->open_count) > 0) {
32534 /* Setup and send a ctrl req read on
32535 * port i */
32536 if (!serial->rx_urb_filled[0]) {
32537@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interfa
32538 /* Start all serial ports */
32539 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
32540 if (serial_table[i] && (serial_table[i]->interface == iface)) {
32541- if (dev2ser(serial_table[i])->open_count) {
32542+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
32543 result =
32544 hso_start_serial_device(serial_table[i], GFP_NOIO);
32545 hso_kick_transmit(dev2ser(serial_table[i]));
32546diff -urNp linux-3.1.1/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-3.1.1/drivers/net/vmxnet3/vmxnet3_ethtool.c
32547--- linux-3.1.1/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-11-11 15:19:27.000000000 -0500
32548+++ linux-3.1.1/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-11-16 18:39:07.000000000 -0500
32549@@ -601,8 +601,7 @@ vmxnet3_set_rss_indir(struct net_device
32550 * Return with error code if any of the queue indices
32551 * is out of range
32552 */
32553- if (p->ring_index[i] < 0 ||
32554- p->ring_index[i] >= adapter->num_rx_queues)
32555+ if (p->ring_index[i] >= adapter->num_rx_queues)
32556 return -EINVAL;
32557 }
32558
32559diff -urNp linux-3.1.1/drivers/net/vxge/vxge-config.h linux-3.1.1/drivers/net/vxge/vxge-config.h
32560--- linux-3.1.1/drivers/net/vxge/vxge-config.h 2011-11-11 15:19:27.000000000 -0500
32561+++ linux-3.1.1/drivers/net/vxge/vxge-config.h 2011-11-16 18:39:07.000000000 -0500
32562@@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
32563 void (*link_down)(struct __vxge_hw_device *devh);
32564 void (*crit_err)(struct __vxge_hw_device *devh,
32565 enum vxge_hw_event type, u64 ext_data);
32566-};
32567+} __no_const;
32568
32569 /*
32570 * struct __vxge_hw_blockpool_entry - Block private data structure
32571diff -urNp linux-3.1.1/drivers/net/vxge/vxge-main.c linux-3.1.1/drivers/net/vxge/vxge-main.c
32572--- linux-3.1.1/drivers/net/vxge/vxge-main.c 2011-11-11 15:19:27.000000000 -0500
32573+++ linux-3.1.1/drivers/net/vxge/vxge-main.c 2011-11-16 18:40:22.000000000 -0500
32574@@ -100,6 +100,8 @@ static inline void VXGE_COMPLETE_VPATH_T
32575 struct sk_buff *completed[NR_SKB_COMPLETED];
32576 int more;
32577
32578+ pax_track_stack();
32579+
32580 do {
32581 more = 0;
32582 skb_ptr = completed;
32583@@ -1915,6 +1917,8 @@ static enum vxge_hw_status vxge_rth_conf
32584 u8 mtable[256] = {0}; /* CPU to vpath mapping */
32585 int index;
32586
32587+ pax_track_stack();
32588+
32589 /*
32590 * Filling
32591 * - itable with bucket numbers
32592diff -urNp linux-3.1.1/drivers/net/vxge/vxge-traffic.h linux-3.1.1/drivers/net/vxge/vxge-traffic.h
32593--- linux-3.1.1/drivers/net/vxge/vxge-traffic.h 2011-11-11 15:19:27.000000000 -0500
32594+++ linux-3.1.1/drivers/net/vxge/vxge-traffic.h 2011-11-16 18:39:07.000000000 -0500
32595@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
32596 struct vxge_hw_mempool_dma *dma_object,
32597 u32 index,
32598 u32 is_last);
32599-};
32600+} __no_const;
32601
32602 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
32603 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
32604diff -urNp linux-3.1.1/drivers/net/wan/hdlc_x25.c linux-3.1.1/drivers/net/wan/hdlc_x25.c
32605--- linux-3.1.1/drivers/net/wan/hdlc_x25.c 2011-11-11 15:19:27.000000000 -0500
32606+++ linux-3.1.1/drivers/net/wan/hdlc_x25.c 2011-11-16 18:39:07.000000000 -0500
32607@@ -134,16 +134,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
32608
32609 static int x25_open(struct net_device *dev)
32610 {
32611- struct lapb_register_struct cb;
32612+ static struct lapb_register_struct cb = {
32613+ .connect_confirmation = x25_connected,
32614+ .connect_indication = x25_connected,
32615+ .disconnect_confirmation = x25_disconnected,
32616+ .disconnect_indication = x25_disconnected,
32617+ .data_indication = x25_data_indication,
32618+ .data_transmit = x25_data_transmit
32619+ };
32620 int result;
32621
32622- cb.connect_confirmation = x25_connected;
32623- cb.connect_indication = x25_connected;
32624- cb.disconnect_confirmation = x25_disconnected;
32625- cb.disconnect_indication = x25_disconnected;
32626- cb.data_indication = x25_data_indication;
32627- cb.data_transmit = x25_data_transmit;
32628-
32629 result = lapb_register(dev, &cb);
32630 if (result != LAPB_OK)
32631 return result;
32632diff -urNp linux-3.1.1/drivers/net/wimax/i2400m/usb-fw.c linux-3.1.1/drivers/net/wimax/i2400m/usb-fw.c
32633--- linux-3.1.1/drivers/net/wimax/i2400m/usb-fw.c 2011-11-11 15:19:27.000000000 -0500
32634+++ linux-3.1.1/drivers/net/wimax/i2400m/usb-fw.c 2011-11-16 18:40:22.000000000 -0500
32635@@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
32636 int do_autopm = 1;
32637 DECLARE_COMPLETION_ONSTACK(notif_completion);
32638
32639+ pax_track_stack();
32640+
32641 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
32642 i2400m, ack, ack_size);
32643 BUG_ON(_ack == i2400m->bm_ack_buf);
32644diff -urNp linux-3.1.1/drivers/net/wireless/airo.c linux-3.1.1/drivers/net/wireless/airo.c
32645--- linux-3.1.1/drivers/net/wireless/airo.c 2011-11-11 15:19:27.000000000 -0500
32646+++ linux-3.1.1/drivers/net/wireless/airo.c 2011-11-16 18:40:22.000000000 -0500
32647@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
32648 BSSListElement * loop_net;
32649 BSSListElement * tmp_net;
32650
32651+ pax_track_stack();
32652+
32653 /* Blow away current list of scan results */
32654 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
32655 list_move_tail (&loop_net->list, &ai->network_free_list);
32656@@ -3794,6 +3796,8 @@ static u16 setup_card(struct airo_info *
32657 WepKeyRid wkr;
32658 int rc;
32659
32660+ pax_track_stack();
32661+
32662 memset( &mySsid, 0, sizeof( mySsid ) );
32663 kfree (ai->flash);
32664 ai->flash = NULL;
32665@@ -4753,6 +4757,8 @@ static int proc_stats_rid_open( struct i
32666 __le32 *vals = stats.vals;
32667 int len;
32668
32669+ pax_track_stack();
32670+
32671 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32672 return -ENOMEM;
32673 data = file->private_data;
32674@@ -5476,6 +5482,8 @@ static int proc_BSSList_open( struct ino
32675 /* If doLoseSync is not 1, we won't do a Lose Sync */
32676 int doLoseSync = -1;
32677
32678+ pax_track_stack();
32679+
32680 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32681 return -ENOMEM;
32682 data = file->private_data;
32683@@ -7181,6 +7189,8 @@ static int airo_get_aplist(struct net_de
32684 int i;
32685 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
32686
32687+ pax_track_stack();
32688+
32689 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
32690 if (!qual)
32691 return -ENOMEM;
32692@@ -7741,6 +7751,8 @@ static void airo_read_wireless_stats(str
32693 CapabilityRid cap_rid;
32694 __le32 *vals = stats_rid.vals;
32695
32696+ pax_track_stack();
32697+
32698 /* Get stats out of the card */
32699 clear_bit(JOB_WSTATS, &local->jobs);
32700 if (local->power.event) {
32701diff -urNp linux-3.1.1/drivers/net/wireless/ath/ath5k/debug.c linux-3.1.1/drivers/net/wireless/ath/ath5k/debug.c
32702--- linux-3.1.1/drivers/net/wireless/ath/ath5k/debug.c 2011-11-11 15:19:27.000000000 -0500
32703+++ linux-3.1.1/drivers/net/wireless/ath/ath5k/debug.c 2011-11-16 19:08:21.000000000 -0500
32704@@ -203,6 +203,8 @@ static ssize_t read_file_beacon(struct f
32705 unsigned int v;
32706 u64 tsf;
32707
32708+ pax_track_stack();
32709+
32710 v = ath5k_hw_reg_read(ah, AR5K_BEACON);
32711 len += snprintf(buf + len, sizeof(buf) - len,
32712 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
32713@@ -321,6 +323,8 @@ static ssize_t read_file_debug(struct fi
32714 unsigned int len = 0;
32715 unsigned int i;
32716
32717+ pax_track_stack();
32718+
32719 len += snprintf(buf + len, sizeof(buf) - len,
32720 "DEBUG LEVEL: 0x%08x\n\n", ah->debug.level);
32721
32722@@ -492,6 +496,8 @@ static ssize_t read_file_misc(struct fil
32723 unsigned int len = 0;
32724 u32 filt = ath5k_hw_get_rx_filter(ah);
32725
32726+ pax_track_stack();
32727+
32728 len += snprintf(buf + len, sizeof(buf) - len, "bssid-mask: %pM\n",
32729 ah->bssidmask);
32730 len += snprintf(buf + len, sizeof(buf) - len, "filter-flags: 0x%x ",
32731@@ -548,6 +554,8 @@ static ssize_t read_file_frameerrors(str
32732 unsigned int len = 0;
32733 int i;
32734
32735+ pax_track_stack();
32736+
32737 len += snprintf(buf + len, sizeof(buf) - len,
32738 "RX\n---------------------\n");
32739 len += snprintf(buf + len, sizeof(buf) - len, "CRC\t%u\t(%u%%)\n",
32740@@ -665,6 +673,8 @@ static ssize_t read_file_ani(struct file
32741 char buf[700];
32742 unsigned int len = 0;
32743
32744+ pax_track_stack();
32745+
32746 len += snprintf(buf + len, sizeof(buf) - len,
32747 "HW has PHY error counters:\t%s\n",
32748 ah->ah_capabilities.cap_has_phyerr_counters ?
32749@@ -829,6 +839,8 @@ static ssize_t read_file_queue(struct fi
32750 struct ath5k_buf *bf, *bf0;
32751 int i, n;
32752
32753+ pax_track_stack();
32754+
32755 len += snprintf(buf + len, sizeof(buf) - len,
32756 "available txbuffers: %d\n", ah->txbuf_len);
32757
32758diff -urNp linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_calib.c
32759--- linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-11-11 15:19:27.000000000 -0500
32760+++ linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-11-16 18:40:22.000000000 -0500
32761@@ -758,6 +758,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
32762 int i, im, j;
32763 int nmeasurement;
32764
32765+ pax_track_stack();
32766+
32767 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
32768 if (ah->txchainmask & (1 << i))
32769 num_chains++;
32770diff -urNp linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
32771--- linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-11-11 15:19:27.000000000 -0500
32772+++ linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-11-16 18:40:22.000000000 -0500
32773@@ -406,6 +406,8 @@ static bool create_pa_curve(u32 *data_L,
32774 int theta_low_bin = 0;
32775 int i;
32776
32777+ pax_track_stack();
32778+
32779 /* disregard any bin that contains <= 16 samples */
32780 thresh_accum_cnt = 16;
32781 scale_factor = 5;
32782diff -urNp linux-3.1.1/drivers/net/wireless/ath/ath9k/debug.c linux-3.1.1/drivers/net/wireless/ath/ath9k/debug.c
32783--- linux-3.1.1/drivers/net/wireless/ath/ath9k/debug.c 2011-11-11 15:19:27.000000000 -0500
32784+++ linux-3.1.1/drivers/net/wireless/ath/ath9k/debug.c 2011-11-16 18:40:22.000000000 -0500
32785@@ -387,6 +387,8 @@ static ssize_t read_file_interrupt(struc
32786 char buf[512];
32787 unsigned int len = 0;
32788
32789+ pax_track_stack();
32790+
32791 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
32792 len += snprintf(buf + len, sizeof(buf) - len,
32793 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
32794@@ -477,6 +479,8 @@ static ssize_t read_file_wiphy(struct fi
32795 u8 addr[ETH_ALEN];
32796 u32 tmp;
32797
32798+ pax_track_stack();
32799+
32800 len += snprintf(buf + len, sizeof(buf) - len,
32801 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
32802 wiphy_name(sc->hw->wiphy),
32803diff -urNp linux-3.1.1/drivers/net/wireless/ath/ath9k/htc_drv_debug.c linux-3.1.1/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
32804--- linux-3.1.1/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-11-11 15:19:27.000000000 -0500
32805+++ linux-3.1.1/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-11-16 18:40:22.000000000 -0500
32806@@ -31,6 +31,8 @@ static ssize_t read_file_tgt_int_stats(s
32807 unsigned int len = 0;
32808 int ret = 0;
32809
32810+ pax_track_stack();
32811+
32812 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
32813
32814 ath9k_htc_ps_wakeup(priv);
32815@@ -89,6 +91,8 @@ static ssize_t read_file_tgt_tx_stats(st
32816 unsigned int len = 0;
32817 int ret = 0;
32818
32819+ pax_track_stack();
32820+
32821 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
32822
32823 ath9k_htc_ps_wakeup(priv);
32824@@ -159,6 +163,8 @@ static ssize_t read_file_tgt_rx_stats(st
32825 unsigned int len = 0;
32826 int ret = 0;
32827
32828+ pax_track_stack();
32829+
32830 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
32831
32832 ath9k_htc_ps_wakeup(priv);
32833@@ -203,6 +209,8 @@ static ssize_t read_file_xmit(struct fil
32834 char buf[512];
32835 unsigned int len = 0;
32836
32837+ pax_track_stack();
32838+
32839 len += snprintf(buf + len, sizeof(buf) - len,
32840 "%20s : %10u\n", "Buffers queued",
32841 priv->debug.tx_stats.buf_queued);
32842@@ -376,6 +384,8 @@ static ssize_t read_file_slot(struct fil
32843 char buf[512];
32844 unsigned int len = 0;
32845
32846+ pax_track_stack();
32847+
32848 spin_lock_bh(&priv->tx.tx_lock);
32849
32850 len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
32851@@ -411,6 +421,8 @@ static ssize_t read_file_queue(struct fi
32852 char buf[512];
32853 unsigned int len = 0;
32854
32855+ pax_track_stack();
32856+
32857 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
32858 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
32859
32860diff -urNp linux-3.1.1/drivers/net/wireless/ath/ath9k/hw.h linux-3.1.1/drivers/net/wireless/ath/ath9k/hw.h
32861--- linux-3.1.1/drivers/net/wireless/ath/ath9k/hw.h 2011-11-11 15:19:27.000000000 -0500
32862+++ linux-3.1.1/drivers/net/wireless/ath/ath9k/hw.h 2011-11-16 18:39:07.000000000 -0500
32863@@ -588,7 +588,7 @@ struct ath_hw_private_ops {
32864
32865 /* ANI */
32866 void (*ani_cache_ini_regs)(struct ath_hw *ah);
32867-};
32868+} __no_const;
32869
32870 /**
32871 * struct ath_hw_ops - callbacks used by hardware code and driver code
32872@@ -639,7 +639,7 @@ struct ath_hw_ops {
32873 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
32874 struct ath_hw_antcomb_conf *antconf);
32875
32876-};
32877+} __no_const;
32878
32879 struct ath_nf_limits {
32880 s16 max;
32881@@ -652,7 +652,7 @@ struct ath_nf_limits {
32882 #define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
32883
32884 struct ath_hw {
32885- struct ath_ops reg_ops;
32886+ ath_ops_no_const reg_ops;
32887
32888 struct ieee80211_hw *hw;
32889 struct ath_common common;
32890diff -urNp linux-3.1.1/drivers/net/wireless/ath/ath.h linux-3.1.1/drivers/net/wireless/ath/ath.h
32891--- linux-3.1.1/drivers/net/wireless/ath/ath.h 2011-11-11 15:19:27.000000000 -0500
32892+++ linux-3.1.1/drivers/net/wireless/ath/ath.h 2011-11-16 18:39:07.000000000 -0500
32893@@ -121,6 +121,7 @@ struct ath_ops {
32894 void (*write_flush) (void *);
32895 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
32896 };
32897+typedef struct ath_ops __no_const ath_ops_no_const;
32898
32899 struct ath_common;
32900 struct ath_bus_ops;
32901diff -urNp linux-3.1.1/drivers/net/wireless/ipw2x00/ipw2100.c linux-3.1.1/drivers/net/wireless/ipw2x00/ipw2100.c
32902--- linux-3.1.1/drivers/net/wireless/ipw2x00/ipw2100.c 2011-11-11 15:19:27.000000000 -0500
32903+++ linux-3.1.1/drivers/net/wireless/ipw2x00/ipw2100.c 2011-11-16 18:40:22.000000000 -0500
32904@@ -2102,6 +2102,8 @@ static int ipw2100_set_essid(struct ipw2
32905 int err;
32906 DECLARE_SSID_BUF(ssid);
32907
32908+ pax_track_stack();
32909+
32910 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
32911
32912 if (ssid_len)
32913@@ -5451,6 +5453,8 @@ static int ipw2100_set_key(struct ipw210
32914 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
32915 int err;
32916
32917+ pax_track_stack();
32918+
32919 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
32920 idx, keylen, len);
32921
32922diff -urNp linux-3.1.1/drivers/net/wireless/ipw2x00/libipw_rx.c linux-3.1.1/drivers/net/wireless/ipw2x00/libipw_rx.c
32923--- linux-3.1.1/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-11-11 15:19:27.000000000 -0500
32924+++ linux-3.1.1/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-11-16 18:40:22.000000000 -0500
32925@@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
32926 unsigned long flags;
32927 DECLARE_SSID_BUF(ssid);
32928
32929+ pax_track_stack();
32930+
32931 LIBIPW_DEBUG_SCAN("'%s' (%pM"
32932 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
32933 print_ssid(ssid, info_element->data, info_element->len),
32934diff -urNp linux-3.1.1/drivers/net/wireless/iwlegacy/iwl3945-base.c linux-3.1.1/drivers/net/wireless/iwlegacy/iwl3945-base.c
32935--- linux-3.1.1/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-11-11 15:19:27.000000000 -0500
32936+++ linux-3.1.1/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-11-16 18:39:07.000000000 -0500
32937@@ -3687,7 +3687,9 @@ static int iwl3945_pci_probe(struct pci_
32938 */
32939 if (iwl3945_mod_params.disable_hw_scan) {
32940 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
32941- iwl3945_hw_ops.hw_scan = NULL;
32942+ pax_open_kernel();
32943+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
32944+ pax_close_kernel();
32945 }
32946
32947 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
32948diff -urNp linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
32949--- linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-11-11 15:19:27.000000000 -0500
32950+++ linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-11-16 18:40:22.000000000 -0500
32951@@ -920,6 +920,8 @@ static void rs_tx_status(void *priv_r, s
32952 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
32953 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
32954
32955+ pax_track_stack();
32956+
32957 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
32958
32959 /* Treat uninitialized rate scaling data same as non-existing. */
32960@@ -2931,6 +2933,8 @@ static void rs_fill_link_cmd(struct iwl_
32961 container_of(lq_sta, struct iwl_station_priv, lq_sta);
32962 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
32963
32964+ pax_track_stack();
32965+
32966 /* Override starting rate (index 0) if needed for debug purposes */
32967 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
32968
32969diff -urNp linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debugfs.c
32970--- linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-11-11 15:19:27.000000000 -0500
32971+++ linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-11-16 18:40:22.000000000 -0500
32972@@ -561,6 +561,8 @@ static ssize_t iwl_dbgfs_status_read(str
32973 int pos = 0;
32974 const size_t bufsz = sizeof(buf);
32975
32976+ pax_track_stack();
32977+
32978 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
32979 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
32980 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
32981@@ -693,6 +695,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
32982 char buf[256 * NUM_IWL_RXON_CTX];
32983 const size_t bufsz = sizeof(buf);
32984
32985+ pax_track_stack();
32986+
32987 for_each_context(priv, ctx) {
32988 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
32989 ctx->ctxid);
32990diff -urNp linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debug.h linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debug.h
32991--- linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-11-11 15:19:27.000000000 -0500
32992+++ linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-11-16 18:39:07.000000000 -0500
32993@@ -68,8 +68,8 @@ do {
32994 } while (0)
32995
32996 #else
32997-#define IWL_DEBUG(__priv, level, fmt, args...)
32998-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
32999+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
33000+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
33001 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
33002 const void *p, u32 len)
33003 {}
33004diff -urNp linux-3.1.1/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-3.1.1/drivers/net/wireless/iwmc3200wifi/debugfs.c
33005--- linux-3.1.1/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-11-11 15:19:27.000000000 -0500
33006+++ linux-3.1.1/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-11-16 18:40:22.000000000 -0500
33007@@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
33008 int buf_len = 512;
33009 size_t len = 0;
33010
33011+ pax_track_stack();
33012+
33013 if (*ppos != 0)
33014 return 0;
33015 if (count < sizeof(buf))
33016diff -urNp linux-3.1.1/drivers/net/wireless/mac80211_hwsim.c linux-3.1.1/drivers/net/wireless/mac80211_hwsim.c
33017--- linux-3.1.1/drivers/net/wireless/mac80211_hwsim.c 2011-11-11 15:19:27.000000000 -0500
33018+++ linux-3.1.1/drivers/net/wireless/mac80211_hwsim.c 2011-11-16 18:39:07.000000000 -0500
33019@@ -1670,9 +1670,11 @@ static int __init init_mac80211_hwsim(vo
33020 return -EINVAL;
33021
33022 if (fake_hw_scan) {
33023- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33024- mac80211_hwsim_ops.sw_scan_start = NULL;
33025- mac80211_hwsim_ops.sw_scan_complete = NULL;
33026+ pax_open_kernel();
33027+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33028+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
33029+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
33030+ pax_close_kernel();
33031 }
33032
33033 spin_lock_init(&hwsim_radio_lock);
33034diff -urNp linux-3.1.1/drivers/net/wireless/mwifiex/main.h linux-3.1.1/drivers/net/wireless/mwifiex/main.h
33035--- linux-3.1.1/drivers/net/wireless/mwifiex/main.h 2011-11-11 15:19:27.000000000 -0500
33036+++ linux-3.1.1/drivers/net/wireless/mwifiex/main.h 2011-11-16 18:39:07.000000000 -0500
33037@@ -560,7 +560,7 @@ struct mwifiex_if_ops {
33038
33039 void (*update_mp_end_port) (struct mwifiex_adapter *, u16);
33040 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
33041-};
33042+} __no_const;
33043
33044 struct mwifiex_adapter {
33045 struct mwifiex_private *priv[MWIFIEX_MAX_BSS_NUM];
33046diff -urNp linux-3.1.1/drivers/net/wireless/rndis_wlan.c linux-3.1.1/drivers/net/wireless/rndis_wlan.c
33047--- linux-3.1.1/drivers/net/wireless/rndis_wlan.c 2011-11-11 15:19:27.000000000 -0500
33048+++ linux-3.1.1/drivers/net/wireless/rndis_wlan.c 2011-11-16 18:39:07.000000000 -0500
33049@@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
33050
33051 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
33052
33053- if (rts_threshold < 0 || rts_threshold > 2347)
33054+ if (rts_threshold > 2347)
33055 rts_threshold = 2347;
33056
33057 tmp = cpu_to_le32(rts_threshold);
33058diff -urNp linux-3.1.1/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-3.1.1/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
33059--- linux-3.1.1/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-11-11 15:19:27.000000000 -0500
33060+++ linux-3.1.1/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-11-16 18:40:22.000000000 -0500
33061@@ -837,6 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(st
33062 u8 rfpath;
33063 u8 num_total_rfpath = rtlphy->num_total_rfpath;
33064
33065+ pax_track_stack();
33066+
33067 precommoncmdcnt = 0;
33068 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
33069 MAX_PRECMD_CNT,
33070diff -urNp linux-3.1.1/drivers/net/wireless/wl1251/wl1251.h linux-3.1.1/drivers/net/wireless/wl1251/wl1251.h
33071--- linux-3.1.1/drivers/net/wireless/wl1251/wl1251.h 2011-11-11 15:19:27.000000000 -0500
33072+++ linux-3.1.1/drivers/net/wireless/wl1251/wl1251.h 2011-11-16 18:39:07.000000000 -0500
33073@@ -266,7 +266,7 @@ struct wl1251_if_operations {
33074 void (*reset)(struct wl1251 *wl);
33075 void (*enable_irq)(struct wl1251 *wl);
33076 void (*disable_irq)(struct wl1251 *wl);
33077-};
33078+} __no_const;
33079
33080 struct wl1251 {
33081 struct ieee80211_hw *hw;
33082diff -urNp linux-3.1.1/drivers/net/wireless/wl12xx/spi.c linux-3.1.1/drivers/net/wireless/wl12xx/spi.c
33083--- linux-3.1.1/drivers/net/wireless/wl12xx/spi.c 2011-11-11 15:19:27.000000000 -0500
33084+++ linux-3.1.1/drivers/net/wireless/wl12xx/spi.c 2011-11-16 18:40:22.000000000 -0500
33085@@ -281,6 +281,8 @@ static void wl1271_spi_raw_write(struct
33086 u32 chunk_len;
33087 int i;
33088
33089+ pax_track_stack();
33090+
33091 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
33092
33093 spi_message_init(&m);
33094diff -urNp linux-3.1.1/drivers/oprofile/buffer_sync.c linux-3.1.1/drivers/oprofile/buffer_sync.c
33095--- linux-3.1.1/drivers/oprofile/buffer_sync.c 2011-11-11 15:19:27.000000000 -0500
33096+++ linux-3.1.1/drivers/oprofile/buffer_sync.c 2011-11-16 18:39:07.000000000 -0500
33097@@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
33098 if (cookie == NO_COOKIE)
33099 offset = pc;
33100 if (cookie == INVALID_COOKIE) {
33101- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33102+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33103 offset = pc;
33104 }
33105 if (cookie != last_cookie) {
33106@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
33107 /* add userspace sample */
33108
33109 if (!mm) {
33110- atomic_inc(&oprofile_stats.sample_lost_no_mm);
33111+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
33112 return 0;
33113 }
33114
33115 cookie = lookup_dcookie(mm, s->eip, &offset);
33116
33117 if (cookie == INVALID_COOKIE) {
33118- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33119+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33120 return 0;
33121 }
33122
33123@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
33124 /* ignore backtraces if failed to add a sample */
33125 if (state == sb_bt_start) {
33126 state = sb_bt_ignore;
33127- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
33128+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
33129 }
33130 }
33131 release_mm(mm);
33132diff -urNp linux-3.1.1/drivers/oprofile/event_buffer.c linux-3.1.1/drivers/oprofile/event_buffer.c
33133--- linux-3.1.1/drivers/oprofile/event_buffer.c 2011-11-11 15:19:27.000000000 -0500
33134+++ linux-3.1.1/drivers/oprofile/event_buffer.c 2011-11-16 18:39:07.000000000 -0500
33135@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
33136 }
33137
33138 if (buffer_pos == buffer_size) {
33139- atomic_inc(&oprofile_stats.event_lost_overflow);
33140+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
33141 return;
33142 }
33143
33144diff -urNp linux-3.1.1/drivers/oprofile/oprof.c linux-3.1.1/drivers/oprofile/oprof.c
33145--- linux-3.1.1/drivers/oprofile/oprof.c 2011-11-11 15:19:27.000000000 -0500
33146+++ linux-3.1.1/drivers/oprofile/oprof.c 2011-11-16 18:39:07.000000000 -0500
33147@@ -110,7 +110,7 @@ static void switch_worker(struct work_st
33148 if (oprofile_ops.switch_events())
33149 return;
33150
33151- atomic_inc(&oprofile_stats.multiplex_counter);
33152+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
33153 start_switch_worker();
33154 }
33155
33156diff -urNp linux-3.1.1/drivers/oprofile/oprofilefs.c linux-3.1.1/drivers/oprofile/oprofilefs.c
33157--- linux-3.1.1/drivers/oprofile/oprofilefs.c 2011-11-11 15:19:27.000000000 -0500
33158+++ linux-3.1.1/drivers/oprofile/oprofilefs.c 2011-11-16 18:39:07.000000000 -0500
33159@@ -186,7 +186,7 @@ static const struct file_operations atom
33160
33161
33162 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
33163- char const *name, atomic_t *val)
33164+ char const *name, atomic_unchecked_t *val)
33165 {
33166 return __oprofilefs_create_file(sb, root, name,
33167 &atomic_ro_fops, 0444, val);
33168diff -urNp linux-3.1.1/drivers/oprofile/oprofile_stats.c linux-3.1.1/drivers/oprofile/oprofile_stats.c
33169--- linux-3.1.1/drivers/oprofile/oprofile_stats.c 2011-11-11 15:19:27.000000000 -0500
33170+++ linux-3.1.1/drivers/oprofile/oprofile_stats.c 2011-11-16 18:39:07.000000000 -0500
33171@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
33172 cpu_buf->sample_invalid_eip = 0;
33173 }
33174
33175- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
33176- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
33177- atomic_set(&oprofile_stats.event_lost_overflow, 0);
33178- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
33179- atomic_set(&oprofile_stats.multiplex_counter, 0);
33180+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
33181+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
33182+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
33183+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
33184+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
33185 }
33186
33187
33188diff -urNp linux-3.1.1/drivers/oprofile/oprofile_stats.h linux-3.1.1/drivers/oprofile/oprofile_stats.h
33189--- linux-3.1.1/drivers/oprofile/oprofile_stats.h 2011-11-11 15:19:27.000000000 -0500
33190+++ linux-3.1.1/drivers/oprofile/oprofile_stats.h 2011-11-16 18:39:07.000000000 -0500
33191@@ -13,11 +13,11 @@
33192 #include <linux/atomic.h>
33193
33194 struct oprofile_stat_struct {
33195- atomic_t sample_lost_no_mm;
33196- atomic_t sample_lost_no_mapping;
33197- atomic_t bt_lost_no_mapping;
33198- atomic_t event_lost_overflow;
33199- atomic_t multiplex_counter;
33200+ atomic_unchecked_t sample_lost_no_mm;
33201+ atomic_unchecked_t sample_lost_no_mapping;
33202+ atomic_unchecked_t bt_lost_no_mapping;
33203+ atomic_unchecked_t event_lost_overflow;
33204+ atomic_unchecked_t multiplex_counter;
33205 };
33206
33207 extern struct oprofile_stat_struct oprofile_stats;
33208diff -urNp linux-3.1.1/drivers/parport/procfs.c linux-3.1.1/drivers/parport/procfs.c
33209--- linux-3.1.1/drivers/parport/procfs.c 2011-11-11 15:19:27.000000000 -0500
33210+++ linux-3.1.1/drivers/parport/procfs.c 2011-11-16 18:39:07.000000000 -0500
33211@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
33212
33213 *ppos += len;
33214
33215- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
33216+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
33217 }
33218
33219 #ifdef CONFIG_PARPORT_1284
33220@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
33221
33222 *ppos += len;
33223
33224- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
33225+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
33226 }
33227 #endif /* IEEE1284.3 support. */
33228
33229diff -urNp linux-3.1.1/drivers/pci/hotplug/cpci_hotplug.h linux-3.1.1/drivers/pci/hotplug/cpci_hotplug.h
33230--- linux-3.1.1/drivers/pci/hotplug/cpci_hotplug.h 2011-11-11 15:19:27.000000000 -0500
33231+++ linux-3.1.1/drivers/pci/hotplug/cpci_hotplug.h 2011-11-16 18:39:07.000000000 -0500
33232@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
33233 int (*hardware_test) (struct slot* slot, u32 value);
33234 u8 (*get_power) (struct slot* slot);
33235 int (*set_power) (struct slot* slot, int value);
33236-};
33237+} __no_const;
33238
33239 struct cpci_hp_controller {
33240 unsigned int irq;
33241diff -urNp linux-3.1.1/drivers/pci/hotplug/cpqphp_nvram.c linux-3.1.1/drivers/pci/hotplug/cpqphp_nvram.c
33242--- linux-3.1.1/drivers/pci/hotplug/cpqphp_nvram.c 2011-11-11 15:19:27.000000000 -0500
33243+++ linux-3.1.1/drivers/pci/hotplug/cpqphp_nvram.c 2011-11-16 18:39:07.000000000 -0500
33244@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
33245
33246 void compaq_nvram_init (void __iomem *rom_start)
33247 {
33248+
33249+#ifndef CONFIG_PAX_KERNEXEC
33250 if (rom_start) {
33251 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
33252 }
33253+#endif
33254+
33255 dbg("int15 entry = %p\n", compaq_int15_entry_point);
33256
33257 /* initialize our int15 lock */
33258diff -urNp linux-3.1.1/drivers/pci/pcie/aspm.c linux-3.1.1/drivers/pci/pcie/aspm.c
33259--- linux-3.1.1/drivers/pci/pcie/aspm.c 2011-11-11 15:19:27.000000000 -0500
33260+++ linux-3.1.1/drivers/pci/pcie/aspm.c 2011-11-16 18:39:07.000000000 -0500
33261@@ -27,9 +27,9 @@
33262 #define MODULE_PARAM_PREFIX "pcie_aspm."
33263
33264 /* Note: those are not register definitions */
33265-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
33266-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
33267-#define ASPM_STATE_L1 (4) /* L1 state */
33268+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
33269+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
33270+#define ASPM_STATE_L1 (4U) /* L1 state */
33271 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
33272 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
33273
33274diff -urNp linux-3.1.1/drivers/pci/probe.c linux-3.1.1/drivers/pci/probe.c
33275--- linux-3.1.1/drivers/pci/probe.c 2011-11-11 15:19:27.000000000 -0500
33276+++ linux-3.1.1/drivers/pci/probe.c 2011-11-16 18:39:07.000000000 -0500
33277@@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev,
33278 u32 l, sz, mask;
33279 u16 orig_cmd;
33280
33281- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
33282+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
33283
33284 if (!dev->mmio_always_on) {
33285 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
33286diff -urNp linux-3.1.1/drivers/pci/proc.c linux-3.1.1/drivers/pci/proc.c
33287--- linux-3.1.1/drivers/pci/proc.c 2011-11-11 15:19:27.000000000 -0500
33288+++ linux-3.1.1/drivers/pci/proc.c 2011-11-16 18:40:22.000000000 -0500
33289@@ -476,7 +476,16 @@ static const struct file_operations proc
33290 static int __init pci_proc_init(void)
33291 {
33292 struct pci_dev *dev = NULL;
33293+
33294+#ifdef CONFIG_GRKERNSEC_PROC_ADD
33295+#ifdef CONFIG_GRKERNSEC_PROC_USER
33296+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
33297+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33298+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
33299+#endif
33300+#else
33301 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
33302+#endif
33303 proc_create("devices", 0, proc_bus_pci_dir,
33304 &proc_bus_pci_dev_operations);
33305 proc_initialized = 1;
33306diff -urNp linux-3.1.1/drivers/pci/xen-pcifront.c linux-3.1.1/drivers/pci/xen-pcifront.c
33307--- linux-3.1.1/drivers/pci/xen-pcifront.c 2011-11-11 15:19:27.000000000 -0500
33308+++ linux-3.1.1/drivers/pci/xen-pcifront.c 2011-11-16 18:40:22.000000000 -0500
33309@@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
33310 struct pcifront_sd *sd = bus->sysdata;
33311 struct pcifront_device *pdev = pcifront_get_pdev(sd);
33312
33313+ pax_track_stack();
33314+
33315 if (verbose_request)
33316 dev_info(&pdev->xdev->dev,
33317 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
33318@@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
33319 struct pcifront_sd *sd = bus->sysdata;
33320 struct pcifront_device *pdev = pcifront_get_pdev(sd);
33321
33322+ pax_track_stack();
33323+
33324 if (verbose_request)
33325 dev_info(&pdev->xdev->dev,
33326 "write dev=%04x:%02x:%02x.%01x - "
33327@@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
33328 struct pcifront_device *pdev = pcifront_get_pdev(sd);
33329 struct msi_desc *entry;
33330
33331+ pax_track_stack();
33332+
33333 if (nvec > SH_INFO_MAX_VEC) {
33334 dev_err(&dev->dev, "too much vector for pci frontend: %x."
33335 " Increase SH_INFO_MAX_VEC.\n", nvec);
33336@@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
33337 struct pcifront_sd *sd = dev->bus->sysdata;
33338 struct pcifront_device *pdev = pcifront_get_pdev(sd);
33339
33340+ pax_track_stack();
33341+
33342 err = do_pci_op(pdev, &op);
33343
33344 /* What should do for error ? */
33345@@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
33346 struct pcifront_sd *sd = dev->bus->sysdata;
33347 struct pcifront_device *pdev = pcifront_get_pdev(sd);
33348
33349+ pax_track_stack();
33350+
33351 err = do_pci_op(pdev, &op);
33352 if (likely(!err)) {
33353 vector[0] = op.value;
33354diff -urNp linux-3.1.1/drivers/platform/x86/thinkpad_acpi.c linux-3.1.1/drivers/platform/x86/thinkpad_acpi.c
33355--- linux-3.1.1/drivers/platform/x86/thinkpad_acpi.c 2011-11-11 15:19:27.000000000 -0500
33356+++ linux-3.1.1/drivers/platform/x86/thinkpad_acpi.c 2011-11-16 18:39:07.000000000 -0500
33357@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
33358 return 0;
33359 }
33360
33361-void static hotkey_mask_warn_incomplete_mask(void)
33362+static void hotkey_mask_warn_incomplete_mask(void)
33363 {
33364 /* log only what the user can fix... */
33365 const u32 wantedmask = hotkey_driver_mask &
33366diff -urNp linux-3.1.1/drivers/pnp/pnpbios/bioscalls.c linux-3.1.1/drivers/pnp/pnpbios/bioscalls.c
33367--- linux-3.1.1/drivers/pnp/pnpbios/bioscalls.c 2011-11-11 15:19:27.000000000 -0500
33368+++ linux-3.1.1/drivers/pnp/pnpbios/bioscalls.c 2011-11-16 18:39:07.000000000 -0500
33369@@ -59,7 +59,7 @@ do { \
33370 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
33371 } while(0)
33372
33373-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
33374+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
33375 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
33376
33377 /*
33378@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
33379
33380 cpu = get_cpu();
33381 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
33382+
33383+ pax_open_kernel();
33384 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
33385+ pax_close_kernel();
33386
33387 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
33388 spin_lock_irqsave(&pnp_bios_lock, flags);
33389@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
33390 :"memory");
33391 spin_unlock_irqrestore(&pnp_bios_lock, flags);
33392
33393+ pax_open_kernel();
33394 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
33395+ pax_close_kernel();
33396+
33397 put_cpu();
33398
33399 /* If we get here and this is set then the PnP BIOS faulted on us. */
33400@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
33401 return status;
33402 }
33403
33404-void pnpbios_calls_init(union pnp_bios_install_struct *header)
33405+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
33406 {
33407 int i;
33408
33409@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
33410 pnp_bios_callpoint.offset = header->fields.pm16offset;
33411 pnp_bios_callpoint.segment = PNP_CS16;
33412
33413+ pax_open_kernel();
33414+
33415 for_each_possible_cpu(i) {
33416 struct desc_struct *gdt = get_cpu_gdt_table(i);
33417 if (!gdt)
33418@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
33419 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
33420 (unsigned long)__va(header->fields.pm16dseg));
33421 }
33422+
33423+ pax_close_kernel();
33424 }
33425diff -urNp linux-3.1.1/drivers/pnp/resource.c linux-3.1.1/drivers/pnp/resource.c
33426--- linux-3.1.1/drivers/pnp/resource.c 2011-11-11 15:19:27.000000000 -0500
33427+++ linux-3.1.1/drivers/pnp/resource.c 2011-11-16 18:39:07.000000000 -0500
33428@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
33429 return 1;
33430
33431 /* check if the resource is valid */
33432- if (*irq < 0 || *irq > 15)
33433+ if (*irq > 15)
33434 return 0;
33435
33436 /* check if the resource is reserved */
33437@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
33438 return 1;
33439
33440 /* check if the resource is valid */
33441- if (*dma < 0 || *dma == 4 || *dma > 7)
33442+ if (*dma == 4 || *dma > 7)
33443 return 0;
33444
33445 /* check if the resource is reserved */
33446diff -urNp linux-3.1.1/drivers/power/bq27x00_battery.c linux-3.1.1/drivers/power/bq27x00_battery.c
33447--- linux-3.1.1/drivers/power/bq27x00_battery.c 2011-11-11 15:19:27.000000000 -0500
33448+++ linux-3.1.1/drivers/power/bq27x00_battery.c 2011-11-16 18:39:07.000000000 -0500
33449@@ -67,7 +67,7 @@
33450 struct bq27x00_device_info;
33451 struct bq27x00_access_methods {
33452 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
33453-};
33454+} __no_const;
33455
33456 enum bq27x00_chip { BQ27000, BQ27500 };
33457
33458diff -urNp linux-3.1.1/drivers/regulator/max8660.c linux-3.1.1/drivers/regulator/max8660.c
33459--- linux-3.1.1/drivers/regulator/max8660.c 2011-11-11 15:19:27.000000000 -0500
33460+++ linux-3.1.1/drivers/regulator/max8660.c 2011-11-16 18:39:07.000000000 -0500
33461@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struc
33462 max8660->shadow_regs[MAX8660_OVER1] = 5;
33463 } else {
33464 /* Otherwise devices can be toggled via software */
33465- max8660_dcdc_ops.enable = max8660_dcdc_enable;
33466- max8660_dcdc_ops.disable = max8660_dcdc_disable;
33467+ pax_open_kernel();
33468+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
33469+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
33470+ pax_close_kernel();
33471 }
33472
33473 /*
33474diff -urNp linux-3.1.1/drivers/regulator/mc13892-regulator.c linux-3.1.1/drivers/regulator/mc13892-regulator.c
33475--- linux-3.1.1/drivers/regulator/mc13892-regulator.c 2011-11-11 15:19:27.000000000 -0500
33476+++ linux-3.1.1/drivers/regulator/mc13892-regulator.c 2011-11-16 18:39:07.000000000 -0500
33477@@ -564,10 +564,12 @@ static int __devinit mc13892_regulator_p
33478 }
33479 mc13xxx_unlock(mc13892);
33480
33481- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
33482+ pax_open_kernel();
33483+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
33484 = mc13892_vcam_set_mode;
33485- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
33486+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
33487 = mc13892_vcam_get_mode;
33488+ pax_close_kernel();
33489 for (i = 0; i < pdata->num_regulators; i++) {
33490 init_data = &pdata->regulators[i];
33491 priv->regulators[i] = regulator_register(
33492diff -urNp linux-3.1.1/drivers/rtc/rtc-dev.c linux-3.1.1/drivers/rtc/rtc-dev.c
33493--- linux-3.1.1/drivers/rtc/rtc-dev.c 2011-11-11 15:19:27.000000000 -0500
33494+++ linux-3.1.1/drivers/rtc/rtc-dev.c 2011-11-16 18:40:22.000000000 -0500
33495@@ -14,6 +14,7 @@
33496 #include <linux/module.h>
33497 #include <linux/rtc.h>
33498 #include <linux/sched.h>
33499+#include <linux/grsecurity.h>
33500 #include "rtc-core.h"
33501
33502 static dev_t rtc_devt;
33503@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
33504 if (copy_from_user(&tm, uarg, sizeof(tm)))
33505 return -EFAULT;
33506
33507+ gr_log_timechange();
33508+
33509 return rtc_set_time(rtc, &tm);
33510
33511 case RTC_PIE_ON:
33512diff -urNp linux-3.1.1/drivers/scsi/aacraid/aacraid.h linux-3.1.1/drivers/scsi/aacraid/aacraid.h
33513--- linux-3.1.1/drivers/scsi/aacraid/aacraid.h 2011-11-11 15:19:27.000000000 -0500
33514+++ linux-3.1.1/drivers/scsi/aacraid/aacraid.h 2011-11-16 18:39:07.000000000 -0500
33515@@ -492,7 +492,7 @@ struct adapter_ops
33516 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
33517 /* Administrative operations */
33518 int (*adapter_comm)(struct aac_dev * dev, int comm);
33519-};
33520+} __no_const;
33521
33522 /*
33523 * Define which interrupt handler needs to be installed
33524diff -urNp linux-3.1.1/drivers/scsi/aacraid/commctrl.c linux-3.1.1/drivers/scsi/aacraid/commctrl.c
33525--- linux-3.1.1/drivers/scsi/aacraid/commctrl.c 2011-11-11 15:19:27.000000000 -0500
33526+++ linux-3.1.1/drivers/scsi/aacraid/commctrl.c 2011-11-16 18:40:22.000000000 -0500
33527@@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
33528 u32 actual_fibsize64, actual_fibsize = 0;
33529 int i;
33530
33531+ pax_track_stack();
33532
33533 if (dev->in_reset) {
33534 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
33535diff -urNp linux-3.1.1/drivers/scsi/aacraid/linit.c linux-3.1.1/drivers/scsi/aacraid/linit.c
33536--- linux-3.1.1/drivers/scsi/aacraid/linit.c 2011-11-11 15:19:27.000000000 -0500
33537+++ linux-3.1.1/drivers/scsi/aacraid/linit.c 2011-11-16 18:39:07.000000000 -0500
33538@@ -92,7 +92,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_
33539 #elif defined(__devinitconst)
33540 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
33541 #else
33542-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
33543+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
33544 #endif
33545 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
33546 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
33547diff -urNp linux-3.1.1/drivers/scsi/aic94xx/aic94xx_init.c linux-3.1.1/drivers/scsi/aic94xx/aic94xx_init.c
33548--- linux-3.1.1/drivers/scsi/aic94xx/aic94xx_init.c 2011-11-11 15:19:27.000000000 -0500
33549+++ linux-3.1.1/drivers/scsi/aic94xx/aic94xx_init.c 2011-11-16 18:39:07.000000000 -0500
33550@@ -1012,7 +1012,7 @@ static struct sas_domain_function_templa
33551 .lldd_control_phy = asd_control_phy,
33552 };
33553
33554-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
33555+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
33556 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
33557 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
33558 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
33559diff -urNp linux-3.1.1/drivers/scsi/bfa/bfad.c linux-3.1.1/drivers/scsi/bfa/bfad.c
33560--- linux-3.1.1/drivers/scsi/bfa/bfad.c 2011-11-11 15:19:27.000000000 -0500
33561+++ linux-3.1.1/drivers/scsi/bfa/bfad.c 2011-11-16 19:01:15.000000000 -0500
33562@@ -1019,6 +1019,8 @@ bfad_start_ops(struct bfad_s *bfad) {
33563 struct bfad_vport_s *vport, *vport_new;
33564 struct bfa_fcs_driver_info_s driver_info;
33565
33566+ pax_track_stack();
33567+
33568 /* Limit min/max. xfer size to [64k-32MB] */
33569 if (max_xfer_size < BFAD_MIN_SECTORS >> 1)
33570 max_xfer_size = BFAD_MIN_SECTORS >> 1;
33571diff -urNp linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.c linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.c
33572--- linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.c 2011-11-11 15:19:27.000000000 -0500
33573+++ linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.c 2011-11-16 18:39:07.000000000 -0500
33574@@ -4179,7 +4179,7 @@ bfa_itn_create(struct bfa_s *bfa, struct
33575 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
33576 {
33577 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
33578- struct bfa_itn_s *itn;
33579+ bfa_itn_s_no_const *itn;
33580
33581 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
33582 itn->isr = isr;
33583diff -urNp linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.h linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.h
33584--- linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.h 2011-11-11 15:19:27.000000000 -0500
33585+++ linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.h 2011-11-16 18:39:07.000000000 -0500
33586@@ -37,6 +37,7 @@ struct bfa_iotag_s {
33587 struct bfa_itn_s {
33588 bfa_isr_func_t isr;
33589 };
33590+typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
33591
33592 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
33593 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
33594@@ -149,7 +150,7 @@ struct bfa_fcp_mod_s {
33595 struct list_head iotag_tio_free_q; /* free IO resources */
33596 struct list_head iotag_unused_q; /* unused IO resources*/
33597 struct bfa_iotag_s *iotag_arr;
33598- struct bfa_itn_s *itn_arr;
33599+ bfa_itn_s_no_const *itn_arr;
33600 int num_ioim_reqs;
33601 int num_fwtio_reqs;
33602 int num_itns;
33603diff -urNp linux-3.1.1/drivers/scsi/bfa/bfa_fcs_lport.c linux-3.1.1/drivers/scsi/bfa/bfa_fcs_lport.c
33604--- linux-3.1.1/drivers/scsi/bfa/bfa_fcs_lport.c 2011-11-11 15:19:27.000000000 -0500
33605+++ linux-3.1.1/drivers/scsi/bfa/bfa_fcs_lport.c 2011-11-16 18:40:22.000000000 -0500
33606@@ -1700,6 +1700,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
33607 u16 len, count;
33608 u16 templen;
33609
33610+ pax_track_stack();
33611+
33612 /*
33613 * get hba attributes
33614 */
33615@@ -1977,6 +1979,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
33616 u8 count = 0;
33617 u16 templen;
33618
33619+ pax_track_stack();
33620+
33621 /*
33622 * get port attributes
33623 */
33624diff -urNp linux-3.1.1/drivers/scsi/bfa/bfa_fcs_rport.c linux-3.1.1/drivers/scsi/bfa/bfa_fcs_rport.c
33625--- linux-3.1.1/drivers/scsi/bfa/bfa_fcs_rport.c 2011-11-11 15:19:27.000000000 -0500
33626+++ linux-3.1.1/drivers/scsi/bfa/bfa_fcs_rport.c 2011-11-16 18:40:22.000000000 -0500
33627@@ -1871,6 +1871,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
33628 struct fc_rpsc_speed_info_s speeds;
33629 struct bfa_port_attr_s pport_attr;
33630
33631+ pax_track_stack();
33632+
33633 bfa_trc(port->fcs, rx_fchs->s_id);
33634 bfa_trc(port->fcs, rx_fchs->d_id);
33635
33636diff -urNp linux-3.1.1/drivers/scsi/bfa/bfa.h linux-3.1.1/drivers/scsi/bfa/bfa.h
33637--- linux-3.1.1/drivers/scsi/bfa/bfa.h 2011-11-11 15:19:27.000000000 -0500
33638+++ linux-3.1.1/drivers/scsi/bfa/bfa.h 2011-11-16 18:39:07.000000000 -0500
33639@@ -196,7 +196,7 @@ struct bfa_hwif_s {
33640 u32 *end);
33641 int cpe_vec_q0;
33642 int rme_vec_q0;
33643-};
33644+} __no_const;
33645 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
33646
33647 struct bfa_faa_cbfn_s {
33648diff -urNp linux-3.1.1/drivers/scsi/bfa/bfa_ioc.h linux-3.1.1/drivers/scsi/bfa/bfa_ioc.h
33649--- linux-3.1.1/drivers/scsi/bfa/bfa_ioc.h 2011-11-11 15:19:27.000000000 -0500
33650+++ linux-3.1.1/drivers/scsi/bfa/bfa_ioc.h 2011-11-16 18:39:07.000000000 -0500
33651@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
33652 bfa_ioc_disable_cbfn_t disable_cbfn;
33653 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
33654 bfa_ioc_reset_cbfn_t reset_cbfn;
33655-};
33656+} __no_const;
33657
33658 /*
33659 * IOC event notification mechanism.
33660@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
33661 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
33662 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
33663 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
33664-};
33665+} __no_const;
33666
33667 /*
33668 * Queue element to wait for room in request queue. FIFO order is
33669diff -urNp linux-3.1.1/drivers/scsi/BusLogic.c linux-3.1.1/drivers/scsi/BusLogic.c
33670--- linux-3.1.1/drivers/scsi/BusLogic.c 2011-11-11 15:19:27.000000000 -0500
33671+++ linux-3.1.1/drivers/scsi/BusLogic.c 2011-11-16 18:40:22.000000000 -0500
33672@@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
33673 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
33674 *PrototypeHostAdapter)
33675 {
33676+ pax_track_stack();
33677+
33678 /*
33679 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
33680 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
33681diff -urNp linux-3.1.1/drivers/scsi/dpt_i2o.c linux-3.1.1/drivers/scsi/dpt_i2o.c
33682--- linux-3.1.1/drivers/scsi/dpt_i2o.c 2011-11-11 15:19:27.000000000 -0500
33683+++ linux-3.1.1/drivers/scsi/dpt_i2o.c 2011-11-16 18:40:22.000000000 -0500
33684@@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
33685 dma_addr_t addr;
33686 ulong flags = 0;
33687
33688+ pax_track_stack();
33689+
33690 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
33691 // get user msg size in u32s
33692 if(get_user(size, &user_msg[0])){
33693@@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
33694 s32 rcode;
33695 dma_addr_t addr;
33696
33697+ pax_track_stack();
33698+
33699 memset(msg, 0 , sizeof(msg));
33700 len = scsi_bufflen(cmd);
33701 direction = 0x00000000;
33702diff -urNp linux-3.1.1/drivers/scsi/eata.c linux-3.1.1/drivers/scsi/eata.c
33703--- linux-3.1.1/drivers/scsi/eata.c 2011-11-11 15:19:27.000000000 -0500
33704+++ linux-3.1.1/drivers/scsi/eata.c 2011-11-16 18:40:22.000000000 -0500
33705@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
33706 struct hostdata *ha;
33707 char name[16];
33708
33709+ pax_track_stack();
33710+
33711 sprintf(name, "%s%d", driver_name, j);
33712
33713 if (!request_region(port_base, REGION_SIZE, driver_name)) {
33714diff -urNp linux-3.1.1/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.1.1/drivers/scsi/fcoe/fcoe_ctlr.c
33715--- linux-3.1.1/drivers/scsi/fcoe/fcoe_ctlr.c 2011-11-11 15:19:27.000000000 -0500
33716+++ linux-3.1.1/drivers/scsi/fcoe/fcoe_ctlr.c 2011-11-16 18:40:22.000000000 -0500
33717@@ -2503,6 +2503,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
33718 } buf;
33719 int rc;
33720
33721+ pax_track_stack();
33722+
33723 fiph = (struct fip_header *)skb->data;
33724 sub = fiph->fip_subcode;
33725
33726diff -urNp linux-3.1.1/drivers/scsi/gdth.c linux-3.1.1/drivers/scsi/gdth.c
33727--- linux-3.1.1/drivers/scsi/gdth.c 2011-11-11 15:19:27.000000000 -0500
33728+++ linux-3.1.1/drivers/scsi/gdth.c 2011-11-16 18:40:22.000000000 -0500
33729@@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
33730 unsigned long flags;
33731 gdth_ha_str *ha;
33732
33733+ pax_track_stack();
33734+
33735 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
33736 return -EFAULT;
33737 ha = gdth_find_ha(ldrv.ionode);
33738@@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
33739 gdth_ha_str *ha;
33740 int rval;
33741
33742+ pax_track_stack();
33743+
33744 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
33745 res.number >= MAX_HDRIVES)
33746 return -EFAULT;
33747@@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
33748 gdth_ha_str *ha;
33749 int rval;
33750
33751+ pax_track_stack();
33752+
33753 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
33754 return -EFAULT;
33755 ha = gdth_find_ha(gen.ionode);
33756@@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
33757 int i;
33758 gdth_cmd_str gdtcmd;
33759 char cmnd[MAX_COMMAND_SIZE];
33760+
33761+ pax_track_stack();
33762+
33763 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
33764
33765 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
33766diff -urNp linux-3.1.1/drivers/scsi/gdth_proc.c linux-3.1.1/drivers/scsi/gdth_proc.c
33767--- linux-3.1.1/drivers/scsi/gdth_proc.c 2011-11-11 15:19:27.000000000 -0500
33768+++ linux-3.1.1/drivers/scsi/gdth_proc.c 2011-11-16 18:40:22.000000000 -0500
33769@@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
33770 u64 paddr;
33771
33772 char cmnd[MAX_COMMAND_SIZE];
33773+
33774+ pax_track_stack();
33775+
33776 memset(cmnd, 0xff, 12);
33777 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
33778
33779@@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
33780 gdth_hget_str *phg;
33781 char cmnd[MAX_COMMAND_SIZE];
33782
33783+ pax_track_stack();
33784+
33785 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
33786 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
33787 if (!gdtcmd || !estr)
33788diff -urNp linux-3.1.1/drivers/scsi/hosts.c linux-3.1.1/drivers/scsi/hosts.c
33789--- linux-3.1.1/drivers/scsi/hosts.c 2011-11-11 15:19:27.000000000 -0500
33790+++ linux-3.1.1/drivers/scsi/hosts.c 2011-11-16 18:39:07.000000000 -0500
33791@@ -42,7 +42,7 @@
33792 #include "scsi_logging.h"
33793
33794
33795-static atomic_t scsi_host_next_hn; /* host_no for next new host */
33796+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
33797
33798
33799 static void scsi_host_cls_release(struct device *dev)
33800@@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct
33801 * subtract one because we increment first then return, but we need to
33802 * know what the next host number was before increment
33803 */
33804- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
33805+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
33806 shost->dma_channel = 0xff;
33807
33808 /* These three are default values which can be overridden */
33809diff -urNp linux-3.1.1/drivers/scsi/hpsa.c linux-3.1.1/drivers/scsi/hpsa.c
33810--- linux-3.1.1/drivers/scsi/hpsa.c 2011-11-11 15:19:27.000000000 -0500
33811+++ linux-3.1.1/drivers/scsi/hpsa.c 2011-11-16 18:39:07.000000000 -0500
33812@@ -498,7 +498,7 @@ static inline u32 next_command(struct ct
33813 u32 a;
33814
33815 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
33816- return h->access.command_completed(h);
33817+ return h->access->command_completed(h);
33818
33819 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
33820 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
33821@@ -2955,7 +2955,7 @@ static void start_io(struct ctlr_info *h
33822 while (!list_empty(&h->reqQ)) {
33823 c = list_entry(h->reqQ.next, struct CommandList, list);
33824 /* can't do anything if fifo is full */
33825- if ((h->access.fifo_full(h))) {
33826+ if ((h->access->fifo_full(h))) {
33827 dev_warn(&h->pdev->dev, "fifo full\n");
33828 break;
33829 }
33830@@ -2965,7 +2965,7 @@ static void start_io(struct ctlr_info *h
33831 h->Qdepth--;
33832
33833 /* Tell the controller execute command */
33834- h->access.submit_command(h, c);
33835+ h->access->submit_command(h, c);
33836
33837 /* Put job onto the completed Q */
33838 addQ(&h->cmpQ, c);
33839@@ -2974,17 +2974,17 @@ static void start_io(struct ctlr_info *h
33840
33841 static inline unsigned long get_next_completion(struct ctlr_info *h)
33842 {
33843- return h->access.command_completed(h);
33844+ return h->access->command_completed(h);
33845 }
33846
33847 static inline bool interrupt_pending(struct ctlr_info *h)
33848 {
33849- return h->access.intr_pending(h);
33850+ return h->access->intr_pending(h);
33851 }
33852
33853 static inline long interrupt_not_for_us(struct ctlr_info *h)
33854 {
33855- return (h->access.intr_pending(h) == 0) ||
33856+ return (h->access->intr_pending(h) == 0) ||
33857 (h->interrupts_enabled == 0);
33858 }
33859
33860@@ -3881,7 +3881,7 @@ static int __devinit hpsa_pci_init(struc
33861 if (prod_index < 0)
33862 return -ENODEV;
33863 h->product_name = products[prod_index].product_name;
33864- h->access = *(products[prod_index].access);
33865+ h->access = products[prod_index].access;
33866
33867 if (hpsa_board_disabled(h->pdev)) {
33868 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
33869@@ -4158,7 +4158,7 @@ reinit_after_soft_reset:
33870 }
33871
33872 /* make sure the board interrupts are off */
33873- h->access.set_intr_mask(h, HPSA_INTR_OFF);
33874+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
33875
33876 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
33877 goto clean2;
33878@@ -4192,7 +4192,7 @@ reinit_after_soft_reset:
33879 * fake ones to scoop up any residual completions.
33880 */
33881 spin_lock_irqsave(&h->lock, flags);
33882- h->access.set_intr_mask(h, HPSA_INTR_OFF);
33883+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
33884 spin_unlock_irqrestore(&h->lock, flags);
33885 free_irq(h->intr[h->intr_mode], h);
33886 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
33887@@ -4211,9 +4211,9 @@ reinit_after_soft_reset:
33888 dev_info(&h->pdev->dev, "Board READY.\n");
33889 dev_info(&h->pdev->dev,
33890 "Waiting for stale completions to drain.\n");
33891- h->access.set_intr_mask(h, HPSA_INTR_ON);
33892+ h->access->set_intr_mask(h, HPSA_INTR_ON);
33893 msleep(10000);
33894- h->access.set_intr_mask(h, HPSA_INTR_OFF);
33895+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
33896
33897 rc = controller_reset_failed(h->cfgtable);
33898 if (rc)
33899@@ -4234,7 +4234,7 @@ reinit_after_soft_reset:
33900 }
33901
33902 /* Turn the interrupts on so we can service requests */
33903- h->access.set_intr_mask(h, HPSA_INTR_ON);
33904+ h->access->set_intr_mask(h, HPSA_INTR_ON);
33905
33906 hpsa_hba_inquiry(h);
33907 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
33908@@ -4287,7 +4287,7 @@ static void hpsa_shutdown(struct pci_dev
33909 * To write all data in the battery backed cache to disks
33910 */
33911 hpsa_flush_cache(h);
33912- h->access.set_intr_mask(h, HPSA_INTR_OFF);
33913+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
33914 free_irq(h->intr[h->intr_mode], h);
33915 #ifdef CONFIG_PCI_MSI
33916 if (h->msix_vector)
33917@@ -4450,7 +4450,7 @@ static __devinit void hpsa_enter_perform
33918 return;
33919 }
33920 /* Change the access methods to the performant access methods */
33921- h->access = SA5_performant_access;
33922+ h->access = &SA5_performant_access;
33923 h->transMethod = CFGTBL_Trans_Performant;
33924 }
33925
33926diff -urNp linux-3.1.1/drivers/scsi/hpsa.h linux-3.1.1/drivers/scsi/hpsa.h
33927--- linux-3.1.1/drivers/scsi/hpsa.h 2011-11-11 15:19:27.000000000 -0500
33928+++ linux-3.1.1/drivers/scsi/hpsa.h 2011-11-16 18:39:07.000000000 -0500
33929@@ -73,7 +73,7 @@ struct ctlr_info {
33930 unsigned int msix_vector;
33931 unsigned int msi_vector;
33932 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
33933- struct access_method access;
33934+ struct access_method *access;
33935
33936 /* queue and queue Info */
33937 struct list_head reqQ;
33938diff -urNp linux-3.1.1/drivers/scsi/ips.h linux-3.1.1/drivers/scsi/ips.h
33939--- linux-3.1.1/drivers/scsi/ips.h 2011-11-11 15:19:27.000000000 -0500
33940+++ linux-3.1.1/drivers/scsi/ips.h 2011-11-16 18:39:07.000000000 -0500
33941@@ -1027,7 +1027,7 @@ typedef struct {
33942 int (*intr)(struct ips_ha *);
33943 void (*enableint)(struct ips_ha *);
33944 uint32_t (*statupd)(struct ips_ha *);
33945-} ips_hw_func_t;
33946+} __no_const ips_hw_func_t;
33947
33948 typedef struct ips_ha {
33949 uint8_t ha_id[IPS_MAX_CHANNELS+1];
33950diff -urNp linux-3.1.1/drivers/scsi/libfc/fc_exch.c linux-3.1.1/drivers/scsi/libfc/fc_exch.c
33951--- linux-3.1.1/drivers/scsi/libfc/fc_exch.c 2011-11-11 15:19:27.000000000 -0500
33952+++ linux-3.1.1/drivers/scsi/libfc/fc_exch.c 2011-11-16 18:39:07.000000000 -0500
33953@@ -105,12 +105,12 @@ struct fc_exch_mgr {
33954 * all together if not used XXX
33955 */
33956 struct {
33957- atomic_t no_free_exch;
33958- atomic_t no_free_exch_xid;
33959- atomic_t xid_not_found;
33960- atomic_t xid_busy;
33961- atomic_t seq_not_found;
33962- atomic_t non_bls_resp;
33963+ atomic_unchecked_t no_free_exch;
33964+ atomic_unchecked_t no_free_exch_xid;
33965+ atomic_unchecked_t xid_not_found;
33966+ atomic_unchecked_t xid_busy;
33967+ atomic_unchecked_t seq_not_found;
33968+ atomic_unchecked_t non_bls_resp;
33969 } stats;
33970 };
33971
33972@@ -718,7 +718,7 @@ static struct fc_exch *fc_exch_em_alloc(
33973 /* allocate memory for exchange */
33974 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
33975 if (!ep) {
33976- atomic_inc(&mp->stats.no_free_exch);
33977+ atomic_inc_unchecked(&mp->stats.no_free_exch);
33978 goto out;
33979 }
33980 memset(ep, 0, sizeof(*ep));
33981@@ -779,7 +779,7 @@ out:
33982 return ep;
33983 err:
33984 spin_unlock_bh(&pool->lock);
33985- atomic_inc(&mp->stats.no_free_exch_xid);
33986+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
33987 mempool_free(ep, mp->ep_pool);
33988 return NULL;
33989 }
33990@@ -922,7 +922,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33991 xid = ntohs(fh->fh_ox_id); /* we originated exch */
33992 ep = fc_exch_find(mp, xid);
33993 if (!ep) {
33994- atomic_inc(&mp->stats.xid_not_found);
33995+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33996 reject = FC_RJT_OX_ID;
33997 goto out;
33998 }
33999@@ -952,7 +952,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34000 ep = fc_exch_find(mp, xid);
34001 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
34002 if (ep) {
34003- atomic_inc(&mp->stats.xid_busy);
34004+ atomic_inc_unchecked(&mp->stats.xid_busy);
34005 reject = FC_RJT_RX_ID;
34006 goto rel;
34007 }
34008@@ -963,7 +963,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34009 }
34010 xid = ep->xid; /* get our XID */
34011 } else if (!ep) {
34012- atomic_inc(&mp->stats.xid_not_found);
34013+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34014 reject = FC_RJT_RX_ID; /* XID not found */
34015 goto out;
34016 }
34017@@ -980,7 +980,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34018 } else {
34019 sp = &ep->seq;
34020 if (sp->id != fh->fh_seq_id) {
34021- atomic_inc(&mp->stats.seq_not_found);
34022+ atomic_inc_unchecked(&mp->stats.seq_not_found);
34023 if (f_ctl & FC_FC_END_SEQ) {
34024 /*
34025 * Update sequence_id based on incoming last
34026@@ -1430,22 +1430,22 @@ static void fc_exch_recv_seq_resp(struct
34027
34028 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
34029 if (!ep) {
34030- atomic_inc(&mp->stats.xid_not_found);
34031+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34032 goto out;
34033 }
34034 if (ep->esb_stat & ESB_ST_COMPLETE) {
34035- atomic_inc(&mp->stats.xid_not_found);
34036+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34037 goto rel;
34038 }
34039 if (ep->rxid == FC_XID_UNKNOWN)
34040 ep->rxid = ntohs(fh->fh_rx_id);
34041 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
34042- atomic_inc(&mp->stats.xid_not_found);
34043+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34044 goto rel;
34045 }
34046 if (ep->did != ntoh24(fh->fh_s_id) &&
34047 ep->did != FC_FID_FLOGI) {
34048- atomic_inc(&mp->stats.xid_not_found);
34049+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34050 goto rel;
34051 }
34052 sof = fr_sof(fp);
34053@@ -1454,7 +1454,7 @@ static void fc_exch_recv_seq_resp(struct
34054 sp->ssb_stat |= SSB_ST_RESP;
34055 sp->id = fh->fh_seq_id;
34056 } else if (sp->id != fh->fh_seq_id) {
34057- atomic_inc(&mp->stats.seq_not_found);
34058+ atomic_inc_unchecked(&mp->stats.seq_not_found);
34059 goto rel;
34060 }
34061
34062@@ -1518,9 +1518,9 @@ static void fc_exch_recv_resp(struct fc_
34063 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
34064
34065 if (!sp)
34066- atomic_inc(&mp->stats.xid_not_found);
34067+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34068 else
34069- atomic_inc(&mp->stats.non_bls_resp);
34070+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
34071
34072 fc_frame_free(fp);
34073 }
34074diff -urNp linux-3.1.1/drivers/scsi/libsas/sas_ata.c linux-3.1.1/drivers/scsi/libsas/sas_ata.c
34075--- linux-3.1.1/drivers/scsi/libsas/sas_ata.c 2011-11-11 15:19:27.000000000 -0500
34076+++ linux-3.1.1/drivers/scsi/libsas/sas_ata.c 2011-11-16 18:39:07.000000000 -0500
34077@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sa
34078 .postreset = ata_std_postreset,
34079 .error_handler = ata_std_error_handler,
34080 .post_internal_cmd = sas_ata_post_internal,
34081- .qc_defer = ata_std_qc_defer,
34082+ .qc_defer = ata_std_qc_defer,
34083 .qc_prep = ata_noop_qc_prep,
34084 .qc_issue = sas_ata_qc_issue,
34085 .qc_fill_rtf = sas_ata_qc_fill_rtf,
34086diff -urNp linux-3.1.1/drivers/scsi/lpfc/lpfc_debugfs.c linux-3.1.1/drivers/scsi/lpfc/lpfc_debugfs.c
34087--- linux-3.1.1/drivers/scsi/lpfc/lpfc_debugfs.c 2011-11-11 15:19:27.000000000 -0500
34088+++ linux-3.1.1/drivers/scsi/lpfc/lpfc_debugfs.c 2011-11-16 18:40:22.000000000 -0500
34089@@ -105,7 +105,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
34090
34091 #include <linux/debugfs.h>
34092
34093-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34094+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34095 static unsigned long lpfc_debugfs_start_time = 0L;
34096
34097 /* iDiag */
34098@@ -146,7 +146,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
34099 lpfc_debugfs_enable = 0;
34100
34101 len = 0;
34102- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
34103+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
34104 (lpfc_debugfs_max_disc_trc - 1);
34105 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
34106 dtp = vport->disc_trc + i;
34107@@ -212,7 +212,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
34108 lpfc_debugfs_enable = 0;
34109
34110 len = 0;
34111- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
34112+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
34113 (lpfc_debugfs_max_slow_ring_trc - 1);
34114 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
34115 dtp = phba->slow_ring_trc + i;
34116@@ -635,14 +635,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
34117 !vport || !vport->disc_trc)
34118 return;
34119
34120- index = atomic_inc_return(&vport->disc_trc_cnt) &
34121+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
34122 (lpfc_debugfs_max_disc_trc - 1);
34123 dtp = vport->disc_trc + index;
34124 dtp->fmt = fmt;
34125 dtp->data1 = data1;
34126 dtp->data2 = data2;
34127 dtp->data3 = data3;
34128- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34129+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34130 dtp->jif = jiffies;
34131 #endif
34132 return;
34133@@ -673,14 +673,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
34134 !phba || !phba->slow_ring_trc)
34135 return;
34136
34137- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
34138+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
34139 (lpfc_debugfs_max_slow_ring_trc - 1);
34140 dtp = phba->slow_ring_trc + index;
34141 dtp->fmt = fmt;
34142 dtp->data1 = data1;
34143 dtp->data2 = data2;
34144 dtp->data3 = data3;
34145- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34146+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34147 dtp->jif = jiffies;
34148 #endif
34149 return;
34150@@ -3828,7 +3828,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
34151 "slow_ring buffer\n");
34152 goto debug_failed;
34153 }
34154- atomic_set(&phba->slow_ring_trc_cnt, 0);
34155+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
34156 memset(phba->slow_ring_trc, 0,
34157 (sizeof(struct lpfc_debugfs_trc) *
34158 lpfc_debugfs_max_slow_ring_trc));
34159@@ -3874,7 +3874,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
34160 "buffer\n");
34161 goto debug_failed;
34162 }
34163- atomic_set(&vport->disc_trc_cnt, 0);
34164+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
34165
34166 snprintf(name, sizeof(name), "discovery_trace");
34167 vport->debug_disc_trc =
34168diff -urNp linux-3.1.1/drivers/scsi/lpfc/lpfc.h linux-3.1.1/drivers/scsi/lpfc/lpfc.h
34169--- linux-3.1.1/drivers/scsi/lpfc/lpfc.h 2011-11-11 15:19:27.000000000 -0500
34170+++ linux-3.1.1/drivers/scsi/lpfc/lpfc.h 2011-11-16 18:39:07.000000000 -0500
34171@@ -425,7 +425,7 @@ struct lpfc_vport {
34172 struct dentry *debug_nodelist;
34173 struct dentry *vport_debugfs_root;
34174 struct lpfc_debugfs_trc *disc_trc;
34175- atomic_t disc_trc_cnt;
34176+ atomic_unchecked_t disc_trc_cnt;
34177 #endif
34178 uint8_t stat_data_enabled;
34179 uint8_t stat_data_blocked;
34180@@ -835,8 +835,8 @@ struct lpfc_hba {
34181 struct timer_list fabric_block_timer;
34182 unsigned long bit_flags;
34183 #define FABRIC_COMANDS_BLOCKED 0
34184- atomic_t num_rsrc_err;
34185- atomic_t num_cmd_success;
34186+ atomic_unchecked_t num_rsrc_err;
34187+ atomic_unchecked_t num_cmd_success;
34188 unsigned long last_rsrc_error_time;
34189 unsigned long last_ramp_down_time;
34190 unsigned long last_ramp_up_time;
34191@@ -850,7 +850,7 @@ struct lpfc_hba {
34192 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
34193 struct dentry *debug_slow_ring_trc;
34194 struct lpfc_debugfs_trc *slow_ring_trc;
34195- atomic_t slow_ring_trc_cnt;
34196+ atomic_unchecked_t slow_ring_trc_cnt;
34197 /* iDiag debugfs sub-directory */
34198 struct dentry *idiag_root;
34199 struct dentry *idiag_pci_cfg;
34200diff -urNp linux-3.1.1/drivers/scsi/lpfc/lpfc_init.c linux-3.1.1/drivers/scsi/lpfc/lpfc_init.c
34201--- linux-3.1.1/drivers/scsi/lpfc/lpfc_init.c 2011-11-11 15:19:27.000000000 -0500
34202+++ linux-3.1.1/drivers/scsi/lpfc/lpfc_init.c 2011-11-16 18:39:07.000000000 -0500
34203@@ -9969,8 +9969,10 @@ lpfc_init(void)
34204 printk(LPFC_COPYRIGHT "\n");
34205
34206 if (lpfc_enable_npiv) {
34207- lpfc_transport_functions.vport_create = lpfc_vport_create;
34208- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34209+ pax_open_kernel();
34210+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
34211+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34212+ pax_close_kernel();
34213 }
34214 lpfc_transport_template =
34215 fc_attach_transport(&lpfc_transport_functions);
34216diff -urNp linux-3.1.1/drivers/scsi/lpfc/lpfc_scsi.c linux-3.1.1/drivers/scsi/lpfc/lpfc_scsi.c
34217--- linux-3.1.1/drivers/scsi/lpfc/lpfc_scsi.c 2011-11-11 15:19:27.000000000 -0500
34218+++ linux-3.1.1/drivers/scsi/lpfc/lpfc_scsi.c 2011-11-16 18:39:07.000000000 -0500
34219@@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
34220 uint32_t evt_posted;
34221
34222 spin_lock_irqsave(&phba->hbalock, flags);
34223- atomic_inc(&phba->num_rsrc_err);
34224+ atomic_inc_unchecked(&phba->num_rsrc_err);
34225 phba->last_rsrc_error_time = jiffies;
34226
34227 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
34228@@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
34229 unsigned long flags;
34230 struct lpfc_hba *phba = vport->phba;
34231 uint32_t evt_posted;
34232- atomic_inc(&phba->num_cmd_success);
34233+ atomic_inc_unchecked(&phba->num_cmd_success);
34234
34235 if (vport->cfg_lun_queue_depth <= queue_depth)
34236 return;
34237@@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
34238 unsigned long num_rsrc_err, num_cmd_success;
34239 int i;
34240
34241- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
34242- num_cmd_success = atomic_read(&phba->num_cmd_success);
34243+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
34244+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
34245
34246 vports = lpfc_create_vport_work_array(phba);
34247 if (vports != NULL)
34248@@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
34249 }
34250 }
34251 lpfc_destroy_vport_work_array(phba, vports);
34252- atomic_set(&phba->num_rsrc_err, 0);
34253- atomic_set(&phba->num_cmd_success, 0);
34254+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
34255+ atomic_set_unchecked(&phba->num_cmd_success, 0);
34256 }
34257
34258 /**
34259@@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
34260 }
34261 }
34262 lpfc_destroy_vport_work_array(phba, vports);
34263- atomic_set(&phba->num_rsrc_err, 0);
34264- atomic_set(&phba->num_cmd_success, 0);
34265+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
34266+ atomic_set_unchecked(&phba->num_cmd_success, 0);
34267 }
34268
34269 /**
34270diff -urNp linux-3.1.1/drivers/scsi/megaraid/megaraid_mbox.c linux-3.1.1/drivers/scsi/megaraid/megaraid_mbox.c
34271--- linux-3.1.1/drivers/scsi/megaraid/megaraid_mbox.c 2011-11-11 15:19:27.000000000 -0500
34272+++ linux-3.1.1/drivers/scsi/megaraid/megaraid_mbox.c 2011-11-16 18:40:22.000000000 -0500
34273@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
34274 int rval;
34275 int i;
34276
34277+ pax_track_stack();
34278+
34279 // Allocate memory for the base list of scb for management module.
34280 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
34281
34282diff -urNp linux-3.1.1/drivers/scsi/osd/osd_initiator.c linux-3.1.1/drivers/scsi/osd/osd_initiator.c
34283--- linux-3.1.1/drivers/scsi/osd/osd_initiator.c 2011-11-11 15:19:27.000000000 -0500
34284+++ linux-3.1.1/drivers/scsi/osd/osd_initiator.c 2011-11-16 18:40:22.000000000 -0500
34285@@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
34286 int nelem = ARRAY_SIZE(get_attrs), a = 0;
34287 int ret;
34288
34289+ pax_track_stack();
34290+
34291 or = osd_start_request(od, GFP_KERNEL);
34292 if (!or)
34293 return -ENOMEM;
34294diff -urNp linux-3.1.1/drivers/scsi/pmcraid.c linux-3.1.1/drivers/scsi/pmcraid.c
34295--- linux-3.1.1/drivers/scsi/pmcraid.c 2011-11-11 15:19:27.000000000 -0500
34296+++ linux-3.1.1/drivers/scsi/pmcraid.c 2011-11-16 18:39:07.000000000 -0500
34297@@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
34298 res->scsi_dev = scsi_dev;
34299 scsi_dev->hostdata = res;
34300 res->change_detected = 0;
34301- atomic_set(&res->read_failures, 0);
34302- atomic_set(&res->write_failures, 0);
34303+ atomic_set_unchecked(&res->read_failures, 0);
34304+ atomic_set_unchecked(&res->write_failures, 0);
34305 rc = 0;
34306 }
34307 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
34308@@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct
34309
34310 /* If this was a SCSI read/write command keep count of errors */
34311 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
34312- atomic_inc(&res->read_failures);
34313+ atomic_inc_unchecked(&res->read_failures);
34314 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
34315- atomic_inc(&res->write_failures);
34316+ atomic_inc_unchecked(&res->write_failures);
34317
34318 if (!RES_IS_GSCSI(res->cfg_entry) &&
34319 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
34320@@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
34321 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34322 * hrrq_id assigned here in queuecommand
34323 */
34324- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34325+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34326 pinstance->num_hrrq;
34327 cmd->cmd_done = pmcraid_io_done;
34328
34329@@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
34330 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34331 * hrrq_id assigned here in queuecommand
34332 */
34333- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34334+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34335 pinstance->num_hrrq;
34336
34337 if (request_size) {
34338@@ -4498,7 +4498,7 @@ static void pmcraid_worker_function(stru
34339
34340 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
34341 /* add resources only after host is added into system */
34342- if (!atomic_read(&pinstance->expose_resources))
34343+ if (!atomic_read_unchecked(&pinstance->expose_resources))
34344 return;
34345
34346 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
34347@@ -5332,8 +5332,8 @@ static int __devinit pmcraid_init_instan
34348 init_waitqueue_head(&pinstance->reset_wait_q);
34349
34350 atomic_set(&pinstance->outstanding_cmds, 0);
34351- atomic_set(&pinstance->last_message_id, 0);
34352- atomic_set(&pinstance->expose_resources, 0);
34353+ atomic_set_unchecked(&pinstance->last_message_id, 0);
34354+ atomic_set_unchecked(&pinstance->expose_resources, 0);
34355
34356 INIT_LIST_HEAD(&pinstance->free_res_q);
34357 INIT_LIST_HEAD(&pinstance->used_res_q);
34358@@ -6048,7 +6048,7 @@ static int __devinit pmcraid_probe(
34359 /* Schedule worker thread to handle CCN and take care of adding and
34360 * removing devices to OS
34361 */
34362- atomic_set(&pinstance->expose_resources, 1);
34363+ atomic_set_unchecked(&pinstance->expose_resources, 1);
34364 schedule_work(&pinstance->worker_q);
34365 return rc;
34366
34367diff -urNp linux-3.1.1/drivers/scsi/pmcraid.h linux-3.1.1/drivers/scsi/pmcraid.h
34368--- linux-3.1.1/drivers/scsi/pmcraid.h 2011-11-11 15:19:27.000000000 -0500
34369+++ linux-3.1.1/drivers/scsi/pmcraid.h 2011-11-16 18:39:07.000000000 -0500
34370@@ -749,7 +749,7 @@ struct pmcraid_instance {
34371 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
34372
34373 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
34374- atomic_t last_message_id;
34375+ atomic_unchecked_t last_message_id;
34376
34377 /* configuration table */
34378 struct pmcraid_config_table *cfg_table;
34379@@ -778,7 +778,7 @@ struct pmcraid_instance {
34380 atomic_t outstanding_cmds;
34381
34382 /* should add/delete resources to mid-layer now ?*/
34383- atomic_t expose_resources;
34384+ atomic_unchecked_t expose_resources;
34385
34386
34387
34388@@ -814,8 +814,8 @@ struct pmcraid_resource_entry {
34389 struct pmcraid_config_table_entry_ext cfg_entry_ext;
34390 };
34391 struct scsi_device *scsi_dev; /* Link scsi_device structure */
34392- atomic_t read_failures; /* count of failed READ commands */
34393- atomic_t write_failures; /* count of failed WRITE commands */
34394+ atomic_unchecked_t read_failures; /* count of failed READ commands */
34395+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
34396
34397 /* To indicate add/delete/modify during CCN */
34398 u8 change_detected;
34399diff -urNp linux-3.1.1/drivers/scsi/qla2xxx/qla_def.h linux-3.1.1/drivers/scsi/qla2xxx/qla_def.h
34400--- linux-3.1.1/drivers/scsi/qla2xxx/qla_def.h 2011-11-11 15:19:27.000000000 -0500
34401+++ linux-3.1.1/drivers/scsi/qla2xxx/qla_def.h 2011-11-16 18:39:07.000000000 -0500
34402@@ -2244,7 +2244,7 @@ struct isp_operations {
34403 int (*get_flash_version) (struct scsi_qla_host *, void *);
34404 int (*start_scsi) (srb_t *);
34405 int (*abort_isp) (struct scsi_qla_host *);
34406-};
34407+} __no_const;
34408
34409 /* MSI-X Support *************************************************************/
34410
34411diff -urNp linux-3.1.1/drivers/scsi/qla4xxx/ql4_def.h linux-3.1.1/drivers/scsi/qla4xxx/ql4_def.h
34412--- linux-3.1.1/drivers/scsi/qla4xxx/ql4_def.h 2011-11-11 15:19:27.000000000 -0500
34413+++ linux-3.1.1/drivers/scsi/qla4xxx/ql4_def.h 2011-11-16 18:39:07.000000000 -0500
34414@@ -256,7 +256,7 @@ struct ddb_entry {
34415 atomic_t retry_relogin_timer; /* Min Time between relogins
34416 * (4000 only) */
34417 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
34418- atomic_t relogin_retry_count; /* Num of times relogin has been
34419+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
34420 * retried */
34421
34422 uint16_t port;
34423diff -urNp linux-3.1.1/drivers/scsi/qla4xxx/ql4_init.c linux-3.1.1/drivers/scsi/qla4xxx/ql4_init.c
34424--- linux-3.1.1/drivers/scsi/qla4xxx/ql4_init.c 2011-11-11 15:19:27.000000000 -0500
34425+++ linux-3.1.1/drivers/scsi/qla4xxx/ql4_init.c 2011-11-16 18:39:07.000000000 -0500
34426@@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
34427 ddb_entry->fw_ddb_index = fw_ddb_index;
34428 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
34429 atomic_set(&ddb_entry->relogin_timer, 0);
34430- atomic_set(&ddb_entry->relogin_retry_count, 0);
34431+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34432 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
34433 list_add_tail(&ddb_entry->list, &ha->ddb_list);
34434 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
34435@@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
34436 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
34437 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
34438 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
34439- atomic_set(&ddb_entry->relogin_retry_count, 0);
34440+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34441 atomic_set(&ddb_entry->relogin_timer, 0);
34442 clear_bit(DF_RELOGIN, &ddb_entry->flags);
34443 iscsi_unblock_session(ddb_entry->sess);
34444diff -urNp linux-3.1.1/drivers/scsi/qla4xxx/ql4_os.c linux-3.1.1/drivers/scsi/qla4xxx/ql4_os.c
34445--- linux-3.1.1/drivers/scsi/qla4xxx/ql4_os.c 2011-11-11 15:19:27.000000000 -0500
34446+++ linux-3.1.1/drivers/scsi/qla4xxx/ql4_os.c 2011-11-16 18:39:07.000000000 -0500
34447@@ -811,13 +811,13 @@ static void qla4xxx_timer(struct scsi_ql
34448 ddb_entry->fw_ddb_device_state ==
34449 DDB_DS_SESSION_FAILED) {
34450 /* Reset retry relogin timer */
34451- atomic_inc(&ddb_entry->relogin_retry_count);
34452+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
34453 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
34454 " timed out-retrying"
34455 " relogin (%d)\n",
34456 ha->host_no,
34457 ddb_entry->fw_ddb_index,
34458- atomic_read(&ddb_entry->
34459+ atomic_read_unchecked(&ddb_entry->
34460 relogin_retry_count))
34461 );
34462 start_dpc++;
34463diff -urNp linux-3.1.1/drivers/scsi/scsi.c linux-3.1.1/drivers/scsi/scsi.c
34464--- linux-3.1.1/drivers/scsi/scsi.c 2011-11-11 15:19:27.000000000 -0500
34465+++ linux-3.1.1/drivers/scsi/scsi.c 2011-11-16 18:39:07.000000000 -0500
34466@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
34467 unsigned long timeout;
34468 int rtn = 0;
34469
34470- atomic_inc(&cmd->device->iorequest_cnt);
34471+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34472
34473 /* check if the device is still usable */
34474 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
34475diff -urNp linux-3.1.1/drivers/scsi/scsi_debug.c linux-3.1.1/drivers/scsi/scsi_debug.c
34476--- linux-3.1.1/drivers/scsi/scsi_debug.c 2011-11-11 15:19:27.000000000 -0500
34477+++ linux-3.1.1/drivers/scsi/scsi_debug.c 2011-11-16 18:40:22.000000000 -0500
34478@@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
34479 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
34480 unsigned char *cmd = (unsigned char *)scp->cmnd;
34481
34482+ pax_track_stack();
34483+
34484 if ((errsts = check_readiness(scp, 1, devip)))
34485 return errsts;
34486 memset(arr, 0, sizeof(arr));
34487@@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
34488 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
34489 unsigned char *cmd = (unsigned char *)scp->cmnd;
34490
34491+ pax_track_stack();
34492+
34493 if ((errsts = check_readiness(scp, 1, devip)))
34494 return errsts;
34495 memset(arr, 0, sizeof(arr));
34496diff -urNp linux-3.1.1/drivers/scsi/scsi_lib.c linux-3.1.1/drivers/scsi/scsi_lib.c
34497--- linux-3.1.1/drivers/scsi/scsi_lib.c 2011-11-11 15:19:27.000000000 -0500
34498+++ linux-3.1.1/drivers/scsi/scsi_lib.c 2011-11-16 18:39:07.000000000 -0500
34499@@ -1413,7 +1413,7 @@ static void scsi_kill_request(struct req
34500 shost = sdev->host;
34501 scsi_init_cmd_errh(cmd);
34502 cmd->result = DID_NO_CONNECT << 16;
34503- atomic_inc(&cmd->device->iorequest_cnt);
34504+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34505
34506 /*
34507 * SCSI request completion path will do scsi_device_unbusy(),
34508@@ -1439,9 +1439,9 @@ static void scsi_softirq_done(struct req
34509
34510 INIT_LIST_HEAD(&cmd->eh_entry);
34511
34512- atomic_inc(&cmd->device->iodone_cnt);
34513+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
34514 if (cmd->result)
34515- atomic_inc(&cmd->device->ioerr_cnt);
34516+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
34517
34518 disposition = scsi_decide_disposition(cmd);
34519 if (disposition != SUCCESS &&
34520diff -urNp linux-3.1.1/drivers/scsi/scsi_sysfs.c linux-3.1.1/drivers/scsi/scsi_sysfs.c
34521--- linux-3.1.1/drivers/scsi/scsi_sysfs.c 2011-11-11 15:19:27.000000000 -0500
34522+++ linux-3.1.1/drivers/scsi/scsi_sysfs.c 2011-11-16 18:39:07.000000000 -0500
34523@@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev,
34524 char *buf) \
34525 { \
34526 struct scsi_device *sdev = to_scsi_device(dev); \
34527- unsigned long long count = atomic_read(&sdev->field); \
34528+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
34529 return snprintf(buf, 20, "0x%llx\n", count); \
34530 } \
34531 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
34532diff -urNp linux-3.1.1/drivers/scsi/scsi_tgt_lib.c linux-3.1.1/drivers/scsi/scsi_tgt_lib.c
34533--- linux-3.1.1/drivers/scsi/scsi_tgt_lib.c 2011-11-11 15:19:27.000000000 -0500
34534+++ linux-3.1.1/drivers/scsi/scsi_tgt_lib.c 2011-11-16 18:39:07.000000000 -0500
34535@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct sc
34536 int err;
34537
34538 dprintk("%lx %u\n", uaddr, len);
34539- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
34540+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
34541 if (err) {
34542 /*
34543 * TODO: need to fixup sg_tablesize, max_segment_size,
34544diff -urNp linux-3.1.1/drivers/scsi/scsi_transport_fc.c linux-3.1.1/drivers/scsi/scsi_transport_fc.c
34545--- linux-3.1.1/drivers/scsi/scsi_transport_fc.c 2011-11-11 15:19:27.000000000 -0500
34546+++ linux-3.1.1/drivers/scsi/scsi_transport_fc.c 2011-11-16 18:39:07.000000000 -0500
34547@@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
34548 * Netlink Infrastructure
34549 */
34550
34551-static atomic_t fc_event_seq;
34552+static atomic_unchecked_t fc_event_seq;
34553
34554 /**
34555 * fc_get_event_number - Obtain the next sequential FC event number
34556@@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
34557 u32
34558 fc_get_event_number(void)
34559 {
34560- return atomic_add_return(1, &fc_event_seq);
34561+ return atomic_add_return_unchecked(1, &fc_event_seq);
34562 }
34563 EXPORT_SYMBOL(fc_get_event_number);
34564
34565@@ -645,7 +645,7 @@ static __init int fc_transport_init(void
34566 {
34567 int error;
34568
34569- atomic_set(&fc_event_seq, 0);
34570+ atomic_set_unchecked(&fc_event_seq, 0);
34571
34572 error = transport_class_register(&fc_host_class);
34573 if (error)
34574@@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char
34575 char *cp;
34576
34577 *val = simple_strtoul(buf, &cp, 0);
34578- if ((*cp && (*cp != '\n')) || (*val < 0))
34579+ if (*cp && (*cp != '\n'))
34580 return -EINVAL;
34581 /*
34582 * Check for overflow; dev_loss_tmo is u32
34583diff -urNp linux-3.1.1/drivers/scsi/scsi_transport_iscsi.c linux-3.1.1/drivers/scsi/scsi_transport_iscsi.c
34584--- linux-3.1.1/drivers/scsi/scsi_transport_iscsi.c 2011-11-11 15:19:27.000000000 -0500
34585+++ linux-3.1.1/drivers/scsi/scsi_transport_iscsi.c 2011-11-16 18:39:07.000000000 -0500
34586@@ -83,7 +83,7 @@ struct iscsi_internal {
34587 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
34588 };
34589
34590-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
34591+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
34592 static struct workqueue_struct *iscsi_eh_timer_workq;
34593
34594 /*
34595@@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
34596 int err;
34597
34598 ihost = shost->shost_data;
34599- session->sid = atomic_add_return(1, &iscsi_session_nr);
34600+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
34601
34602 if (id == ISCSI_MAX_TARGET) {
34603 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
34604@@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
34605 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
34606 ISCSI_TRANSPORT_VERSION);
34607
34608- atomic_set(&iscsi_session_nr, 0);
34609+ atomic_set_unchecked(&iscsi_session_nr, 0);
34610
34611 err = class_register(&iscsi_transport_class);
34612 if (err)
34613diff -urNp linux-3.1.1/drivers/scsi/scsi_transport_srp.c linux-3.1.1/drivers/scsi/scsi_transport_srp.c
34614--- linux-3.1.1/drivers/scsi/scsi_transport_srp.c 2011-11-11 15:19:27.000000000 -0500
34615+++ linux-3.1.1/drivers/scsi/scsi_transport_srp.c 2011-11-16 18:39:07.000000000 -0500
34616@@ -33,7 +33,7 @@
34617 #include "scsi_transport_srp_internal.h"
34618
34619 struct srp_host_attrs {
34620- atomic_t next_port_id;
34621+ atomic_unchecked_t next_port_id;
34622 };
34623 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
34624
34625@@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
34626 struct Scsi_Host *shost = dev_to_shost(dev);
34627 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
34628
34629- atomic_set(&srp_host->next_port_id, 0);
34630+ atomic_set_unchecked(&srp_host->next_port_id, 0);
34631 return 0;
34632 }
34633
34634@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
34635 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
34636 rport->roles = ids->roles;
34637
34638- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
34639+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
34640 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
34641
34642 transport_setup_device(&rport->dev);
34643diff -urNp linux-3.1.1/drivers/scsi/sg.c linux-3.1.1/drivers/scsi/sg.c
34644--- linux-3.1.1/drivers/scsi/sg.c 2011-11-11 15:19:27.000000000 -0500
34645+++ linux-3.1.1/drivers/scsi/sg.c 2011-11-16 18:39:07.000000000 -0500
34646@@ -1075,7 +1075,7 @@ sg_ioctl(struct file *filp, unsigned int
34647 sdp->disk->disk_name,
34648 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
34649 NULL,
34650- (char *)arg);
34651+ (char __user *)arg);
34652 case BLKTRACESTART:
34653 return blk_trace_startstop(sdp->device->request_queue, 1);
34654 case BLKTRACESTOP:
34655@@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
34656 const struct file_operations * fops;
34657 };
34658
34659-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
34660+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
34661 {"allow_dio", &adio_fops},
34662 {"debug", &debug_fops},
34663 {"def_reserved_size", &dressz_fops},
34664@@ -2325,7 +2325,7 @@ sg_proc_init(void)
34665 {
34666 int k, mask;
34667 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
34668- struct sg_proc_leaf * leaf;
34669+ const struct sg_proc_leaf * leaf;
34670
34671 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
34672 if (!sg_proc_sgp)
34673diff -urNp linux-3.1.1/drivers/scsi/sym53c8xx_2/sym_glue.c linux-3.1.1/drivers/scsi/sym53c8xx_2/sym_glue.c
34674--- linux-3.1.1/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-11-11 15:19:27.000000000 -0500
34675+++ linux-3.1.1/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-11-16 18:40:22.000000000 -0500
34676@@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
34677 int do_iounmap = 0;
34678 int do_disable_device = 1;
34679
34680+ pax_track_stack();
34681+
34682 memset(&sym_dev, 0, sizeof(sym_dev));
34683 memset(&nvram, 0, sizeof(nvram));
34684 sym_dev.pdev = pdev;
34685diff -urNp linux-3.1.1/drivers/scsi/vmw_pvscsi.c linux-3.1.1/drivers/scsi/vmw_pvscsi.c
34686--- linux-3.1.1/drivers/scsi/vmw_pvscsi.c 2011-11-11 15:19:27.000000000 -0500
34687+++ linux-3.1.1/drivers/scsi/vmw_pvscsi.c 2011-11-16 18:40:22.000000000 -0500
34688@@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
34689 dma_addr_t base;
34690 unsigned i;
34691
34692+ pax_track_stack();
34693+
34694 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
34695 cmd.reqRingNumPages = adapter->req_pages;
34696 cmd.cmpRingNumPages = adapter->cmp_pages;
34697diff -urNp linux-3.1.1/drivers/spi/spi.c linux-3.1.1/drivers/spi/spi.c
34698--- linux-3.1.1/drivers/spi/spi.c 2011-11-11 15:19:27.000000000 -0500
34699+++ linux-3.1.1/drivers/spi/spi.c 2011-11-16 18:39:07.000000000 -0500
34700@@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
34701 EXPORT_SYMBOL_GPL(spi_bus_unlock);
34702
34703 /* portable code must never pass more than 32 bytes */
34704-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
34705+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
34706
34707 static u8 *buf;
34708
34709diff -urNp linux-3.1.1/drivers/spi/spi-dw-pci.c linux-3.1.1/drivers/spi/spi-dw-pci.c
34710--- linux-3.1.1/drivers/spi/spi-dw-pci.c 2011-11-11 15:19:27.000000000 -0500
34711+++ linux-3.1.1/drivers/spi/spi-dw-pci.c 2011-11-16 18:39:07.000000000 -0500
34712@@ -148,7 +148,7 @@ static int spi_resume(struct pci_dev *pd
34713 #define spi_resume NULL
34714 #endif
34715
34716-static const struct pci_device_id pci_ids[] __devinitdata = {
34717+static const struct pci_device_id pci_ids[] __devinitconst = {
34718 /* Intel MID platform SPI controller 0 */
34719 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
34720 {},
34721diff -urNp linux-3.1.1/drivers/staging/ath6kl/os/linux/ar6000_drv.c linux-3.1.1/drivers/staging/ath6kl/os/linux/ar6000_drv.c
34722--- linux-3.1.1/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-11-11 15:19:27.000000000 -0500
34723+++ linux-3.1.1/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-11-16 18:39:07.000000000 -0500
34724@@ -362,7 +362,7 @@ static struct ar_cookie s_ar_cookie_mem[
34725 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
34726
34727
34728-static struct net_device_ops ar6000_netdev_ops = {
34729+static net_device_ops_no_const ar6000_netdev_ops = {
34730 .ndo_init = NULL,
34731 .ndo_open = ar6000_open,
34732 .ndo_stop = ar6000_close,
34733diff -urNp linux-3.1.1/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h linux-3.1.1/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
34734--- linux-3.1.1/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-11-11 15:19:27.000000000 -0500
34735+++ linux-3.1.1/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-11-16 18:39:07.000000000 -0500
34736@@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void
34737 typedef struct ar6k_pal_config_s
34738 {
34739 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
34740-}ar6k_pal_config_t;
34741+} __no_const ar6k_pal_config_t;
34742
34743 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
34744 #endif /* _AR6K_PAL_H_ */
34745diff -urNp linux-3.1.1/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-3.1.1/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
34746--- linux-3.1.1/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-11-11 15:19:27.000000000 -0500
34747+++ linux-3.1.1/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-11-16 18:39:07.000000000 -0500
34748@@ -451,14 +451,14 @@ static void brcmf_op_if(struct brcmf_if
34749 free_netdev(ifp->net);
34750 }
34751 /* Allocate etherdev, including space for private structure */
34752- ifp->net = alloc_etherdev(sizeof(drvr_priv));
34753+ ifp->net = alloc_etherdev(sizeof(*drvr_priv));
34754 if (!ifp->net) {
34755 BRCMF_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
34756 ret = -ENOMEM;
34757 }
34758 if (ret == 0) {
34759 strcpy(ifp->net->name, ifp->name);
34760- memcpy(netdev_priv(ifp->net), &drvr_priv, sizeof(drvr_priv));
34761+ memcpy(netdev_priv(ifp->net), &drvr_priv, sizeof(*drvr_priv));
34762 err = brcmf_net_attach(&drvr_priv->pub, ifp->idx);
34763 if (err != 0) {
34764 BRCMF_ERROR(("%s: brcmf_net_attach failed, "
34765@@ -1279,7 +1279,7 @@ struct brcmf_pub *brcmf_attach(struct br
34766 BRCMF_TRACE(("%s: Enter\n", __func__));
34767
34768 /* Allocate etherdev, including space for private structure */
34769- net = alloc_etherdev(sizeof(drvr_priv));
34770+ net = alloc_etherdev(sizeof(*drvr_priv));
34771 if (!net) {
34772 BRCMF_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
34773 goto fail;
34774@@ -1295,7 +1295,7 @@ struct brcmf_pub *brcmf_attach(struct br
34775 /*
34776 * Save the brcmf_info into the priv
34777 */
34778- memcpy(netdev_priv(net), &drvr_priv, sizeof(drvr_priv));
34779+ memcpy(netdev_priv(net), &drvr_priv, sizeof(*drvr_priv));
34780
34781 /* Set network interface name if it was provided as module parameter */
34782 if (iface_name[0]) {
34783@@ -1352,7 +1352,7 @@ struct brcmf_pub *brcmf_attach(struct br
34784 /*
34785 * Save the brcmf_info into the priv
34786 */
34787- memcpy(netdev_priv(net), &drvr_priv, sizeof(drvr_priv));
34788+ memcpy(netdev_priv(net), &drvr_priv, sizeof(*drvr_priv));
34789
34790 #if defined(CONFIG_PM_SLEEP)
34791 atomic_set(&brcmf_mmc_suspend, false);
34792diff -urNp linux-3.1.1/drivers/staging/brcm80211/brcmfmac/sdio_host.h linux-3.1.1/drivers/staging/brcm80211/brcmfmac/sdio_host.h
34793--- linux-3.1.1/drivers/staging/brcm80211/brcmfmac/sdio_host.h 2011-11-11 15:19:27.000000000 -0500
34794+++ linux-3.1.1/drivers/staging/brcm80211/brcmfmac/sdio_host.h 2011-11-16 18:39:07.000000000 -0500
34795@@ -263,7 +263,7 @@ struct brcmf_sdioh_driver {
34796 u16 func, uint bustype, u32 regsva, void *param);
34797 /* detach from device */
34798 void (*detach) (void *ch);
34799-};
34800+} __no_const;
34801
34802 struct sdioh_info;
34803
34804diff -urNp linux-3.1.1/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h linux-3.1.1/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h
34805--- linux-3.1.1/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h 2011-11-11 15:19:27.000000000 -0500
34806+++ linux-3.1.1/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h 2011-11-16 18:39:07.000000000 -0500
34807@@ -591,7 +591,7 @@ struct phy_func_ptr {
34808 initfn_t carrsuppr;
34809 rxsigpwrfn_t rxsigpwr;
34810 detachfn_t detach;
34811-};
34812+} __no_const;
34813
34814 struct brcms_phy {
34815 struct brcms_phy_pub pubpi_ro;
34816diff -urNp linux-3.1.1/drivers/staging/et131x/et1310_tx.c linux-3.1.1/drivers/staging/et131x/et1310_tx.c
34817--- linux-3.1.1/drivers/staging/et131x/et1310_tx.c 2011-11-11 15:19:27.000000000 -0500
34818+++ linux-3.1.1/drivers/staging/et131x/et1310_tx.c 2011-11-16 18:39:07.000000000 -0500
34819@@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
34820 struct net_device_stats *stats = &etdev->net_stats;
34821
34822 if (tcb->flags & fMP_DEST_BROAD)
34823- atomic_inc(&etdev->stats.brdcstxmt);
34824+ atomic_inc_unchecked(&etdev->stats.brdcstxmt);
34825 else if (tcb->flags & fMP_DEST_MULTI)
34826- atomic_inc(&etdev->stats.multixmt);
34827+ atomic_inc_unchecked(&etdev->stats.multixmt);
34828 else
34829- atomic_inc(&etdev->stats.unixmt);
34830+ atomic_inc_unchecked(&etdev->stats.unixmt);
34831
34832 if (tcb->skb) {
34833 stats->tx_bytes += tcb->skb->len;
34834diff -urNp linux-3.1.1/drivers/staging/et131x/et131x_adapter.h linux-3.1.1/drivers/staging/et131x/et131x_adapter.h
34835--- linux-3.1.1/drivers/staging/et131x/et131x_adapter.h 2011-11-11 15:19:27.000000000 -0500
34836+++ linux-3.1.1/drivers/staging/et131x/et131x_adapter.h 2011-11-16 18:39:07.000000000 -0500
34837@@ -106,11 +106,11 @@ struct ce_stats {
34838 * operations
34839 */
34840 u32 unircv; /* # multicast packets received */
34841- atomic_t unixmt; /* # multicast packets for Tx */
34842+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
34843 u32 multircv; /* # multicast packets received */
34844- atomic_t multixmt; /* # multicast packets for Tx */
34845+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
34846 u32 brdcstrcv; /* # broadcast packets received */
34847- atomic_t brdcstxmt; /* # broadcast packets for Tx */
34848+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
34849 u32 norcvbuf; /* # Rx packets discarded */
34850 u32 noxmtbuf; /* # Tx packets discarded */
34851
34852diff -urNp linux-3.1.1/drivers/staging/hv/channel.c linux-3.1.1/drivers/staging/hv/channel.c
34853--- linux-3.1.1/drivers/staging/hv/channel.c 2011-11-11 15:19:27.000000000 -0500
34854+++ linux-3.1.1/drivers/staging/hv/channel.c 2011-11-16 18:39:07.000000000 -0500
34855@@ -447,8 +447,8 @@ int vmbus_establish_gpadl(struct vmbus_c
34856 int ret = 0;
34857 int t;
34858
34859- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
34860- atomic_inc(&vmbus_connection.next_gpadl_handle);
34861+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
34862+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
34863
34864 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
34865 if (ret)
34866diff -urNp linux-3.1.1/drivers/staging/hv/hv.c linux-3.1.1/drivers/staging/hv/hv.c
34867--- linux-3.1.1/drivers/staging/hv/hv.c 2011-11-11 15:19:27.000000000 -0500
34868+++ linux-3.1.1/drivers/staging/hv/hv.c 2011-11-16 18:39:07.000000000 -0500
34869@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, voi
34870 u64 output_address = (output) ? virt_to_phys(output) : 0;
34871 u32 output_address_hi = output_address >> 32;
34872 u32 output_address_lo = output_address & 0xFFFFFFFF;
34873- volatile void *hypercall_page = hv_context.hypercall_page;
34874+ volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
34875
34876 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
34877 "=a"(hv_status_lo) : "d" (control_hi),
34878diff -urNp linux-3.1.1/drivers/staging/hv/hv_mouse.c linux-3.1.1/drivers/staging/hv/hv_mouse.c
34879--- linux-3.1.1/drivers/staging/hv/hv_mouse.c 2011-11-11 15:19:27.000000000 -0500
34880+++ linux-3.1.1/drivers/staging/hv/hv_mouse.c 2011-11-16 18:39:07.000000000 -0500
34881@@ -878,8 +878,10 @@ static void reportdesc_callback(struct h
34882 if (hid_dev) {
34883 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
34884
34885- hid_dev->ll_driver->open = mousevsc_hid_open;
34886- hid_dev->ll_driver->close = mousevsc_hid_close;
34887+ pax_open_kernel();
34888+ *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
34889+ *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
34890+ pax_close_kernel();
34891
34892 hid_dev->bus = BUS_VIRTUAL;
34893 hid_dev->vendor = input_device_ctx->device_info.vendor;
34894diff -urNp linux-3.1.1/drivers/staging/hv/hyperv_vmbus.h linux-3.1.1/drivers/staging/hv/hyperv_vmbus.h
34895--- linux-3.1.1/drivers/staging/hv/hyperv_vmbus.h 2011-11-11 15:19:27.000000000 -0500
34896+++ linux-3.1.1/drivers/staging/hv/hyperv_vmbus.h 2011-11-16 18:39:07.000000000 -0500
34897@@ -559,7 +559,7 @@ enum vmbus_connect_state {
34898 struct vmbus_connection {
34899 enum vmbus_connect_state conn_state;
34900
34901- atomic_t next_gpadl_handle;
34902+ atomic_unchecked_t next_gpadl_handle;
34903
34904 /*
34905 * Represents channel interrupts. Each bit position represents a
34906diff -urNp linux-3.1.1/drivers/staging/hv/rndis_filter.c linux-3.1.1/drivers/staging/hv/rndis_filter.c
34907--- linux-3.1.1/drivers/staging/hv/rndis_filter.c 2011-11-11 15:19:27.000000000 -0500
34908+++ linux-3.1.1/drivers/staging/hv/rndis_filter.c 2011-11-16 18:39:07.000000000 -0500
34909@@ -43,7 +43,7 @@ struct rndis_device {
34910
34911 enum rndis_device_state state;
34912 u32 link_stat;
34913- atomic_t new_req_id;
34914+ atomic_unchecked_t new_req_id;
34915
34916 spinlock_t request_lock;
34917 struct list_head req_list;
34918@@ -117,7 +117,7 @@ static struct rndis_request *get_rndis_r
34919 * template
34920 */
34921 set = &rndis_msg->msg.set_req;
34922- set->req_id = atomic_inc_return(&dev->new_req_id);
34923+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34924
34925 /* Add to the request list */
34926 spin_lock_irqsave(&dev->request_lock, flags);
34927@@ -622,7 +622,7 @@ static void rndis_filter_halt_device(str
34928
34929 /* Setup the rndis set */
34930 halt = &request->request_msg.msg.halt_req;
34931- halt->req_id = atomic_inc_return(&dev->new_req_id);
34932+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34933
34934 /* Ignore return since this msg is optional. */
34935 rndis_filter_send_request(dev, request);
34936diff -urNp linux-3.1.1/drivers/staging/hv/vmbus_drv.c linux-3.1.1/drivers/staging/hv/vmbus_drv.c
34937--- linux-3.1.1/drivers/staging/hv/vmbus_drv.c 2011-11-11 15:19:27.000000000 -0500
34938+++ linux-3.1.1/drivers/staging/hv/vmbus_drv.c 2011-11-16 18:39:07.000000000 -0500
34939@@ -660,11 +660,11 @@ int vmbus_child_device_register(struct h
34940 {
34941 int ret = 0;
34942
34943- static atomic_t device_num = ATOMIC_INIT(0);
34944+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
34945
34946 /* Set the device name. Otherwise, device_register() will fail. */
34947 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
34948- atomic_inc_return(&device_num));
34949+ atomic_inc_return_unchecked(&device_num));
34950
34951 /* The new device belongs to this bus */
34952 child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */
34953diff -urNp linux-3.1.1/drivers/staging/iio/ring_generic.h linux-3.1.1/drivers/staging/iio/ring_generic.h
34954--- linux-3.1.1/drivers/staging/iio/ring_generic.h 2011-11-11 15:19:27.000000000 -0500
34955+++ linux-3.1.1/drivers/staging/iio/ring_generic.h 2011-11-16 18:39:07.000000000 -0500
34956@@ -62,7 +62,7 @@ struct iio_ring_access_funcs {
34957
34958 int (*is_enabled)(struct iio_ring_buffer *ring);
34959 int (*enable)(struct iio_ring_buffer *ring);
34960-};
34961+} __no_const;
34962
34963 struct iio_ring_setup_ops {
34964 int (*preenable)(struct iio_dev *);
34965diff -urNp linux-3.1.1/drivers/staging/octeon/ethernet.c linux-3.1.1/drivers/staging/octeon/ethernet.c
34966--- linux-3.1.1/drivers/staging/octeon/ethernet.c 2011-11-11 15:19:27.000000000 -0500
34967+++ linux-3.1.1/drivers/staging/octeon/ethernet.c 2011-11-16 18:39:07.000000000 -0500
34968@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
34969 * since the RX tasklet also increments it.
34970 */
34971 #ifdef CONFIG_64BIT
34972- atomic64_add(rx_status.dropped_packets,
34973- (atomic64_t *)&priv->stats.rx_dropped);
34974+ atomic64_add_unchecked(rx_status.dropped_packets,
34975+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
34976 #else
34977- atomic_add(rx_status.dropped_packets,
34978- (atomic_t *)&priv->stats.rx_dropped);
34979+ atomic_add_unchecked(rx_status.dropped_packets,
34980+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
34981 #endif
34982 }
34983
34984diff -urNp linux-3.1.1/drivers/staging/octeon/ethernet-rx.c linux-3.1.1/drivers/staging/octeon/ethernet-rx.c
34985--- linux-3.1.1/drivers/staging/octeon/ethernet-rx.c 2011-11-11 15:19:27.000000000 -0500
34986+++ linux-3.1.1/drivers/staging/octeon/ethernet-rx.c 2011-11-16 18:39:07.000000000 -0500
34987@@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi
34988 /* Increment RX stats for virtual ports */
34989 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
34990 #ifdef CONFIG_64BIT
34991- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
34992- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
34993+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
34994+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
34995 #else
34996- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
34997- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
34998+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
34999+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
35000 #endif
35001 }
35002 netif_receive_skb(skb);
35003@@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi
35004 dev->name);
35005 */
35006 #ifdef CONFIG_64BIT
35007- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
35008+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35009 #else
35010- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
35011+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
35012 #endif
35013 dev_kfree_skb_irq(skb);
35014 }
35015diff -urNp linux-3.1.1/drivers/staging/pohmelfs/inode.c linux-3.1.1/drivers/staging/pohmelfs/inode.c
35016--- linux-3.1.1/drivers/staging/pohmelfs/inode.c 2011-11-11 15:19:27.000000000 -0500
35017+++ linux-3.1.1/drivers/staging/pohmelfs/inode.c 2011-11-16 18:39:07.000000000 -0500
35018@@ -1861,7 +1861,7 @@ static int pohmelfs_fill_super(struct su
35019 mutex_init(&psb->mcache_lock);
35020 psb->mcache_root = RB_ROOT;
35021 psb->mcache_timeout = msecs_to_jiffies(5000);
35022- atomic_long_set(&psb->mcache_gen, 0);
35023+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
35024
35025 psb->trans_max_pages = 100;
35026
35027@@ -1876,7 +1876,7 @@ static int pohmelfs_fill_super(struct su
35028 INIT_LIST_HEAD(&psb->crypto_ready_list);
35029 INIT_LIST_HEAD(&psb->crypto_active_list);
35030
35031- atomic_set(&psb->trans_gen, 1);
35032+ atomic_set_unchecked(&psb->trans_gen, 1);
35033 atomic_long_set(&psb->total_inodes, 0);
35034
35035 mutex_init(&psb->state_lock);
35036diff -urNp linux-3.1.1/drivers/staging/pohmelfs/mcache.c linux-3.1.1/drivers/staging/pohmelfs/mcache.c
35037--- linux-3.1.1/drivers/staging/pohmelfs/mcache.c 2011-11-11 15:19:27.000000000 -0500
35038+++ linux-3.1.1/drivers/staging/pohmelfs/mcache.c 2011-11-16 18:39:07.000000000 -0500
35039@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
35040 m->data = data;
35041 m->start = start;
35042 m->size = size;
35043- m->gen = atomic_long_inc_return(&psb->mcache_gen);
35044+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
35045
35046 mutex_lock(&psb->mcache_lock);
35047 err = pohmelfs_mcache_insert(psb, m);
35048diff -urNp linux-3.1.1/drivers/staging/pohmelfs/netfs.h linux-3.1.1/drivers/staging/pohmelfs/netfs.h
35049--- linux-3.1.1/drivers/staging/pohmelfs/netfs.h 2011-11-11 15:19:27.000000000 -0500
35050+++ linux-3.1.1/drivers/staging/pohmelfs/netfs.h 2011-11-16 18:39:07.000000000 -0500
35051@@ -571,14 +571,14 @@ struct pohmelfs_config;
35052 struct pohmelfs_sb {
35053 struct rb_root mcache_root;
35054 struct mutex mcache_lock;
35055- atomic_long_t mcache_gen;
35056+ atomic_long_unchecked_t mcache_gen;
35057 unsigned long mcache_timeout;
35058
35059 unsigned int idx;
35060
35061 unsigned int trans_retries;
35062
35063- atomic_t trans_gen;
35064+ atomic_unchecked_t trans_gen;
35065
35066 unsigned int crypto_attached_size;
35067 unsigned int crypto_align_size;
35068diff -urNp linux-3.1.1/drivers/staging/pohmelfs/trans.c linux-3.1.1/drivers/staging/pohmelfs/trans.c
35069--- linux-3.1.1/drivers/staging/pohmelfs/trans.c 2011-11-11 15:19:27.000000000 -0500
35070+++ linux-3.1.1/drivers/staging/pohmelfs/trans.c 2011-11-16 18:39:07.000000000 -0500
35071@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
35072 int err;
35073 struct netfs_cmd *cmd = t->iovec.iov_base;
35074
35075- t->gen = atomic_inc_return(&psb->trans_gen);
35076+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
35077
35078 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
35079 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
35080diff -urNp linux-3.1.1/drivers/staging/rtl8712/rtl871x_io.h linux-3.1.1/drivers/staging/rtl8712/rtl871x_io.h
35081--- linux-3.1.1/drivers/staging/rtl8712/rtl871x_io.h 2011-11-11 15:19:27.000000000 -0500
35082+++ linux-3.1.1/drivers/staging/rtl8712/rtl871x_io.h 2011-11-16 18:39:07.000000000 -0500
35083@@ -83,7 +83,7 @@ struct _io_ops {
35084 u8 *pmem);
35085 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
35086 u8 *pmem);
35087-};
35088+} __no_const;
35089
35090 struct io_req {
35091 struct list_head list;
35092diff -urNp linux-3.1.1/drivers/staging/sbe-2t3e3/netdev.c linux-3.1.1/drivers/staging/sbe-2t3e3/netdev.c
35093--- linux-3.1.1/drivers/staging/sbe-2t3e3/netdev.c 2011-11-11 15:19:27.000000000 -0500
35094+++ linux-3.1.1/drivers/staging/sbe-2t3e3/netdev.c 2011-11-16 18:39:08.000000000 -0500
35095@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, s
35096 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
35097
35098 if (rlen)
35099- if (copy_to_user(data, &resp, rlen))
35100+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
35101 return -EFAULT;
35102
35103 return 0;
35104diff -urNp linux-3.1.1/drivers/staging/usbip/usbip_common.h linux-3.1.1/drivers/staging/usbip/usbip_common.h
35105--- linux-3.1.1/drivers/staging/usbip/usbip_common.h 2011-11-11 15:19:27.000000000 -0500
35106+++ linux-3.1.1/drivers/staging/usbip/usbip_common.h 2011-11-16 18:39:08.000000000 -0500
35107@@ -289,7 +289,7 @@ struct usbip_device {
35108 void (*shutdown)(struct usbip_device *);
35109 void (*reset)(struct usbip_device *);
35110 void (*unusable)(struct usbip_device *);
35111- } eh_ops;
35112+ } __no_const eh_ops;
35113 };
35114
35115 #if 0
35116diff -urNp linux-3.1.1/drivers/staging/usbip/vhci.h linux-3.1.1/drivers/staging/usbip/vhci.h
35117--- linux-3.1.1/drivers/staging/usbip/vhci.h 2011-11-11 15:19:27.000000000 -0500
35118+++ linux-3.1.1/drivers/staging/usbip/vhci.h 2011-11-16 18:39:08.000000000 -0500
35119@@ -85,7 +85,7 @@ struct vhci_hcd {
35120 unsigned resuming:1;
35121 unsigned long re_timeout;
35122
35123- atomic_t seqnum;
35124+ atomic_unchecked_t seqnum;
35125
35126 /*
35127 * NOTE:
35128diff -urNp linux-3.1.1/drivers/staging/usbip/vhci_hcd.c linux-3.1.1/drivers/staging/usbip/vhci_hcd.c
35129--- linux-3.1.1/drivers/staging/usbip/vhci_hcd.c 2011-11-11 15:19:27.000000000 -0500
35130+++ linux-3.1.1/drivers/staging/usbip/vhci_hcd.c 2011-11-16 18:39:08.000000000 -0500
35131@@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
35132 return;
35133 }
35134
35135- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
35136+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35137 if (priv->seqnum == 0xffff)
35138 dev_info(&urb->dev->dev, "seqnum max\n");
35139
35140@@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_h
35141 return -ENOMEM;
35142 }
35143
35144- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
35145+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35146 if (unlink->seqnum == 0xffff)
35147 pr_info("seqnum max\n");
35148
35149@@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hc
35150 vdev->rhport = rhport;
35151 }
35152
35153- atomic_set(&vhci->seqnum, 0);
35154+ atomic_set_unchecked(&vhci->seqnum, 0);
35155 spin_lock_init(&vhci->lock);
35156
35157 hcd->power_budget = 0; /* no limit */
35158diff -urNp linux-3.1.1/drivers/staging/usbip/vhci_rx.c linux-3.1.1/drivers/staging/usbip/vhci_rx.c
35159--- linux-3.1.1/drivers/staging/usbip/vhci_rx.c 2011-11-11 15:19:27.000000000 -0500
35160+++ linux-3.1.1/drivers/staging/usbip/vhci_rx.c 2011-11-16 18:39:08.000000000 -0500
35161@@ -76,7 +76,7 @@ static void vhci_recv_ret_submit(struct
35162 if (!urb) {
35163 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
35164 pr_info("max seqnum %d\n",
35165- atomic_read(&the_controller->seqnum));
35166+ atomic_read_unchecked(&the_controller->seqnum));
35167 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
35168 return;
35169 }
35170diff -urNp linux-3.1.1/drivers/staging/vt6655/hostap.c linux-3.1.1/drivers/staging/vt6655/hostap.c
35171--- linux-3.1.1/drivers/staging/vt6655/hostap.c 2011-11-11 15:19:27.000000000 -0500
35172+++ linux-3.1.1/drivers/staging/vt6655/hostap.c 2011-11-16 18:39:08.000000000 -0500
35173@@ -79,14 +79,13 @@ static int msglevel
35174 *
35175 */
35176
35177+static net_device_ops_no_const apdev_netdev_ops;
35178+
35179 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35180 {
35181 PSDevice apdev_priv;
35182 struct net_device *dev = pDevice->dev;
35183 int ret;
35184- const struct net_device_ops apdev_netdev_ops = {
35185- .ndo_start_xmit = pDevice->tx_80211,
35186- };
35187
35188 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35189
35190@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevic
35191 *apdev_priv = *pDevice;
35192 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35193
35194+ /* only half broken now */
35195+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35196 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35197
35198 pDevice->apdev->type = ARPHRD_IEEE80211;
35199diff -urNp linux-3.1.1/drivers/staging/vt6656/hostap.c linux-3.1.1/drivers/staging/vt6656/hostap.c
35200--- linux-3.1.1/drivers/staging/vt6656/hostap.c 2011-11-11 15:19:27.000000000 -0500
35201+++ linux-3.1.1/drivers/staging/vt6656/hostap.c 2011-11-16 18:39:08.000000000 -0500
35202@@ -80,14 +80,13 @@ static int msglevel
35203 *
35204 */
35205
35206+static net_device_ops_no_const apdev_netdev_ops;
35207+
35208 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35209 {
35210 PSDevice apdev_priv;
35211 struct net_device *dev = pDevice->dev;
35212 int ret;
35213- const struct net_device_ops apdev_netdev_ops = {
35214- .ndo_start_xmit = pDevice->tx_80211,
35215- };
35216
35217 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35218
35219@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevic
35220 *apdev_priv = *pDevice;
35221 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35222
35223+ /* only half broken now */
35224+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35225 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35226
35227 pDevice->apdev->type = ARPHRD_IEEE80211;
35228diff -urNp linux-3.1.1/drivers/staging/wlan-ng/hfa384x_usb.c linux-3.1.1/drivers/staging/wlan-ng/hfa384x_usb.c
35229--- linux-3.1.1/drivers/staging/wlan-ng/hfa384x_usb.c 2011-11-11 15:19:27.000000000 -0500
35230+++ linux-3.1.1/drivers/staging/wlan-ng/hfa384x_usb.c 2011-11-16 18:39:08.000000000 -0500
35231@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hf
35232
35233 struct usbctlx_completor {
35234 int (*complete) (struct usbctlx_completor *);
35235-};
35236+} __no_const;
35237
35238 static int
35239 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
35240diff -urNp linux-3.1.1/drivers/staging/zcache/tmem.c linux-3.1.1/drivers/staging/zcache/tmem.c
35241--- linux-3.1.1/drivers/staging/zcache/tmem.c 2011-11-11 15:19:27.000000000 -0500
35242+++ linux-3.1.1/drivers/staging/zcache/tmem.c 2011-11-16 18:39:08.000000000 -0500
35243@@ -39,7 +39,7 @@
35244 * A tmem host implementation must use this function to register callbacks
35245 * for memory allocation.
35246 */
35247-static struct tmem_hostops tmem_hostops;
35248+static tmem_hostops_no_const tmem_hostops;
35249
35250 static void tmem_objnode_tree_init(void);
35251
35252@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_h
35253 * A tmem host implementation must use this function to register
35254 * callbacks for a page-accessible memory (PAM) implementation
35255 */
35256-static struct tmem_pamops tmem_pamops;
35257+static tmem_pamops_no_const tmem_pamops;
35258
35259 void tmem_register_pamops(struct tmem_pamops *m)
35260 {
35261diff -urNp linux-3.1.1/drivers/staging/zcache/tmem.h linux-3.1.1/drivers/staging/zcache/tmem.h
35262--- linux-3.1.1/drivers/staging/zcache/tmem.h 2011-11-11 15:19:27.000000000 -0500
35263+++ linux-3.1.1/drivers/staging/zcache/tmem.h 2011-11-16 18:39:08.000000000 -0500
35264@@ -180,6 +180,7 @@ struct tmem_pamops {
35265 void (*new_obj)(struct tmem_obj *);
35266 int (*replace_in_obj)(void *, struct tmem_obj *);
35267 };
35268+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
35269 extern void tmem_register_pamops(struct tmem_pamops *m);
35270
35271 /* memory allocation methods provided by the host implementation */
35272@@ -189,6 +190,7 @@ struct tmem_hostops {
35273 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
35274 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
35275 };
35276+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
35277 extern void tmem_register_hostops(struct tmem_hostops *m);
35278
35279 /* core tmem accessor functions */
35280diff -urNp linux-3.1.1/drivers/target/iscsi/iscsi_target.c linux-3.1.1/drivers/target/iscsi/iscsi_target.c
35281--- linux-3.1.1/drivers/target/iscsi/iscsi_target.c 2011-11-11 15:19:27.000000000 -0500
35282+++ linux-3.1.1/drivers/target/iscsi/iscsi_target.c 2011-11-16 18:39:08.000000000 -0500
35283@@ -1368,7 +1368,7 @@ static int iscsit_handle_data_out(struct
35284 * outstanding_r2ts reaches zero, go ahead and send the delayed
35285 * TASK_ABORTED status.
35286 */
35287- if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
35288+ if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
35289 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
35290 if (--cmd->outstanding_r2ts < 1) {
35291 iscsit_stop_dataout_timer(cmd);
35292diff -urNp linux-3.1.1/drivers/target/target_core_alua.c linux-3.1.1/drivers/target/target_core_alua.c
35293--- linux-3.1.1/drivers/target/target_core_alua.c 2011-11-11 15:19:27.000000000 -0500
35294+++ linux-3.1.1/drivers/target/target_core_alua.c 2011-11-16 18:40:29.000000000 -0500
35295@@ -723,6 +723,8 @@ static int core_alua_update_tpg_primary_
35296 char path[ALUA_METADATA_PATH_LEN];
35297 int len;
35298
35299+ pax_track_stack();
35300+
35301 memset(path, 0, ALUA_METADATA_PATH_LEN);
35302
35303 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
35304@@ -986,6 +988,8 @@ static int core_alua_update_tpg_secondar
35305 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
35306 int len;
35307
35308+ pax_track_stack();
35309+
35310 memset(path, 0, ALUA_METADATA_PATH_LEN);
35311 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
35312
35313diff -urNp linux-3.1.1/drivers/target/target_core_cdb.c linux-3.1.1/drivers/target/target_core_cdb.c
35314--- linux-3.1.1/drivers/target/target_core_cdb.c 2011-11-11 15:19:27.000000000 -0500
35315+++ linux-3.1.1/drivers/target/target_core_cdb.c 2011-11-16 18:40:29.000000000 -0500
35316@@ -933,6 +933,8 @@ target_emulate_modesense(struct se_cmd *
35317 int length = 0;
35318 unsigned char buf[SE_MODE_PAGE_BUF];
35319
35320+ pax_track_stack();
35321+
35322 memset(buf, 0, SE_MODE_PAGE_BUF);
35323
35324 switch (cdb[2] & 0x3f) {
35325diff -urNp linux-3.1.1/drivers/target/target_core_configfs.c linux-3.1.1/drivers/target/target_core_configfs.c
35326--- linux-3.1.1/drivers/target/target_core_configfs.c 2011-11-11 15:19:27.000000000 -0500
35327+++ linux-3.1.1/drivers/target/target_core_configfs.c 2011-11-16 19:04:37.000000000 -0500
35328@@ -1267,6 +1267,8 @@ static ssize_t target_core_dev_pr_show_a
35329 ssize_t len = 0;
35330 int reg_count = 0, prf_isid;
35331
35332+ pax_track_stack();
35333+
35334 if (!su_dev->se_dev_ptr)
35335 return -ENODEV;
35336
35337diff -urNp linux-3.1.1/drivers/target/target_core_pr.c linux-3.1.1/drivers/target/target_core_pr.c
35338--- linux-3.1.1/drivers/target/target_core_pr.c 2011-11-11 15:19:27.000000000 -0500
35339+++ linux-3.1.1/drivers/target/target_core_pr.c 2011-11-16 18:40:29.000000000 -0500
35340@@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
35341 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
35342 u16 tpgt;
35343
35344+ pax_track_stack();
35345+
35346 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
35347 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
35348 /*
35349@@ -1867,6 +1869,8 @@ static int __core_scsi3_update_aptpl_buf
35350 ssize_t len = 0;
35351 int reg_count = 0;
35352
35353+ pax_track_stack();
35354+
35355 memset(buf, 0, pr_aptpl_buf_len);
35356 /*
35357 * Called to clear metadata once APTPL has been deactivated.
35358@@ -1989,6 +1993,8 @@ static int __core_scsi3_write_aptpl_to_f
35359 char path[512];
35360 int ret;
35361
35362+ pax_track_stack();
35363+
35364 memset(iov, 0, sizeof(struct iovec));
35365 memset(path, 0, 512);
35366
35367diff -urNp linux-3.1.1/drivers/target/target_core_tmr.c linux-3.1.1/drivers/target/target_core_tmr.c
35368--- linux-3.1.1/drivers/target/target_core_tmr.c 2011-11-11 15:19:27.000000000 -0500
35369+++ linux-3.1.1/drivers/target/target_core_tmr.c 2011-11-16 18:39:08.000000000 -0500
35370@@ -255,7 +255,7 @@ static void core_tmr_drain_task_list(
35371 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
35372 cmd->t_task_list_num,
35373 atomic_read(&cmd->t_task_cdbs_left),
35374- atomic_read(&cmd->t_task_cdbs_sent),
35375+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35376 atomic_read(&cmd->t_transport_active),
35377 atomic_read(&cmd->t_transport_stop),
35378 atomic_read(&cmd->t_transport_sent));
35379@@ -291,7 +291,7 @@ static void core_tmr_drain_task_list(
35380 pr_debug("LUN_RESET: got t_transport_active = 1 for"
35381 " task: %p, t_fe_count: %d dev: %p\n", task,
35382 fe_count, dev);
35383- atomic_set(&cmd->t_transport_aborted, 1);
35384+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35385 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35386
35387 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35388@@ -299,7 +299,7 @@ static void core_tmr_drain_task_list(
35389 }
35390 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
35391 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
35392- atomic_set(&cmd->t_transport_aborted, 1);
35393+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35394 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35395
35396 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35397diff -urNp linux-3.1.1/drivers/target/target_core_transport.c linux-3.1.1/drivers/target/target_core_transport.c
35398--- linux-3.1.1/drivers/target/target_core_transport.c 2011-11-11 15:19:27.000000000 -0500
35399+++ linux-3.1.1/drivers/target/target_core_transport.c 2011-11-16 18:39:08.000000000 -0500
35400@@ -1445,7 +1445,7 @@ struct se_device *transport_add_device_t
35401
35402 dev->queue_depth = dev_limits->queue_depth;
35403 atomic_set(&dev->depth_left, dev->queue_depth);
35404- atomic_set(&dev->dev_ordered_id, 0);
35405+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
35406
35407 se_dev_set_default_attribs(dev, dev_limits);
35408
35409@@ -1633,7 +1633,7 @@ static int transport_check_alloc_task_at
35410 * Used to determine when ORDERED commands should go from
35411 * Dormant to Active status.
35412 */
35413- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
35414+ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
35415 smp_mb__after_atomic_inc();
35416 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
35417 cmd->se_ordered_id, cmd->sam_task_attr,
35418@@ -1960,7 +1960,7 @@ static void transport_generic_request_fa
35419 " t_transport_active: %d t_transport_stop: %d"
35420 " t_transport_sent: %d\n", cmd->t_task_list_num,
35421 atomic_read(&cmd->t_task_cdbs_left),
35422- atomic_read(&cmd->t_task_cdbs_sent),
35423+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35424 atomic_read(&cmd->t_task_cdbs_ex_left),
35425 atomic_read(&cmd->t_transport_active),
35426 atomic_read(&cmd->t_transport_stop),
35427@@ -2460,9 +2460,9 @@ check_depth:
35428 spin_lock_irqsave(&cmd->t_state_lock, flags);
35429 atomic_set(&task->task_active, 1);
35430 atomic_set(&task->task_sent, 1);
35431- atomic_inc(&cmd->t_task_cdbs_sent);
35432+ atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
35433
35434- if (atomic_read(&cmd->t_task_cdbs_sent) ==
35435+ if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
35436 cmd->t_task_list_num)
35437 atomic_set(&cmd->transport_sent, 1);
35438
35439@@ -4665,7 +4665,7 @@ static void transport_generic_wait_for_t
35440 atomic_set(&cmd->transport_lun_stop, 0);
35441 }
35442 if (!atomic_read(&cmd->t_transport_active) ||
35443- atomic_read(&cmd->t_transport_aborted))
35444+ atomic_read_unchecked(&cmd->t_transport_aborted))
35445 goto remove;
35446
35447 atomic_set(&cmd->t_transport_stop, 1);
35448@@ -4900,7 +4900,7 @@ int transport_check_aborted_status(struc
35449 {
35450 int ret = 0;
35451
35452- if (atomic_read(&cmd->t_transport_aborted) != 0) {
35453+ if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
35454 if (!send_status ||
35455 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
35456 return 1;
35457@@ -4937,7 +4937,7 @@ void transport_send_task_abort(struct se
35458 */
35459 if (cmd->data_direction == DMA_TO_DEVICE) {
35460 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
35461- atomic_inc(&cmd->t_transport_aborted);
35462+ atomic_inc_unchecked(&cmd->t_transport_aborted);
35463 smp_mb__after_atomic_inc();
35464 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
35465 transport_new_cmd_failure(cmd);
35466@@ -5051,7 +5051,7 @@ static void transport_processing_shutdow
35467 cmd->se_tfo->get_task_tag(cmd),
35468 cmd->t_task_list_num,
35469 atomic_read(&cmd->t_task_cdbs_left),
35470- atomic_read(&cmd->t_task_cdbs_sent),
35471+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35472 atomic_read(&cmd->t_transport_active),
35473 atomic_read(&cmd->t_transport_stop),
35474 atomic_read(&cmd->t_transport_sent));
35475diff -urNp linux-3.1.1/drivers/telephony/ixj.c linux-3.1.1/drivers/telephony/ixj.c
35476--- linux-3.1.1/drivers/telephony/ixj.c 2011-11-11 15:19:27.000000000 -0500
35477+++ linux-3.1.1/drivers/telephony/ixj.c 2011-11-16 18:40:29.000000000 -0500
35478@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
35479 bool mContinue;
35480 char *pIn, *pOut;
35481
35482+ pax_track_stack();
35483+
35484 if (!SCI_Prepare(j))
35485 return 0;
35486
35487diff -urNp linux-3.1.1/drivers/tty/hvc/hvcs.c linux-3.1.1/drivers/tty/hvc/hvcs.c
35488--- linux-3.1.1/drivers/tty/hvc/hvcs.c 2011-11-11 15:19:27.000000000 -0500
35489+++ linux-3.1.1/drivers/tty/hvc/hvcs.c 2011-11-16 18:39:08.000000000 -0500
35490@@ -83,6 +83,7 @@
35491 #include <asm/hvcserver.h>
35492 #include <asm/uaccess.h>
35493 #include <asm/vio.h>
35494+#include <asm/local.h>
35495
35496 /*
35497 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
35498@@ -270,7 +271,7 @@ struct hvcs_struct {
35499 unsigned int index;
35500
35501 struct tty_struct *tty;
35502- int open_count;
35503+ local_t open_count;
35504
35505 /*
35506 * Used to tell the driver kernel_thread what operations need to take
35507@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
35508
35509 spin_lock_irqsave(&hvcsd->lock, flags);
35510
35511- if (hvcsd->open_count > 0) {
35512+ if (local_read(&hvcsd->open_count) > 0) {
35513 spin_unlock_irqrestore(&hvcsd->lock, flags);
35514 printk(KERN_INFO "HVCS: vterm state unchanged. "
35515 "The hvcs device node is still in use.\n");
35516@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
35517 if ((retval = hvcs_partner_connect(hvcsd)))
35518 goto error_release;
35519
35520- hvcsd->open_count = 1;
35521+ local_set(&hvcsd->open_count, 1);
35522 hvcsd->tty = tty;
35523 tty->driver_data = hvcsd;
35524
35525@@ -1179,7 +1180,7 @@ fast_open:
35526
35527 spin_lock_irqsave(&hvcsd->lock, flags);
35528 kref_get(&hvcsd->kref);
35529- hvcsd->open_count++;
35530+ local_inc(&hvcsd->open_count);
35531 hvcsd->todo_mask |= HVCS_SCHED_READ;
35532 spin_unlock_irqrestore(&hvcsd->lock, flags);
35533
35534@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
35535 hvcsd = tty->driver_data;
35536
35537 spin_lock_irqsave(&hvcsd->lock, flags);
35538- if (--hvcsd->open_count == 0) {
35539+ if (local_dec_and_test(&hvcsd->open_count)) {
35540
35541 vio_disable_interrupts(hvcsd->vdev);
35542
35543@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
35544 free_irq(irq, hvcsd);
35545 kref_put(&hvcsd->kref, destroy_hvcs_struct);
35546 return;
35547- } else if (hvcsd->open_count < 0) {
35548+ } else if (local_read(&hvcsd->open_count) < 0) {
35549 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
35550 " is missmanaged.\n",
35551- hvcsd->vdev->unit_address, hvcsd->open_count);
35552+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
35553 }
35554
35555 spin_unlock_irqrestore(&hvcsd->lock, flags);
35556@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
35557
35558 spin_lock_irqsave(&hvcsd->lock, flags);
35559 /* Preserve this so that we know how many kref refs to put */
35560- temp_open_count = hvcsd->open_count;
35561+ temp_open_count = local_read(&hvcsd->open_count);
35562
35563 /*
35564 * Don't kref put inside the spinlock because the destruction
35565@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
35566 hvcsd->tty->driver_data = NULL;
35567 hvcsd->tty = NULL;
35568
35569- hvcsd->open_count = 0;
35570+ local_set(&hvcsd->open_count, 0);
35571
35572 /* This will drop any buffered data on the floor which is OK in a hangup
35573 * scenario. */
35574@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct
35575 * the middle of a write operation? This is a crummy place to do this
35576 * but we want to keep it all in the spinlock.
35577 */
35578- if (hvcsd->open_count <= 0) {
35579+ if (local_read(&hvcsd->open_count) <= 0) {
35580 spin_unlock_irqrestore(&hvcsd->lock, flags);
35581 return -ENODEV;
35582 }
35583@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
35584 {
35585 struct hvcs_struct *hvcsd = tty->driver_data;
35586
35587- if (!hvcsd || hvcsd->open_count <= 0)
35588+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
35589 return 0;
35590
35591 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
35592diff -urNp linux-3.1.1/drivers/tty/ipwireless/tty.c linux-3.1.1/drivers/tty/ipwireless/tty.c
35593--- linux-3.1.1/drivers/tty/ipwireless/tty.c 2011-11-11 15:19:27.000000000 -0500
35594+++ linux-3.1.1/drivers/tty/ipwireless/tty.c 2011-11-16 18:39:08.000000000 -0500
35595@@ -29,6 +29,7 @@
35596 #include <linux/tty_driver.h>
35597 #include <linux/tty_flip.h>
35598 #include <linux/uaccess.h>
35599+#include <asm/local.h>
35600
35601 #include "tty.h"
35602 #include "network.h"
35603@@ -51,7 +52,7 @@ struct ipw_tty {
35604 int tty_type;
35605 struct ipw_network *network;
35606 struct tty_struct *linux_tty;
35607- int open_count;
35608+ local_t open_count;
35609 unsigned int control_lines;
35610 struct mutex ipw_tty_mutex;
35611 int tx_bytes_queued;
35612@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
35613 mutex_unlock(&tty->ipw_tty_mutex);
35614 return -ENODEV;
35615 }
35616- if (tty->open_count == 0)
35617+ if (local_read(&tty->open_count) == 0)
35618 tty->tx_bytes_queued = 0;
35619
35620- tty->open_count++;
35621+ local_inc(&tty->open_count);
35622
35623 tty->linux_tty = linux_tty;
35624 linux_tty->driver_data = tty;
35625@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
35626
35627 static void do_ipw_close(struct ipw_tty *tty)
35628 {
35629- tty->open_count--;
35630-
35631- if (tty->open_count == 0) {
35632+ if (local_dec_return(&tty->open_count) == 0) {
35633 struct tty_struct *linux_tty = tty->linux_tty;
35634
35635 if (linux_tty != NULL) {
35636@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
35637 return;
35638
35639 mutex_lock(&tty->ipw_tty_mutex);
35640- if (tty->open_count == 0) {
35641+ if (local_read(&tty->open_count) == 0) {
35642 mutex_unlock(&tty->ipw_tty_mutex);
35643 return;
35644 }
35645@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
35646 return;
35647 }
35648
35649- if (!tty->open_count) {
35650+ if (!local_read(&tty->open_count)) {
35651 mutex_unlock(&tty->ipw_tty_mutex);
35652 return;
35653 }
35654@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
35655 return -ENODEV;
35656
35657 mutex_lock(&tty->ipw_tty_mutex);
35658- if (!tty->open_count) {
35659+ if (!local_read(&tty->open_count)) {
35660 mutex_unlock(&tty->ipw_tty_mutex);
35661 return -EINVAL;
35662 }
35663@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
35664 if (!tty)
35665 return -ENODEV;
35666
35667- if (!tty->open_count)
35668+ if (!local_read(&tty->open_count))
35669 return -EINVAL;
35670
35671 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
35672@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
35673 if (!tty)
35674 return 0;
35675
35676- if (!tty->open_count)
35677+ if (!local_read(&tty->open_count))
35678 return 0;
35679
35680 return tty->tx_bytes_queued;
35681@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
35682 if (!tty)
35683 return -ENODEV;
35684
35685- if (!tty->open_count)
35686+ if (!local_read(&tty->open_count))
35687 return -EINVAL;
35688
35689 return get_control_lines(tty);
35690@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
35691 if (!tty)
35692 return -ENODEV;
35693
35694- if (!tty->open_count)
35695+ if (!local_read(&tty->open_count))
35696 return -EINVAL;
35697
35698 return set_control_lines(tty, set, clear);
35699@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
35700 if (!tty)
35701 return -ENODEV;
35702
35703- if (!tty->open_count)
35704+ if (!local_read(&tty->open_count))
35705 return -EINVAL;
35706
35707 /* FIXME: Exactly how is the tty object locked here .. */
35708@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty
35709 against a parallel ioctl etc */
35710 mutex_lock(&ttyj->ipw_tty_mutex);
35711 }
35712- while (ttyj->open_count)
35713+ while (local_read(&ttyj->open_count))
35714 do_ipw_close(ttyj);
35715 ipwireless_disassociate_network_ttys(network,
35716 ttyj->channel_idx);
35717diff -urNp linux-3.1.1/drivers/tty/n_gsm.c linux-3.1.1/drivers/tty/n_gsm.c
35718--- linux-3.1.1/drivers/tty/n_gsm.c 2011-11-11 15:19:27.000000000 -0500
35719+++ linux-3.1.1/drivers/tty/n_gsm.c 2011-11-16 18:39:08.000000000 -0500
35720@@ -1625,7 +1625,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
35721 kref_init(&dlci->ref);
35722 mutex_init(&dlci->mutex);
35723 dlci->fifo = &dlci->_fifo;
35724- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
35725+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
35726 kfree(dlci);
35727 return NULL;
35728 }
35729diff -urNp linux-3.1.1/drivers/tty/n_tty.c linux-3.1.1/drivers/tty/n_tty.c
35730--- linux-3.1.1/drivers/tty/n_tty.c 2011-11-11 15:19:27.000000000 -0500
35731+++ linux-3.1.1/drivers/tty/n_tty.c 2011-11-16 18:39:08.000000000 -0500
35732@@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
35733 {
35734 *ops = tty_ldisc_N_TTY;
35735 ops->owner = NULL;
35736- ops->refcount = ops->flags = 0;
35737+ atomic_set(&ops->refcount, 0);
35738+ ops->flags = 0;
35739 }
35740 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
35741diff -urNp linux-3.1.1/drivers/tty/pty.c linux-3.1.1/drivers/tty/pty.c
35742--- linux-3.1.1/drivers/tty/pty.c 2011-11-11 15:19:27.000000000 -0500
35743+++ linux-3.1.1/drivers/tty/pty.c 2011-11-16 18:39:08.000000000 -0500
35744@@ -773,8 +773,10 @@ static void __init unix98_pty_init(void)
35745 register_sysctl_table(pty_root_table);
35746
35747 /* Now create the /dev/ptmx special device */
35748+ pax_open_kernel();
35749 tty_default_fops(&ptmx_fops);
35750- ptmx_fops.open = ptmx_open;
35751+ *(void **)&ptmx_fops.open = ptmx_open;
35752+ pax_close_kernel();
35753
35754 cdev_init(&ptmx_cdev, &ptmx_fops);
35755 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
35756diff -urNp linux-3.1.1/drivers/tty/rocket.c linux-3.1.1/drivers/tty/rocket.c
35757--- linux-3.1.1/drivers/tty/rocket.c 2011-11-11 15:19:27.000000000 -0500
35758+++ linux-3.1.1/drivers/tty/rocket.c 2011-11-16 18:40:29.000000000 -0500
35759@@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
35760 struct rocket_ports tmp;
35761 int board;
35762
35763+ pax_track_stack();
35764+
35765 if (!retports)
35766 return -EFAULT;
35767 memset(&tmp, 0, sizeof (tmp));
35768diff -urNp linux-3.1.1/drivers/tty/serial/kgdboc.c linux-3.1.1/drivers/tty/serial/kgdboc.c
35769--- linux-3.1.1/drivers/tty/serial/kgdboc.c 2011-11-11 15:19:27.000000000 -0500
35770+++ linux-3.1.1/drivers/tty/serial/kgdboc.c 2011-11-16 18:39:08.000000000 -0500
35771@@ -23,8 +23,9 @@
35772 #define MAX_CONFIG_LEN 40
35773
35774 static struct kgdb_io kgdboc_io_ops;
35775+static struct kgdb_io kgdboc_io_ops_console;
35776
35777-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
35778+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
35779 static int configured = -1;
35780
35781 static char config[MAX_CONFIG_LEN];
35782@@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
35783 kgdboc_unregister_kbd();
35784 if (configured == 1)
35785 kgdb_unregister_io_module(&kgdboc_io_ops);
35786+ else if (configured == 2)
35787+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
35788 }
35789
35790 static int configure_kgdboc(void)
35791@@ -156,13 +159,13 @@ static int configure_kgdboc(void)
35792 int err;
35793 char *cptr = config;
35794 struct console *cons;
35795+ int is_console = 0;
35796
35797 err = kgdboc_option_setup(config);
35798 if (err || !strlen(config) || isspace(config[0]))
35799 goto noconfig;
35800
35801 err = -ENODEV;
35802- kgdboc_io_ops.is_console = 0;
35803 kgdb_tty_driver = NULL;
35804
35805 kgdboc_use_kms = 0;
35806@@ -183,7 +186,7 @@ static int configure_kgdboc(void)
35807 int idx;
35808 if (cons->device && cons->device(cons, &idx) == p &&
35809 idx == tty_line) {
35810- kgdboc_io_ops.is_console = 1;
35811+ is_console = 1;
35812 break;
35813 }
35814 cons = cons->next;
35815@@ -193,12 +196,16 @@ static int configure_kgdboc(void)
35816 kgdb_tty_line = tty_line;
35817
35818 do_register:
35819- err = kgdb_register_io_module(&kgdboc_io_ops);
35820+ if (is_console) {
35821+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
35822+ configured = 2;
35823+ } else {
35824+ err = kgdb_register_io_module(&kgdboc_io_ops);
35825+ configured = 1;
35826+ }
35827 if (err)
35828 goto noconfig;
35829
35830- configured = 1;
35831-
35832 return 0;
35833
35834 noconfig:
35835@@ -212,7 +219,7 @@ noconfig:
35836 static int __init init_kgdboc(void)
35837 {
35838 /* Already configured? */
35839- if (configured == 1)
35840+ if (configured >= 1)
35841 return 0;
35842
35843 return configure_kgdboc();
35844@@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const ch
35845 if (config[len - 1] == '\n')
35846 config[len - 1] = '\0';
35847
35848- if (configured == 1)
35849+ if (configured >= 1)
35850 cleanup_kgdboc();
35851
35852 /* Go and configure with the new params. */
35853@@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
35854 .post_exception = kgdboc_post_exp_handler,
35855 };
35856
35857+static struct kgdb_io kgdboc_io_ops_console = {
35858+ .name = "kgdboc",
35859+ .read_char = kgdboc_get_char,
35860+ .write_char = kgdboc_put_char,
35861+ .pre_exception = kgdboc_pre_exp_handler,
35862+ .post_exception = kgdboc_post_exp_handler,
35863+ .is_console = 1
35864+};
35865+
35866 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
35867 /* This is only available if kgdboc is a built in for early debugging */
35868 static int __init kgdboc_early_init(char *opt)
35869diff -urNp linux-3.1.1/drivers/tty/serial/mfd.c linux-3.1.1/drivers/tty/serial/mfd.c
35870--- linux-3.1.1/drivers/tty/serial/mfd.c 2011-11-11 15:19:27.000000000 -0500
35871+++ linux-3.1.1/drivers/tty/serial/mfd.c 2011-11-16 18:39:08.000000000 -0500
35872@@ -1423,7 +1423,7 @@ static void serial_hsu_remove(struct pci
35873 }
35874
35875 /* First 3 are UART ports, and the 4th is the DMA */
35876-static const struct pci_device_id pci_ids[] __devinitdata = {
35877+static const struct pci_device_id pci_ids[] __devinitconst = {
35878 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081B) },
35879 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081C) },
35880 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081D) },
35881diff -urNp linux-3.1.1/drivers/tty/serial/mrst_max3110.c linux-3.1.1/drivers/tty/serial/mrst_max3110.c
35882--- linux-3.1.1/drivers/tty/serial/mrst_max3110.c 2011-11-11 15:19:27.000000000 -0500
35883+++ linux-3.1.1/drivers/tty/serial/mrst_max3110.c 2011-11-16 18:40:29.000000000 -0500
35884@@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
35885 int loop = 1, num, total = 0;
35886 u8 recv_buf[512], *pbuf;
35887
35888+ pax_track_stack();
35889+
35890 pbuf = recv_buf;
35891 do {
35892 num = max3110_read_multi(max, pbuf);
35893diff -urNp linux-3.1.1/drivers/tty/tty_io.c linux-3.1.1/drivers/tty/tty_io.c
35894--- linux-3.1.1/drivers/tty/tty_io.c 2011-11-11 15:19:27.000000000 -0500
35895+++ linux-3.1.1/drivers/tty/tty_io.c 2011-11-16 18:39:08.000000000 -0500
35896@@ -3238,7 +3238,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
35897
35898 void tty_default_fops(struct file_operations *fops)
35899 {
35900- *fops = tty_fops;
35901+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
35902 }
35903
35904 /*
35905diff -urNp linux-3.1.1/drivers/tty/tty_ldisc.c linux-3.1.1/drivers/tty/tty_ldisc.c
35906--- linux-3.1.1/drivers/tty/tty_ldisc.c 2011-11-11 15:19:27.000000000 -0500
35907+++ linux-3.1.1/drivers/tty/tty_ldisc.c 2011-11-16 18:39:08.000000000 -0500
35908@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
35909 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
35910 struct tty_ldisc_ops *ldo = ld->ops;
35911
35912- ldo->refcount--;
35913+ atomic_dec(&ldo->refcount);
35914 module_put(ldo->owner);
35915 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35916
35917@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
35918 spin_lock_irqsave(&tty_ldisc_lock, flags);
35919 tty_ldiscs[disc] = new_ldisc;
35920 new_ldisc->num = disc;
35921- new_ldisc->refcount = 0;
35922+ atomic_set(&new_ldisc->refcount, 0);
35923 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35924
35925 return ret;
35926@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
35927 return -EINVAL;
35928
35929 spin_lock_irqsave(&tty_ldisc_lock, flags);
35930- if (tty_ldiscs[disc]->refcount)
35931+ if (atomic_read(&tty_ldiscs[disc]->refcount))
35932 ret = -EBUSY;
35933 else
35934 tty_ldiscs[disc] = NULL;
35935@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
35936 if (ldops) {
35937 ret = ERR_PTR(-EAGAIN);
35938 if (try_module_get(ldops->owner)) {
35939- ldops->refcount++;
35940+ atomic_inc(&ldops->refcount);
35941 ret = ldops;
35942 }
35943 }
35944@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
35945 unsigned long flags;
35946
35947 spin_lock_irqsave(&tty_ldisc_lock, flags);
35948- ldops->refcount--;
35949+ atomic_dec(&ldops->refcount);
35950 module_put(ldops->owner);
35951 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35952 }
35953diff -urNp linux-3.1.1/drivers/tty/vt/keyboard.c linux-3.1.1/drivers/tty/vt/keyboard.c
35954--- linux-3.1.1/drivers/tty/vt/keyboard.c 2011-11-11 15:19:27.000000000 -0500
35955+++ linux-3.1.1/drivers/tty/vt/keyboard.c 2011-11-16 18:40:29.000000000 -0500
35956@@ -656,6 +656,16 @@ static void k_spec(struct vc_data *vc, u
35957 kbd->kbdmode == VC_OFF) &&
35958 value != KVAL(K_SAK))
35959 return; /* SAK is allowed even in raw mode */
35960+
35961+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
35962+ {
35963+ void *func = fn_handler[value];
35964+ if (func == fn_show_state || func == fn_show_ptregs ||
35965+ func == fn_show_mem)
35966+ return;
35967+ }
35968+#endif
35969+
35970 fn_handler[value](vc);
35971 }
35972
35973diff -urNp linux-3.1.1/drivers/tty/vt/vt.c linux-3.1.1/drivers/tty/vt/vt.c
35974--- linux-3.1.1/drivers/tty/vt/vt.c 2011-11-11 15:19:27.000000000 -0500
35975+++ linux-3.1.1/drivers/tty/vt/vt.c 2011-11-16 18:39:08.000000000 -0500
35976@@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
35977
35978 static void notify_write(struct vc_data *vc, unsigned int unicode)
35979 {
35980- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
35981+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
35982 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
35983 }
35984
35985diff -urNp linux-3.1.1/drivers/tty/vt/vt_ioctl.c linux-3.1.1/drivers/tty/vt/vt_ioctl.c
35986--- linux-3.1.1/drivers/tty/vt/vt_ioctl.c 2011-11-11 15:19:27.000000000 -0500
35987+++ linux-3.1.1/drivers/tty/vt/vt_ioctl.c 2011-11-16 18:40:29.000000000 -0500
35988@@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
35989 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
35990 return -EFAULT;
35991
35992- if (!capable(CAP_SYS_TTY_CONFIG))
35993- perm = 0;
35994-
35995 switch (cmd) {
35996 case KDGKBENT:
35997 key_map = key_maps[s];
35998@@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
35999 val = (i ? K_HOLE : K_NOSUCHMAP);
36000 return put_user(val, &user_kbe->kb_value);
36001 case KDSKBENT:
36002+ if (!capable(CAP_SYS_TTY_CONFIG))
36003+ perm = 0;
36004+
36005 if (!perm)
36006 return -EPERM;
36007 if (!i && v == K_NOSUCHMAP) {
36008@@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
36009 int i, j, k;
36010 int ret;
36011
36012- if (!capable(CAP_SYS_TTY_CONFIG))
36013- perm = 0;
36014-
36015 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
36016 if (!kbs) {
36017 ret = -ENOMEM;
36018@@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
36019 kfree(kbs);
36020 return ((p && *p) ? -EOVERFLOW : 0);
36021 case KDSKBSENT:
36022+ if (!capable(CAP_SYS_TTY_CONFIG))
36023+ perm = 0;
36024+
36025 if (!perm) {
36026 ret = -EPERM;
36027 goto reterr;
36028diff -urNp linux-3.1.1/drivers/uio/uio.c linux-3.1.1/drivers/uio/uio.c
36029--- linux-3.1.1/drivers/uio/uio.c 2011-11-11 15:19:27.000000000 -0500
36030+++ linux-3.1.1/drivers/uio/uio.c 2011-11-16 18:39:08.000000000 -0500
36031@@ -25,6 +25,7 @@
36032 #include <linux/kobject.h>
36033 #include <linux/cdev.h>
36034 #include <linux/uio_driver.h>
36035+#include <asm/local.h>
36036
36037 #define UIO_MAX_DEVICES (1U << MINORBITS)
36038
36039@@ -32,10 +33,10 @@ struct uio_device {
36040 struct module *owner;
36041 struct device *dev;
36042 int minor;
36043- atomic_t event;
36044+ atomic_unchecked_t event;
36045 struct fasync_struct *async_queue;
36046 wait_queue_head_t wait;
36047- int vma_count;
36048+ local_t vma_count;
36049 struct uio_info *info;
36050 struct kobject *map_dir;
36051 struct kobject *portio_dir;
36052@@ -242,7 +243,7 @@ static ssize_t show_event(struct device
36053 struct device_attribute *attr, char *buf)
36054 {
36055 struct uio_device *idev = dev_get_drvdata(dev);
36056- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
36057+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
36058 }
36059
36060 static struct device_attribute uio_class_attributes[] = {
36061@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *i
36062 {
36063 struct uio_device *idev = info->uio_dev;
36064
36065- atomic_inc(&idev->event);
36066+ atomic_inc_unchecked(&idev->event);
36067 wake_up_interruptible(&idev->wait);
36068 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
36069 }
36070@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode,
36071 }
36072
36073 listener->dev = idev;
36074- listener->event_count = atomic_read(&idev->event);
36075+ listener->event_count = atomic_read_unchecked(&idev->event);
36076 filep->private_data = listener;
36077
36078 if (idev->info->open) {
36079@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file
36080 return -EIO;
36081
36082 poll_wait(filep, &idev->wait, wait);
36083- if (listener->event_count != atomic_read(&idev->event))
36084+ if (listener->event_count != atomic_read_unchecked(&idev->event))
36085 return POLLIN | POLLRDNORM;
36086 return 0;
36087 }
36088@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *fil
36089 do {
36090 set_current_state(TASK_INTERRUPTIBLE);
36091
36092- event_count = atomic_read(&idev->event);
36093+ event_count = atomic_read_unchecked(&idev->event);
36094 if (event_count != listener->event_count) {
36095 if (copy_to_user(buf, &event_count, count))
36096 retval = -EFAULT;
36097@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_
36098 static void uio_vma_open(struct vm_area_struct *vma)
36099 {
36100 struct uio_device *idev = vma->vm_private_data;
36101- idev->vma_count++;
36102+ local_inc(&idev->vma_count);
36103 }
36104
36105 static void uio_vma_close(struct vm_area_struct *vma)
36106 {
36107 struct uio_device *idev = vma->vm_private_data;
36108- idev->vma_count--;
36109+ local_dec(&idev->vma_count);
36110 }
36111
36112 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
36113@@ -823,7 +824,7 @@ int __uio_register_device(struct module
36114 idev->owner = owner;
36115 idev->info = info;
36116 init_waitqueue_head(&idev->wait);
36117- atomic_set(&idev->event, 0);
36118+ atomic_set_unchecked(&idev->event, 0);
36119
36120 ret = uio_get_minor(idev);
36121 if (ret)
36122diff -urNp linux-3.1.1/drivers/usb/atm/cxacru.c linux-3.1.1/drivers/usb/atm/cxacru.c
36123--- linux-3.1.1/drivers/usb/atm/cxacru.c 2011-11-11 15:19:27.000000000 -0500
36124+++ linux-3.1.1/drivers/usb/atm/cxacru.c 2011-11-16 18:39:08.000000000 -0500
36125@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
36126 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
36127 if (ret < 2)
36128 return -EINVAL;
36129- if (index < 0 || index > 0x7f)
36130+ if (index > 0x7f)
36131 return -EINVAL;
36132 pos += tmp;
36133
36134diff -urNp linux-3.1.1/drivers/usb/atm/usbatm.c linux-3.1.1/drivers/usb/atm/usbatm.c
36135--- linux-3.1.1/drivers/usb/atm/usbatm.c 2011-11-11 15:19:27.000000000 -0500
36136+++ linux-3.1.1/drivers/usb/atm/usbatm.c 2011-11-16 18:39:08.000000000 -0500
36137@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
36138 if (printk_ratelimit())
36139 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
36140 __func__, vpi, vci);
36141- atomic_inc(&vcc->stats->rx_err);
36142+ atomic_inc_unchecked(&vcc->stats->rx_err);
36143 return;
36144 }
36145
36146@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
36147 if (length > ATM_MAX_AAL5_PDU) {
36148 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
36149 __func__, length, vcc);
36150- atomic_inc(&vcc->stats->rx_err);
36151+ atomic_inc_unchecked(&vcc->stats->rx_err);
36152 goto out;
36153 }
36154
36155@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
36156 if (sarb->len < pdu_length) {
36157 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
36158 __func__, pdu_length, sarb->len, vcc);
36159- atomic_inc(&vcc->stats->rx_err);
36160+ atomic_inc_unchecked(&vcc->stats->rx_err);
36161 goto out;
36162 }
36163
36164 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
36165 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
36166 __func__, vcc);
36167- atomic_inc(&vcc->stats->rx_err);
36168+ atomic_inc_unchecked(&vcc->stats->rx_err);
36169 goto out;
36170 }
36171
36172@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
36173 if (printk_ratelimit())
36174 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
36175 __func__, length);
36176- atomic_inc(&vcc->stats->rx_drop);
36177+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36178 goto out;
36179 }
36180
36181@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
36182
36183 vcc->push(vcc, skb);
36184
36185- atomic_inc(&vcc->stats->rx);
36186+ atomic_inc_unchecked(&vcc->stats->rx);
36187 out:
36188 skb_trim(sarb, 0);
36189 }
36190@@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned l
36191 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
36192
36193 usbatm_pop(vcc, skb);
36194- atomic_inc(&vcc->stats->tx);
36195+ atomic_inc_unchecked(&vcc->stats->tx);
36196
36197 skb = skb_dequeue(&instance->sndqueue);
36198 }
36199@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
36200 if (!left--)
36201 return sprintf(page,
36202 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
36203- atomic_read(&atm_dev->stats.aal5.tx),
36204- atomic_read(&atm_dev->stats.aal5.tx_err),
36205- atomic_read(&atm_dev->stats.aal5.rx),
36206- atomic_read(&atm_dev->stats.aal5.rx_err),
36207- atomic_read(&atm_dev->stats.aal5.rx_drop));
36208+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
36209+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
36210+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
36211+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
36212+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
36213
36214 if (!left--) {
36215 if (instance->disconnected)
36216diff -urNp linux-3.1.1/drivers/usb/core/devices.c linux-3.1.1/drivers/usb/core/devices.c
36217--- linux-3.1.1/drivers/usb/core/devices.c 2011-11-11 15:19:27.000000000 -0500
36218+++ linux-3.1.1/drivers/usb/core/devices.c 2011-11-16 18:39:08.000000000 -0500
36219@@ -126,7 +126,7 @@ static const char format_endpt[] =
36220 * time it gets called.
36221 */
36222 static struct device_connect_event {
36223- atomic_t count;
36224+ atomic_unchecked_t count;
36225 wait_queue_head_t wait;
36226 } device_event = {
36227 .count = ATOMIC_INIT(1),
36228@@ -164,7 +164,7 @@ static const struct class_info clas_info
36229
36230 void usbfs_conn_disc_event(void)
36231 {
36232- atomic_add(2, &device_event.count);
36233+ atomic_add_unchecked(2, &device_event.count);
36234 wake_up(&device_event.wait);
36235 }
36236
36237@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
36238
36239 poll_wait(file, &device_event.wait, wait);
36240
36241- event_count = atomic_read(&device_event.count);
36242+ event_count = atomic_read_unchecked(&device_event.count);
36243 if (file->f_version != event_count) {
36244 file->f_version = event_count;
36245 return POLLIN | POLLRDNORM;
36246diff -urNp linux-3.1.1/drivers/usb/core/message.c linux-3.1.1/drivers/usb/core/message.c
36247--- linux-3.1.1/drivers/usb/core/message.c 2011-11-11 15:19:27.000000000 -0500
36248+++ linux-3.1.1/drivers/usb/core/message.c 2011-11-16 18:39:08.000000000 -0500
36249@@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
36250 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
36251 if (buf) {
36252 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
36253- if (len > 0) {
36254- smallbuf = kmalloc(++len, GFP_NOIO);
36255+ if (len++ > 0) {
36256+ smallbuf = kmalloc(len, GFP_NOIO);
36257 if (!smallbuf)
36258 return buf;
36259 memcpy(smallbuf, buf, len);
36260diff -urNp linux-3.1.1/drivers/usb/early/ehci-dbgp.c linux-3.1.1/drivers/usb/early/ehci-dbgp.c
36261--- linux-3.1.1/drivers/usb/early/ehci-dbgp.c 2011-11-11 15:19:27.000000000 -0500
36262+++ linux-3.1.1/drivers/usb/early/ehci-dbgp.c 2011-11-16 18:39:08.000000000 -0500
36263@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
36264
36265 #ifdef CONFIG_KGDB
36266 static struct kgdb_io kgdbdbgp_io_ops;
36267-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
36268+static struct kgdb_io kgdbdbgp_io_ops_console;
36269+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
36270 #else
36271 #define dbgp_kgdb_mode (0)
36272 #endif
36273@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
36274 .write_char = kgdbdbgp_write_char,
36275 };
36276
36277+static struct kgdb_io kgdbdbgp_io_ops_console = {
36278+ .name = "kgdbdbgp",
36279+ .read_char = kgdbdbgp_read_char,
36280+ .write_char = kgdbdbgp_write_char,
36281+ .is_console = 1
36282+};
36283+
36284 static int kgdbdbgp_wait_time;
36285
36286 static int __init kgdbdbgp_parse_config(char *str)
36287@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(
36288 ptr++;
36289 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
36290 }
36291- kgdb_register_io_module(&kgdbdbgp_io_ops);
36292- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
36293+ if (early_dbgp_console.index != -1)
36294+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
36295+ else
36296+ kgdb_register_io_module(&kgdbdbgp_io_ops);
36297
36298 return 0;
36299 }
36300diff -urNp linux-3.1.1/drivers/usb/host/xhci-mem.c linux-3.1.1/drivers/usb/host/xhci-mem.c
36301--- linux-3.1.1/drivers/usb/host/xhci-mem.c 2011-11-11 15:19:27.000000000 -0500
36302+++ linux-3.1.1/drivers/usb/host/xhci-mem.c 2011-11-16 18:40:29.000000000 -0500
36303@@ -1690,6 +1690,8 @@ static int xhci_check_trb_in_td_math(str
36304 unsigned int num_tests;
36305 int i, ret;
36306
36307+ pax_track_stack();
36308+
36309 num_tests = ARRAY_SIZE(simple_test_vector);
36310 for (i = 0; i < num_tests; i++) {
36311 ret = xhci_test_trb_in_td(xhci,
36312diff -urNp linux-3.1.1/drivers/usb/wusbcore/wa-hc.h linux-3.1.1/drivers/usb/wusbcore/wa-hc.h
36313--- linux-3.1.1/drivers/usb/wusbcore/wa-hc.h 2011-11-11 15:19:27.000000000 -0500
36314+++ linux-3.1.1/drivers/usb/wusbcore/wa-hc.h 2011-11-16 18:39:08.000000000 -0500
36315@@ -192,7 +192,7 @@ struct wahc {
36316 struct list_head xfer_delayed_list;
36317 spinlock_t xfer_list_lock;
36318 struct work_struct xfer_work;
36319- atomic_t xfer_id_count;
36320+ atomic_unchecked_t xfer_id_count;
36321 };
36322
36323
36324@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
36325 INIT_LIST_HEAD(&wa->xfer_delayed_list);
36326 spin_lock_init(&wa->xfer_list_lock);
36327 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
36328- atomic_set(&wa->xfer_id_count, 1);
36329+ atomic_set_unchecked(&wa->xfer_id_count, 1);
36330 }
36331
36332 /**
36333diff -urNp linux-3.1.1/drivers/usb/wusbcore/wa-xfer.c linux-3.1.1/drivers/usb/wusbcore/wa-xfer.c
36334--- linux-3.1.1/drivers/usb/wusbcore/wa-xfer.c 2011-11-11 15:19:27.000000000 -0500
36335+++ linux-3.1.1/drivers/usb/wusbcore/wa-xfer.c 2011-11-16 18:39:08.000000000 -0500
36336@@ -295,7 +295,7 @@ out:
36337 */
36338 static void wa_xfer_id_init(struct wa_xfer *xfer)
36339 {
36340- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
36341+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
36342 }
36343
36344 /*
36345diff -urNp linux-3.1.1/drivers/vhost/vhost.c linux-3.1.1/drivers/vhost/vhost.c
36346--- linux-3.1.1/drivers/vhost/vhost.c 2011-11-11 15:19:27.000000000 -0500
36347+++ linux-3.1.1/drivers/vhost/vhost.c 2011-11-16 18:39:08.000000000 -0500
36348@@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhos
36349 return 0;
36350 }
36351
36352-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
36353+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
36354 {
36355 struct file *eventfp, *filep = NULL,
36356 *pollstart = NULL, *pollstop = NULL;
36357diff -urNp linux-3.1.1/drivers/video/aty/aty128fb.c linux-3.1.1/drivers/video/aty/aty128fb.c
36358--- linux-3.1.1/drivers/video/aty/aty128fb.c 2011-11-11 15:19:27.000000000 -0500
36359+++ linux-3.1.1/drivers/video/aty/aty128fb.c 2011-11-16 18:39:08.000000000 -0500
36360@@ -148,7 +148,7 @@ enum {
36361 };
36362
36363 /* Must match above enum */
36364-static const char *r128_family[] __devinitdata = {
36365+static const char *r128_family[] __devinitconst = {
36366 "AGP",
36367 "PCI",
36368 "PRO AGP",
36369diff -urNp linux-3.1.1/drivers/video/fbcmap.c linux-3.1.1/drivers/video/fbcmap.c
36370--- linux-3.1.1/drivers/video/fbcmap.c 2011-11-11 15:19:27.000000000 -0500
36371+++ linux-3.1.1/drivers/video/fbcmap.c 2011-11-16 18:39:08.000000000 -0500
36372@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
36373 rc = -ENODEV;
36374 goto out;
36375 }
36376- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
36377- !info->fbops->fb_setcmap)) {
36378+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
36379 rc = -EINVAL;
36380 goto out1;
36381 }
36382diff -urNp linux-3.1.1/drivers/video/fbmem.c linux-3.1.1/drivers/video/fbmem.c
36383--- linux-3.1.1/drivers/video/fbmem.c 2011-11-11 15:19:27.000000000 -0500
36384+++ linux-3.1.1/drivers/video/fbmem.c 2011-11-16 18:40:29.000000000 -0500
36385@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
36386 image->dx += image->width + 8;
36387 }
36388 } else if (rotate == FB_ROTATE_UD) {
36389- for (x = 0; x < num && image->dx >= 0; x++) {
36390+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
36391 info->fbops->fb_imageblit(info, image);
36392 image->dx -= image->width + 8;
36393 }
36394@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
36395 image->dy += image->height + 8;
36396 }
36397 } else if (rotate == FB_ROTATE_CCW) {
36398- for (x = 0; x < num && image->dy >= 0; x++) {
36399+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
36400 info->fbops->fb_imageblit(info, image);
36401 image->dy -= image->height + 8;
36402 }
36403@@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct
36404 int flags = info->flags;
36405 int ret = 0;
36406
36407+ pax_track_stack();
36408+
36409 if (var->activate & FB_ACTIVATE_INV_MODE) {
36410 struct fb_videomode mode1, mode2;
36411
36412@@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
36413 void __user *argp = (void __user *)arg;
36414 long ret = 0;
36415
36416+ pax_track_stack();
36417+
36418 switch (cmd) {
36419 case FBIOGET_VSCREENINFO:
36420 if (!lock_fb_info(info))
36421@@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
36422 return -EFAULT;
36423 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
36424 return -EINVAL;
36425- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
36426+ if (con2fb.framebuffer >= FB_MAX)
36427 return -EINVAL;
36428 if (!registered_fb[con2fb.framebuffer])
36429 request_module("fb%d", con2fb.framebuffer);
36430diff -urNp linux-3.1.1/drivers/video/geode/gx1fb_core.c linux-3.1.1/drivers/video/geode/gx1fb_core.c
36431--- linux-3.1.1/drivers/video/geode/gx1fb_core.c 2011-11-11 15:19:27.000000000 -0500
36432+++ linux-3.1.1/drivers/video/geode/gx1fb_core.c 2011-11-16 18:39:08.000000000 -0500
36433@@ -29,7 +29,7 @@ static int crt_option = 1;
36434 static char panel_option[32] = "";
36435
36436 /* Modes relevant to the GX1 (taken from modedb.c) */
36437-static const struct fb_videomode __devinitdata gx1_modedb[] = {
36438+static const struct fb_videomode __devinitconst gx1_modedb[] = {
36439 /* 640x480-60 VESA */
36440 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
36441 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
36442diff -urNp linux-3.1.1/drivers/video/gxt4500.c linux-3.1.1/drivers/video/gxt4500.c
36443--- linux-3.1.1/drivers/video/gxt4500.c 2011-11-11 15:19:27.000000000 -0500
36444+++ linux-3.1.1/drivers/video/gxt4500.c 2011-11-16 18:39:08.000000000 -0500
36445@@ -156,7 +156,7 @@ struct gxt4500_par {
36446 static char *mode_option;
36447
36448 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
36449-static const struct fb_videomode defaultmode __devinitdata = {
36450+static const struct fb_videomode defaultmode __devinitconst = {
36451 .refresh = 60,
36452 .xres = 1280,
36453 .yres = 1024,
36454@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, stru
36455 return 0;
36456 }
36457
36458-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
36459+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
36460 .id = "IBM GXT4500P",
36461 .type = FB_TYPE_PACKED_PIXELS,
36462 .visual = FB_VISUAL_PSEUDOCOLOR,
36463diff -urNp linux-3.1.1/drivers/video/i810/i810_accel.c linux-3.1.1/drivers/video/i810/i810_accel.c
36464--- linux-3.1.1/drivers/video/i810/i810_accel.c 2011-11-11 15:19:27.000000000 -0500
36465+++ linux-3.1.1/drivers/video/i810/i810_accel.c 2011-11-16 18:39:08.000000000 -0500
36466@@ -73,6 +73,7 @@ static inline int wait_for_space(struct
36467 }
36468 }
36469 printk("ringbuffer lockup!!!\n");
36470+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
36471 i810_report_error(mmio);
36472 par->dev_flags |= LOCKUP;
36473 info->pixmap.scan_align = 1;
36474diff -urNp linux-3.1.1/drivers/video/i810/i810_main.c linux-3.1.1/drivers/video/i810/i810_main.c
36475--- linux-3.1.1/drivers/video/i810/i810_main.c 2011-11-11 15:19:27.000000000 -0500
36476+++ linux-3.1.1/drivers/video/i810/i810_main.c 2011-11-16 18:39:08.000000000 -0500
36477@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_
36478 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
36479
36480 /* PCI */
36481-static const char *i810_pci_list[] __devinitdata = {
36482+static const char *i810_pci_list[] __devinitconst = {
36483 "Intel(R) 810 Framebuffer Device" ,
36484 "Intel(R) 810-DC100 Framebuffer Device" ,
36485 "Intel(R) 810E Framebuffer Device" ,
36486diff -urNp linux-3.1.1/drivers/video/jz4740_fb.c linux-3.1.1/drivers/video/jz4740_fb.c
36487--- linux-3.1.1/drivers/video/jz4740_fb.c 2011-11-11 15:19:27.000000000 -0500
36488+++ linux-3.1.1/drivers/video/jz4740_fb.c 2011-11-16 18:39:08.000000000 -0500
36489@@ -136,7 +136,7 @@ struct jzfb {
36490 uint32_t pseudo_palette[16];
36491 };
36492
36493-static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
36494+static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
36495 .id = "JZ4740 FB",
36496 .type = FB_TYPE_PACKED_PIXELS,
36497 .visual = FB_VISUAL_TRUECOLOR,
36498diff -urNp linux-3.1.1/drivers/video/logo/logo_linux_clut224.ppm linux-3.1.1/drivers/video/logo/logo_linux_clut224.ppm
36499--- linux-3.1.1/drivers/video/logo/logo_linux_clut224.ppm 2011-11-11 15:19:27.000000000 -0500
36500+++ linux-3.1.1/drivers/video/logo/logo_linux_clut224.ppm 2011-11-16 18:40:29.000000000 -0500
36501@@ -1,1604 +1,1123 @@
36502 P3
36503-# Standard 224-color Linux logo
36504 80 80
36505 255
36506- 0 0 0 0 0 0 0 0 0 0 0 0
36507- 0 0 0 0 0 0 0 0 0 0 0 0
36508- 0 0 0 0 0 0 0 0 0 0 0 0
36509- 0 0 0 0 0 0 0 0 0 0 0 0
36510- 0 0 0 0 0 0 0 0 0 0 0 0
36511- 0 0 0 0 0 0 0 0 0 0 0 0
36512- 0 0 0 0 0 0 0 0 0 0 0 0
36513- 0 0 0 0 0 0 0 0 0 0 0 0
36514- 0 0 0 0 0 0 0 0 0 0 0 0
36515- 6 6 6 6 6 6 10 10 10 10 10 10
36516- 10 10 10 6 6 6 6 6 6 6 6 6
36517- 0 0 0 0 0 0 0 0 0 0 0 0
36518- 0 0 0 0 0 0 0 0 0 0 0 0
36519- 0 0 0 0 0 0 0 0 0 0 0 0
36520- 0 0 0 0 0 0 0 0 0 0 0 0
36521- 0 0 0 0 0 0 0 0 0 0 0 0
36522- 0 0 0 0 0 0 0 0 0 0 0 0
36523- 0 0 0 0 0 0 0 0 0 0 0 0
36524- 0 0 0 0 0 0 0 0 0 0 0 0
36525- 0 0 0 0 0 0 0 0 0 0 0 0
36526- 0 0 0 0 0 0 0 0 0 0 0 0
36527- 0 0 0 0 0 0 0 0 0 0 0 0
36528- 0 0 0 0 0 0 0 0 0 0 0 0
36529- 0 0 0 0 0 0 0 0 0 0 0 0
36530- 0 0 0 0 0 0 0 0 0 0 0 0
36531- 0 0 0 0 0 0 0 0 0 0 0 0
36532- 0 0 0 0 0 0 0 0 0 0 0 0
36533- 0 0 0 0 0 0 0 0 0 0 0 0
36534- 0 0 0 6 6 6 10 10 10 14 14 14
36535- 22 22 22 26 26 26 30 30 30 34 34 34
36536- 30 30 30 30 30 30 26 26 26 18 18 18
36537- 14 14 14 10 10 10 6 6 6 0 0 0
36538- 0 0 0 0 0 0 0 0 0 0 0 0
36539- 0 0 0 0 0 0 0 0 0 0 0 0
36540- 0 0 0 0 0 0 0 0 0 0 0 0
36541- 0 0 0 0 0 0 0 0 0 0 0 0
36542- 0 0 0 0 0 0 0 0 0 0 0 0
36543- 0 0 0 0 0 0 0 0 0 0 0 0
36544- 0 0 0 0 0 0 0 0 0 0 0 0
36545- 0 0 0 0 0 0 0 0 0 0 0 0
36546- 0 0 0 0 0 0 0 0 0 0 0 0
36547- 0 0 0 0 0 1 0 0 1 0 0 0
36548- 0 0 0 0 0 0 0 0 0 0 0 0
36549- 0 0 0 0 0 0 0 0 0 0 0 0
36550- 0 0 0 0 0 0 0 0 0 0 0 0
36551- 0 0 0 0 0 0 0 0 0 0 0 0
36552- 0 0 0 0 0 0 0 0 0 0 0 0
36553- 0 0 0 0 0 0 0 0 0 0 0 0
36554- 6 6 6 14 14 14 26 26 26 42 42 42
36555- 54 54 54 66 66 66 78 78 78 78 78 78
36556- 78 78 78 74 74 74 66 66 66 54 54 54
36557- 42 42 42 26 26 26 18 18 18 10 10 10
36558- 6 6 6 0 0 0 0 0 0 0 0 0
36559- 0 0 0 0 0 0 0 0 0 0 0 0
36560- 0 0 0 0 0 0 0 0 0 0 0 0
36561- 0 0 0 0 0 0 0 0 0 0 0 0
36562- 0 0 0 0 0 0 0 0 0 0 0 0
36563- 0 0 0 0 0 0 0 0 0 0 0 0
36564- 0 0 0 0 0 0 0 0 0 0 0 0
36565- 0 0 0 0 0 0 0 0 0 0 0 0
36566- 0 0 0 0 0 0 0 0 0 0 0 0
36567- 0 0 1 0 0 0 0 0 0 0 0 0
36568- 0 0 0 0 0 0 0 0 0 0 0 0
36569- 0 0 0 0 0 0 0 0 0 0 0 0
36570- 0 0 0 0 0 0 0 0 0 0 0 0
36571- 0 0 0 0 0 0 0 0 0 0 0 0
36572- 0 0 0 0 0 0 0 0 0 0 0 0
36573- 0 0 0 0 0 0 0 0 0 10 10 10
36574- 22 22 22 42 42 42 66 66 66 86 86 86
36575- 66 66 66 38 38 38 38 38 38 22 22 22
36576- 26 26 26 34 34 34 54 54 54 66 66 66
36577- 86 86 86 70 70 70 46 46 46 26 26 26
36578- 14 14 14 6 6 6 0 0 0 0 0 0
36579- 0 0 0 0 0 0 0 0 0 0 0 0
36580- 0 0 0 0 0 0 0 0 0 0 0 0
36581- 0 0 0 0 0 0 0 0 0 0 0 0
36582- 0 0 0 0 0 0 0 0 0 0 0 0
36583- 0 0 0 0 0 0 0 0 0 0 0 0
36584- 0 0 0 0 0 0 0 0 0 0 0 0
36585- 0 0 0 0 0 0 0 0 0 0 0 0
36586- 0 0 0 0 0 0 0 0 0 0 0 0
36587- 0 0 1 0 0 1 0 0 1 0 0 0
36588- 0 0 0 0 0 0 0 0 0 0 0 0
36589- 0 0 0 0 0 0 0 0 0 0 0 0
36590- 0 0 0 0 0 0 0 0 0 0 0 0
36591- 0 0 0 0 0 0 0 0 0 0 0 0
36592- 0 0 0 0 0 0 0 0 0 0 0 0
36593- 0 0 0 0 0 0 10 10 10 26 26 26
36594- 50 50 50 82 82 82 58 58 58 6 6 6
36595- 2 2 6 2 2 6 2 2 6 2 2 6
36596- 2 2 6 2 2 6 2 2 6 2 2 6
36597- 6 6 6 54 54 54 86 86 86 66 66 66
36598- 38 38 38 18 18 18 6 6 6 0 0 0
36599- 0 0 0 0 0 0 0 0 0 0 0 0
36600- 0 0 0 0 0 0 0 0 0 0 0 0
36601- 0 0 0 0 0 0 0 0 0 0 0 0
36602- 0 0 0 0 0 0 0 0 0 0 0 0
36603- 0 0 0 0 0 0 0 0 0 0 0 0
36604- 0 0 0 0 0 0 0 0 0 0 0 0
36605- 0 0 0 0 0 0 0 0 0 0 0 0
36606- 0 0 0 0 0 0 0 0 0 0 0 0
36607- 0 0 0 0 0 0 0 0 0 0 0 0
36608- 0 0 0 0 0 0 0 0 0 0 0 0
36609- 0 0 0 0 0 0 0 0 0 0 0 0
36610- 0 0 0 0 0 0 0 0 0 0 0 0
36611- 0 0 0 0 0 0 0 0 0 0 0 0
36612- 0 0 0 0 0 0 0 0 0 0 0 0
36613- 0 0 0 6 6 6 22 22 22 50 50 50
36614- 78 78 78 34 34 34 2 2 6 2 2 6
36615- 2 2 6 2 2 6 2 2 6 2 2 6
36616- 2 2 6 2 2 6 2 2 6 2 2 6
36617- 2 2 6 2 2 6 6 6 6 70 70 70
36618- 78 78 78 46 46 46 22 22 22 6 6 6
36619- 0 0 0 0 0 0 0 0 0 0 0 0
36620- 0 0 0 0 0 0 0 0 0 0 0 0
36621- 0 0 0 0 0 0 0 0 0 0 0 0
36622- 0 0 0 0 0 0 0 0 0 0 0 0
36623- 0 0 0 0 0 0 0 0 0 0 0 0
36624- 0 0 0 0 0 0 0 0 0 0 0 0
36625- 0 0 0 0 0 0 0 0 0 0 0 0
36626- 0 0 0 0 0 0 0 0 0 0 0 0
36627- 0 0 1 0 0 1 0 0 1 0 0 0
36628- 0 0 0 0 0 0 0 0 0 0 0 0
36629- 0 0 0 0 0 0 0 0 0 0 0 0
36630- 0 0 0 0 0 0 0 0 0 0 0 0
36631- 0 0 0 0 0 0 0 0 0 0 0 0
36632- 0 0 0 0 0 0 0 0 0 0 0 0
36633- 6 6 6 18 18 18 42 42 42 82 82 82
36634- 26 26 26 2 2 6 2 2 6 2 2 6
36635- 2 2 6 2 2 6 2 2 6 2 2 6
36636- 2 2 6 2 2 6 2 2 6 14 14 14
36637- 46 46 46 34 34 34 6 6 6 2 2 6
36638- 42 42 42 78 78 78 42 42 42 18 18 18
36639- 6 6 6 0 0 0 0 0 0 0 0 0
36640- 0 0 0 0 0 0 0 0 0 0 0 0
36641- 0 0 0 0 0 0 0 0 0 0 0 0
36642- 0 0 0 0 0 0 0 0 0 0 0 0
36643- 0 0 0 0 0 0 0 0 0 0 0 0
36644- 0 0 0 0 0 0 0 0 0 0 0 0
36645- 0 0 0 0 0 0 0 0 0 0 0 0
36646- 0 0 0 0 0 0 0 0 0 0 0 0
36647- 0 0 1 0 0 0 0 0 1 0 0 0
36648- 0 0 0 0 0 0 0 0 0 0 0 0
36649- 0 0 0 0 0 0 0 0 0 0 0 0
36650- 0 0 0 0 0 0 0 0 0 0 0 0
36651- 0 0 0 0 0 0 0 0 0 0 0 0
36652- 0 0 0 0 0 0 0 0 0 0 0 0
36653- 10 10 10 30 30 30 66 66 66 58 58 58
36654- 2 2 6 2 2 6 2 2 6 2 2 6
36655- 2 2 6 2 2 6 2 2 6 2 2 6
36656- 2 2 6 2 2 6 2 2 6 26 26 26
36657- 86 86 86 101 101 101 46 46 46 10 10 10
36658- 2 2 6 58 58 58 70 70 70 34 34 34
36659- 10 10 10 0 0 0 0 0 0 0 0 0
36660- 0 0 0 0 0 0 0 0 0 0 0 0
36661- 0 0 0 0 0 0 0 0 0 0 0 0
36662- 0 0 0 0 0 0 0 0 0 0 0 0
36663- 0 0 0 0 0 0 0 0 0 0 0 0
36664- 0 0 0 0 0 0 0 0 0 0 0 0
36665- 0 0 0 0 0 0 0 0 0 0 0 0
36666- 0 0 0 0 0 0 0 0 0 0 0 0
36667- 0 0 1 0 0 1 0 0 1 0 0 0
36668- 0 0 0 0 0 0 0 0 0 0 0 0
36669- 0 0 0 0 0 0 0 0 0 0 0 0
36670- 0 0 0 0 0 0 0 0 0 0 0 0
36671- 0 0 0 0 0 0 0 0 0 0 0 0
36672- 0 0 0 0 0 0 0 0 0 0 0 0
36673- 14 14 14 42 42 42 86 86 86 10 10 10
36674- 2 2 6 2 2 6 2 2 6 2 2 6
36675- 2 2 6 2 2 6 2 2 6 2 2 6
36676- 2 2 6 2 2 6 2 2 6 30 30 30
36677- 94 94 94 94 94 94 58 58 58 26 26 26
36678- 2 2 6 6 6 6 78 78 78 54 54 54
36679- 22 22 22 6 6 6 0 0 0 0 0 0
36680- 0 0 0 0 0 0 0 0 0 0 0 0
36681- 0 0 0 0 0 0 0 0 0 0 0 0
36682- 0 0 0 0 0 0 0 0 0 0 0 0
36683- 0 0 0 0 0 0 0 0 0 0 0 0
36684- 0 0 0 0 0 0 0 0 0 0 0 0
36685- 0 0 0 0 0 0 0 0 0 0 0 0
36686- 0 0 0 0 0 0 0 0 0 0 0 0
36687- 0 0 0 0 0 0 0 0 0 0 0 0
36688- 0 0 0 0 0 0 0 0 0 0 0 0
36689- 0 0 0 0 0 0 0 0 0 0 0 0
36690- 0 0 0 0 0 0 0 0 0 0 0 0
36691- 0 0 0 0 0 0 0 0 0 0 0 0
36692- 0 0 0 0 0 0 0 0 0 6 6 6
36693- 22 22 22 62 62 62 62 62 62 2 2 6
36694- 2 2 6 2 2 6 2 2 6 2 2 6
36695- 2 2 6 2 2 6 2 2 6 2 2 6
36696- 2 2 6 2 2 6 2 2 6 26 26 26
36697- 54 54 54 38 38 38 18 18 18 10 10 10
36698- 2 2 6 2 2 6 34 34 34 82 82 82
36699- 38 38 38 14 14 14 0 0 0 0 0 0
36700- 0 0 0 0 0 0 0 0 0 0 0 0
36701- 0 0 0 0 0 0 0 0 0 0 0 0
36702- 0 0 0 0 0 0 0 0 0 0 0 0
36703- 0 0 0 0 0 0 0 0 0 0 0 0
36704- 0 0 0 0 0 0 0 0 0 0 0 0
36705- 0 0 0 0 0 0 0 0 0 0 0 0
36706- 0 0 0 0 0 0 0 0 0 0 0 0
36707- 0 0 0 0 0 1 0 0 1 0 0 0
36708- 0 0 0 0 0 0 0 0 0 0 0 0
36709- 0 0 0 0 0 0 0 0 0 0 0 0
36710- 0 0 0 0 0 0 0 0 0 0 0 0
36711- 0 0 0 0 0 0 0 0 0 0 0 0
36712- 0 0 0 0 0 0 0 0 0 6 6 6
36713- 30 30 30 78 78 78 30 30 30 2 2 6
36714- 2 2 6 2 2 6 2 2 6 2 2 6
36715- 2 2 6 2 2 6 2 2 6 2 2 6
36716- 2 2 6 2 2 6 2 2 6 10 10 10
36717- 10 10 10 2 2 6 2 2 6 2 2 6
36718- 2 2 6 2 2 6 2 2 6 78 78 78
36719- 50 50 50 18 18 18 6 6 6 0 0 0
36720- 0 0 0 0 0 0 0 0 0 0 0 0
36721- 0 0 0 0 0 0 0 0 0 0 0 0
36722- 0 0 0 0 0 0 0 0 0 0 0 0
36723- 0 0 0 0 0 0 0 0 0 0 0 0
36724- 0 0 0 0 0 0 0 0 0 0 0 0
36725- 0 0 0 0 0 0 0 0 0 0 0 0
36726- 0 0 0 0 0 0 0 0 0 0 0 0
36727- 0 0 1 0 0 0 0 0 0 0 0 0
36728- 0 0 0 0 0 0 0 0 0 0 0 0
36729- 0 0 0 0 0 0 0 0 0 0 0 0
36730- 0 0 0 0 0 0 0 0 0 0 0 0
36731- 0 0 0 0 0 0 0 0 0 0 0 0
36732- 0 0 0 0 0 0 0 0 0 10 10 10
36733- 38 38 38 86 86 86 14 14 14 2 2 6
36734- 2 2 6 2 2 6 2 2 6 2 2 6
36735- 2 2 6 2 2 6 2 2 6 2 2 6
36736- 2 2 6 2 2 6 2 2 6 2 2 6
36737- 2 2 6 2 2 6 2 2 6 2 2 6
36738- 2 2 6 2 2 6 2 2 6 54 54 54
36739- 66 66 66 26 26 26 6 6 6 0 0 0
36740- 0 0 0 0 0 0 0 0 0 0 0 0
36741- 0 0 0 0 0 0 0 0 0 0 0 0
36742- 0 0 0 0 0 0 0 0 0 0 0 0
36743- 0 0 0 0 0 0 0 0 0 0 0 0
36744- 0 0 0 0 0 0 0 0 0 0 0 0
36745- 0 0 0 0 0 0 0 0 0 0 0 0
36746- 0 0 0 0 0 0 0 0 0 0 0 0
36747- 0 0 0 0 0 1 0 0 1 0 0 0
36748- 0 0 0 0 0 0 0 0 0 0 0 0
36749- 0 0 0 0 0 0 0 0 0 0 0 0
36750- 0 0 0 0 0 0 0 0 0 0 0 0
36751- 0 0 0 0 0 0 0 0 0 0 0 0
36752- 0 0 0 0 0 0 0 0 0 14 14 14
36753- 42 42 42 82 82 82 2 2 6 2 2 6
36754- 2 2 6 6 6 6 10 10 10 2 2 6
36755- 2 2 6 2 2 6 2 2 6 2 2 6
36756- 2 2 6 2 2 6 2 2 6 6 6 6
36757- 14 14 14 10 10 10 2 2 6 2 2 6
36758- 2 2 6 2 2 6 2 2 6 18 18 18
36759- 82 82 82 34 34 34 10 10 10 0 0 0
36760- 0 0 0 0 0 0 0 0 0 0 0 0
36761- 0 0 0 0 0 0 0 0 0 0 0 0
36762- 0 0 0 0 0 0 0 0 0 0 0 0
36763- 0 0 0 0 0 0 0 0 0 0 0 0
36764- 0 0 0 0 0 0 0 0 0 0 0 0
36765- 0 0 0 0 0 0 0 0 0 0 0 0
36766- 0 0 0 0 0 0 0 0 0 0 0 0
36767- 0 0 1 0 0 0 0 0 0 0 0 0
36768- 0 0 0 0 0 0 0 0 0 0 0 0
36769- 0 0 0 0 0 0 0 0 0 0 0 0
36770- 0 0 0 0 0 0 0 0 0 0 0 0
36771- 0 0 0 0 0 0 0 0 0 0 0 0
36772- 0 0 0 0 0 0 0 0 0 14 14 14
36773- 46 46 46 86 86 86 2 2 6 2 2 6
36774- 6 6 6 6 6 6 22 22 22 34 34 34
36775- 6 6 6 2 2 6 2 2 6 2 2 6
36776- 2 2 6 2 2 6 18 18 18 34 34 34
36777- 10 10 10 50 50 50 22 22 22 2 2 6
36778- 2 2 6 2 2 6 2 2 6 10 10 10
36779- 86 86 86 42 42 42 14 14 14 0 0 0
36780- 0 0 0 0 0 0 0 0 0 0 0 0
36781- 0 0 0 0 0 0 0 0 0 0 0 0
36782- 0 0 0 0 0 0 0 0 0 0 0 0
36783- 0 0 0 0 0 0 0 0 0 0 0 0
36784- 0 0 0 0 0 0 0 0 0 0 0 0
36785- 0 0 0 0 0 0 0 0 0 0 0 0
36786- 0 0 0 0 0 0 0 0 0 0 0 0
36787- 0 0 1 0 0 1 0 0 1 0 0 0
36788- 0 0 0 0 0 0 0 0 0 0 0 0
36789- 0 0 0 0 0 0 0 0 0 0 0 0
36790- 0 0 0 0 0 0 0 0 0 0 0 0
36791- 0 0 0 0 0 0 0 0 0 0 0 0
36792- 0 0 0 0 0 0 0 0 0 14 14 14
36793- 46 46 46 86 86 86 2 2 6 2 2 6
36794- 38 38 38 116 116 116 94 94 94 22 22 22
36795- 22 22 22 2 2 6 2 2 6 2 2 6
36796- 14 14 14 86 86 86 138 138 138 162 162 162
36797-154 154 154 38 38 38 26 26 26 6 6 6
36798- 2 2 6 2 2 6 2 2 6 2 2 6
36799- 86 86 86 46 46 46 14 14 14 0 0 0
36800- 0 0 0 0 0 0 0 0 0 0 0 0
36801- 0 0 0 0 0 0 0 0 0 0 0 0
36802- 0 0 0 0 0 0 0 0 0 0 0 0
36803- 0 0 0 0 0 0 0 0 0 0 0 0
36804- 0 0 0 0 0 0 0 0 0 0 0 0
36805- 0 0 0 0 0 0 0 0 0 0 0 0
36806- 0 0 0 0 0 0 0 0 0 0 0 0
36807- 0 0 0 0 0 0 0 0 0 0 0 0
36808- 0 0 0 0 0 0 0 0 0 0 0 0
36809- 0 0 0 0 0 0 0 0 0 0 0 0
36810- 0 0 0 0 0 0 0 0 0 0 0 0
36811- 0 0 0 0 0 0 0 0 0 0 0 0
36812- 0 0 0 0 0 0 0 0 0 14 14 14
36813- 46 46 46 86 86 86 2 2 6 14 14 14
36814-134 134 134 198 198 198 195 195 195 116 116 116
36815- 10 10 10 2 2 6 2 2 6 6 6 6
36816-101 98 89 187 187 187 210 210 210 218 218 218
36817-214 214 214 134 134 134 14 14 14 6 6 6
36818- 2 2 6 2 2 6 2 2 6 2 2 6
36819- 86 86 86 50 50 50 18 18 18 6 6 6
36820- 0 0 0 0 0 0 0 0 0 0 0 0
36821- 0 0 0 0 0 0 0 0 0 0 0 0
36822- 0 0 0 0 0 0 0 0 0 0 0 0
36823- 0 0 0 0 0 0 0 0 0 0 0 0
36824- 0 0 0 0 0 0 0 0 0 0 0 0
36825- 0 0 0 0 0 0 0 0 0 0 0 0
36826- 0 0 0 0 0 0 0 0 1 0 0 0
36827- 0 0 1 0 0 1 0 0 1 0 0 0
36828- 0 0 0 0 0 0 0 0 0 0 0 0
36829- 0 0 0 0 0 0 0 0 0 0 0 0
36830- 0 0 0 0 0 0 0 0 0 0 0 0
36831- 0 0 0 0 0 0 0 0 0 0 0 0
36832- 0 0 0 0 0 0 0 0 0 14 14 14
36833- 46 46 46 86 86 86 2 2 6 54 54 54
36834-218 218 218 195 195 195 226 226 226 246 246 246
36835- 58 58 58 2 2 6 2 2 6 30 30 30
36836-210 210 210 253 253 253 174 174 174 123 123 123
36837-221 221 221 234 234 234 74 74 74 2 2 6
36838- 2 2 6 2 2 6 2 2 6 2 2 6
36839- 70 70 70 58 58 58 22 22 22 6 6 6
36840- 0 0 0 0 0 0 0 0 0 0 0 0
36841- 0 0 0 0 0 0 0 0 0 0 0 0
36842- 0 0 0 0 0 0 0 0 0 0 0 0
36843- 0 0 0 0 0 0 0 0 0 0 0 0
36844- 0 0 0 0 0 0 0 0 0 0 0 0
36845- 0 0 0 0 0 0 0 0 0 0 0 0
36846- 0 0 0 0 0 0 0 0 0 0 0 0
36847- 0 0 0 0 0 0 0 0 0 0 0 0
36848- 0 0 0 0 0 0 0 0 0 0 0 0
36849- 0 0 0 0 0 0 0 0 0 0 0 0
36850- 0 0 0 0 0 0 0 0 0 0 0 0
36851- 0 0 0 0 0 0 0 0 0 0 0 0
36852- 0 0 0 0 0 0 0 0 0 14 14 14
36853- 46 46 46 82 82 82 2 2 6 106 106 106
36854-170 170 170 26 26 26 86 86 86 226 226 226
36855-123 123 123 10 10 10 14 14 14 46 46 46
36856-231 231 231 190 190 190 6 6 6 70 70 70
36857- 90 90 90 238 238 238 158 158 158 2 2 6
36858- 2 2 6 2 2 6 2 2 6 2 2 6
36859- 70 70 70 58 58 58 22 22 22 6 6 6
36860- 0 0 0 0 0 0 0 0 0 0 0 0
36861- 0 0 0 0 0 0 0 0 0 0 0 0
36862- 0 0 0 0 0 0 0 0 0 0 0 0
36863- 0 0 0 0 0 0 0 0 0 0 0 0
36864- 0 0 0 0 0 0 0 0 0 0 0 0
36865- 0 0 0 0 0 0 0 0 0 0 0 0
36866- 0 0 0 0 0 0 0 0 1 0 0 0
36867- 0 0 1 0 0 1 0 0 1 0 0 0
36868- 0 0 0 0 0 0 0 0 0 0 0 0
36869- 0 0 0 0 0 0 0 0 0 0 0 0
36870- 0 0 0 0 0 0 0 0 0 0 0 0
36871- 0 0 0 0 0 0 0 0 0 0 0 0
36872- 0 0 0 0 0 0 0 0 0 14 14 14
36873- 42 42 42 86 86 86 6 6 6 116 116 116
36874-106 106 106 6 6 6 70 70 70 149 149 149
36875-128 128 128 18 18 18 38 38 38 54 54 54
36876-221 221 221 106 106 106 2 2 6 14 14 14
36877- 46 46 46 190 190 190 198 198 198 2 2 6
36878- 2 2 6 2 2 6 2 2 6 2 2 6
36879- 74 74 74 62 62 62 22 22 22 6 6 6
36880- 0 0 0 0 0 0 0 0 0 0 0 0
36881- 0 0 0 0 0 0 0 0 0 0 0 0
36882- 0 0 0 0 0 0 0 0 0 0 0 0
36883- 0 0 0 0 0 0 0 0 0 0 0 0
36884- 0 0 0 0 0 0 0 0 0 0 0 0
36885- 0 0 0 0 0 0 0 0 0 0 0 0
36886- 0 0 0 0 0 0 0 0 1 0 0 0
36887- 0 0 1 0 0 0 0 0 1 0 0 0
36888- 0 0 0 0 0 0 0 0 0 0 0 0
36889- 0 0 0 0 0 0 0 0 0 0 0 0
36890- 0 0 0 0 0 0 0 0 0 0 0 0
36891- 0 0 0 0 0 0 0 0 0 0 0 0
36892- 0 0 0 0 0 0 0 0 0 14 14 14
36893- 42 42 42 94 94 94 14 14 14 101 101 101
36894-128 128 128 2 2 6 18 18 18 116 116 116
36895-118 98 46 121 92 8 121 92 8 98 78 10
36896-162 162 162 106 106 106 2 2 6 2 2 6
36897- 2 2 6 195 195 195 195 195 195 6 6 6
36898- 2 2 6 2 2 6 2 2 6 2 2 6
36899- 74 74 74 62 62 62 22 22 22 6 6 6
36900- 0 0 0 0 0 0 0 0 0 0 0 0
36901- 0 0 0 0 0 0 0 0 0 0 0 0
36902- 0 0 0 0 0 0 0 0 0 0 0 0
36903- 0 0 0 0 0 0 0 0 0 0 0 0
36904- 0 0 0 0 0 0 0 0 0 0 0 0
36905- 0 0 0 0 0 0 0 0 0 0 0 0
36906- 0 0 0 0 0 0 0 0 1 0 0 1
36907- 0 0 1 0 0 0 0 0 1 0 0 0
36908- 0 0 0 0 0 0 0 0 0 0 0 0
36909- 0 0 0 0 0 0 0 0 0 0 0 0
36910- 0 0 0 0 0 0 0 0 0 0 0 0
36911- 0 0 0 0 0 0 0 0 0 0 0 0
36912- 0 0 0 0 0 0 0 0 0 10 10 10
36913- 38 38 38 90 90 90 14 14 14 58 58 58
36914-210 210 210 26 26 26 54 38 6 154 114 10
36915-226 170 11 236 186 11 225 175 15 184 144 12
36916-215 174 15 175 146 61 37 26 9 2 2 6
36917- 70 70 70 246 246 246 138 138 138 2 2 6
36918- 2 2 6 2 2 6 2 2 6 2 2 6
36919- 70 70 70 66 66 66 26 26 26 6 6 6
36920- 0 0 0 0 0 0 0 0 0 0 0 0
36921- 0 0 0 0 0 0 0 0 0 0 0 0
36922- 0 0 0 0 0 0 0 0 0 0 0 0
36923- 0 0 0 0 0 0 0 0 0 0 0 0
36924- 0 0 0 0 0 0 0 0 0 0 0 0
36925- 0 0 0 0 0 0 0 0 0 0 0 0
36926- 0 0 0 0 0 0 0 0 0 0 0 0
36927- 0 0 0 0 0 0 0 0 0 0 0 0
36928- 0 0 0 0 0 0 0 0 0 0 0 0
36929- 0 0 0 0 0 0 0 0 0 0 0 0
36930- 0 0 0 0 0 0 0 0 0 0 0 0
36931- 0 0 0 0 0 0 0 0 0 0 0 0
36932- 0 0 0 0 0 0 0 0 0 10 10 10
36933- 38 38 38 86 86 86 14 14 14 10 10 10
36934-195 195 195 188 164 115 192 133 9 225 175 15
36935-239 182 13 234 190 10 232 195 16 232 200 30
36936-245 207 45 241 208 19 232 195 16 184 144 12
36937-218 194 134 211 206 186 42 42 42 2 2 6
36938- 2 2 6 2 2 6 2 2 6 2 2 6
36939- 50 50 50 74 74 74 30 30 30 6 6 6
36940- 0 0 0 0 0 0 0 0 0 0 0 0
36941- 0 0 0 0 0 0 0 0 0 0 0 0
36942- 0 0 0 0 0 0 0 0 0 0 0 0
36943- 0 0 0 0 0 0 0 0 0 0 0 0
36944- 0 0 0 0 0 0 0 0 0 0 0 0
36945- 0 0 0 0 0 0 0 0 0 0 0 0
36946- 0 0 0 0 0 0 0 0 0 0 0 0
36947- 0 0 0 0 0 0 0 0 0 0 0 0
36948- 0 0 0 0 0 0 0 0 0 0 0 0
36949- 0 0 0 0 0 0 0 0 0 0 0 0
36950- 0 0 0 0 0 0 0 0 0 0 0 0
36951- 0 0 0 0 0 0 0 0 0 0 0 0
36952- 0 0 0 0 0 0 0 0 0 10 10 10
36953- 34 34 34 86 86 86 14 14 14 2 2 6
36954-121 87 25 192 133 9 219 162 10 239 182 13
36955-236 186 11 232 195 16 241 208 19 244 214 54
36956-246 218 60 246 218 38 246 215 20 241 208 19
36957-241 208 19 226 184 13 121 87 25 2 2 6
36958- 2 2 6 2 2 6 2 2 6 2 2 6
36959- 50 50 50 82 82 82 34 34 34 10 10 10
36960- 0 0 0 0 0 0 0 0 0 0 0 0
36961- 0 0 0 0 0 0 0 0 0 0 0 0
36962- 0 0 0 0 0 0 0 0 0 0 0 0
36963- 0 0 0 0 0 0 0 0 0 0 0 0
36964- 0 0 0 0 0 0 0 0 0 0 0 0
36965- 0 0 0 0 0 0 0 0 0 0 0 0
36966- 0 0 0 0 0 0 0 0 0 0 0 0
36967- 0 0 0 0 0 0 0 0 0 0 0 0
36968- 0 0 0 0 0 0 0 0 0 0 0 0
36969- 0 0 0 0 0 0 0 0 0 0 0 0
36970- 0 0 0 0 0 0 0 0 0 0 0 0
36971- 0 0 0 0 0 0 0 0 0 0 0 0
36972- 0 0 0 0 0 0 0 0 0 10 10 10
36973- 34 34 34 82 82 82 30 30 30 61 42 6
36974-180 123 7 206 145 10 230 174 11 239 182 13
36975-234 190 10 238 202 15 241 208 19 246 218 74
36976-246 218 38 246 215 20 246 215 20 246 215 20
36977-226 184 13 215 174 15 184 144 12 6 6 6
36978- 2 2 6 2 2 6 2 2 6 2 2 6
36979- 26 26 26 94 94 94 42 42 42 14 14 14
36980- 0 0 0 0 0 0 0 0 0 0 0 0
36981- 0 0 0 0 0 0 0 0 0 0 0 0
36982- 0 0 0 0 0 0 0 0 0 0 0 0
36983- 0 0 0 0 0 0 0 0 0 0 0 0
36984- 0 0 0 0 0 0 0 0 0 0 0 0
36985- 0 0 0 0 0 0 0 0 0 0 0 0
36986- 0 0 0 0 0 0 0 0 0 0 0 0
36987- 0 0 0 0 0 0 0 0 0 0 0 0
36988- 0 0 0 0 0 0 0 0 0 0 0 0
36989- 0 0 0 0 0 0 0 0 0 0 0 0
36990- 0 0 0 0 0 0 0 0 0 0 0 0
36991- 0 0 0 0 0 0 0 0 0 0 0 0
36992- 0 0 0 0 0 0 0 0 0 10 10 10
36993- 30 30 30 78 78 78 50 50 50 104 69 6
36994-192 133 9 216 158 10 236 178 12 236 186 11
36995-232 195 16 241 208 19 244 214 54 245 215 43
36996-246 215 20 246 215 20 241 208 19 198 155 10
36997-200 144 11 216 158 10 156 118 10 2 2 6
36998- 2 2 6 2 2 6 2 2 6 2 2 6
36999- 6 6 6 90 90 90 54 54 54 18 18 18
37000- 6 6 6 0 0 0 0 0 0 0 0 0
37001- 0 0 0 0 0 0 0 0 0 0 0 0
37002- 0 0 0 0 0 0 0 0 0 0 0 0
37003- 0 0 0 0 0 0 0 0 0 0 0 0
37004- 0 0 0 0 0 0 0 0 0 0 0 0
37005- 0 0 0 0 0 0 0 0 0 0 0 0
37006- 0 0 0 0 0 0 0 0 0 0 0 0
37007- 0 0 0 0 0 0 0 0 0 0 0 0
37008- 0 0 0 0 0 0 0 0 0 0 0 0
37009- 0 0 0 0 0 0 0 0 0 0 0 0
37010- 0 0 0 0 0 0 0 0 0 0 0 0
37011- 0 0 0 0 0 0 0 0 0 0 0 0
37012- 0 0 0 0 0 0 0 0 0 10 10 10
37013- 30 30 30 78 78 78 46 46 46 22 22 22
37014-137 92 6 210 162 10 239 182 13 238 190 10
37015-238 202 15 241 208 19 246 215 20 246 215 20
37016-241 208 19 203 166 17 185 133 11 210 150 10
37017-216 158 10 210 150 10 102 78 10 2 2 6
37018- 6 6 6 54 54 54 14 14 14 2 2 6
37019- 2 2 6 62 62 62 74 74 74 30 30 30
37020- 10 10 10 0 0 0 0 0 0 0 0 0
37021- 0 0 0 0 0 0 0 0 0 0 0 0
37022- 0 0 0 0 0 0 0 0 0 0 0 0
37023- 0 0 0 0 0 0 0 0 0 0 0 0
37024- 0 0 0 0 0 0 0 0 0 0 0 0
37025- 0 0 0 0 0 0 0 0 0 0 0 0
37026- 0 0 0 0 0 0 0 0 0 0 0 0
37027- 0 0 0 0 0 0 0 0 0 0 0 0
37028- 0 0 0 0 0 0 0 0 0 0 0 0
37029- 0 0 0 0 0 0 0 0 0 0 0 0
37030- 0 0 0 0 0 0 0 0 0 0 0 0
37031- 0 0 0 0 0 0 0 0 0 0 0 0
37032- 0 0 0 0 0 0 0 0 0 10 10 10
37033- 34 34 34 78 78 78 50 50 50 6 6 6
37034- 94 70 30 139 102 15 190 146 13 226 184 13
37035-232 200 30 232 195 16 215 174 15 190 146 13
37036-168 122 10 192 133 9 210 150 10 213 154 11
37037-202 150 34 182 157 106 101 98 89 2 2 6
37038- 2 2 6 78 78 78 116 116 116 58 58 58
37039- 2 2 6 22 22 22 90 90 90 46 46 46
37040- 18 18 18 6 6 6 0 0 0 0 0 0
37041- 0 0 0 0 0 0 0 0 0 0 0 0
37042- 0 0 0 0 0 0 0 0 0 0 0 0
37043- 0 0 0 0 0 0 0 0 0 0 0 0
37044- 0 0 0 0 0 0 0 0 0 0 0 0
37045- 0 0 0 0 0 0 0 0 0 0 0 0
37046- 0 0 0 0 0 0 0 0 0 0 0 0
37047- 0 0 0 0 0 0 0 0 0 0 0 0
37048- 0 0 0 0 0 0 0 0 0 0 0 0
37049- 0 0 0 0 0 0 0 0 0 0 0 0
37050- 0 0 0 0 0 0 0 0 0 0 0 0
37051- 0 0 0 0 0 0 0 0 0 0 0 0
37052- 0 0 0 0 0 0 0 0 0 10 10 10
37053- 38 38 38 86 86 86 50 50 50 6 6 6
37054-128 128 128 174 154 114 156 107 11 168 122 10
37055-198 155 10 184 144 12 197 138 11 200 144 11
37056-206 145 10 206 145 10 197 138 11 188 164 115
37057-195 195 195 198 198 198 174 174 174 14 14 14
37058- 2 2 6 22 22 22 116 116 116 116 116 116
37059- 22 22 22 2 2 6 74 74 74 70 70 70
37060- 30 30 30 10 10 10 0 0 0 0 0 0
37061- 0 0 0 0 0 0 0 0 0 0 0 0
37062- 0 0 0 0 0 0 0 0 0 0 0 0
37063- 0 0 0 0 0 0 0 0 0 0 0 0
37064- 0 0 0 0 0 0 0 0 0 0 0 0
37065- 0 0 0 0 0 0 0 0 0 0 0 0
37066- 0 0 0 0 0 0 0 0 0 0 0 0
37067- 0 0 0 0 0 0 0 0 0 0 0 0
37068- 0 0 0 0 0 0 0 0 0 0 0 0
37069- 0 0 0 0 0 0 0 0 0 0 0 0
37070- 0 0 0 0 0 0 0 0 0 0 0 0
37071- 0 0 0 0 0 0 0 0 0 0 0 0
37072- 0 0 0 0 0 0 6 6 6 18 18 18
37073- 50 50 50 101 101 101 26 26 26 10 10 10
37074-138 138 138 190 190 190 174 154 114 156 107 11
37075-197 138 11 200 144 11 197 138 11 192 133 9
37076-180 123 7 190 142 34 190 178 144 187 187 187
37077-202 202 202 221 221 221 214 214 214 66 66 66
37078- 2 2 6 2 2 6 50 50 50 62 62 62
37079- 6 6 6 2 2 6 10 10 10 90 90 90
37080- 50 50 50 18 18 18 6 6 6 0 0 0
37081- 0 0 0 0 0 0 0 0 0 0 0 0
37082- 0 0 0 0 0 0 0 0 0 0 0 0
37083- 0 0 0 0 0 0 0 0 0 0 0 0
37084- 0 0 0 0 0 0 0 0 0 0 0 0
37085- 0 0 0 0 0 0 0 0 0 0 0 0
37086- 0 0 0 0 0 0 0 0 0 0 0 0
37087- 0 0 0 0 0 0 0 0 0 0 0 0
37088- 0 0 0 0 0 0 0 0 0 0 0 0
37089- 0 0 0 0 0 0 0 0 0 0 0 0
37090- 0 0 0 0 0 0 0 0 0 0 0 0
37091- 0 0 0 0 0 0 0 0 0 0 0 0
37092- 0 0 0 0 0 0 10 10 10 34 34 34
37093- 74 74 74 74 74 74 2 2 6 6 6 6
37094-144 144 144 198 198 198 190 190 190 178 166 146
37095-154 121 60 156 107 11 156 107 11 168 124 44
37096-174 154 114 187 187 187 190 190 190 210 210 210
37097-246 246 246 253 253 253 253 253 253 182 182 182
37098- 6 6 6 2 2 6 2 2 6 2 2 6
37099- 2 2 6 2 2 6 2 2 6 62 62 62
37100- 74 74 74 34 34 34 14 14 14 0 0 0
37101- 0 0 0 0 0 0 0 0 0 0 0 0
37102- 0 0 0 0 0 0 0 0 0 0 0 0
37103- 0 0 0 0 0 0 0 0 0 0 0 0
37104- 0 0 0 0 0 0 0 0 0 0 0 0
37105- 0 0 0 0 0 0 0 0 0 0 0 0
37106- 0 0 0 0 0 0 0 0 0 0 0 0
37107- 0 0 0 0 0 0 0 0 0 0 0 0
37108- 0 0 0 0 0 0 0 0 0 0 0 0
37109- 0 0 0 0 0 0 0 0 0 0 0 0
37110- 0 0 0 0 0 0 0 0 0 0 0 0
37111- 0 0 0 0 0 0 0 0 0 0 0 0
37112- 0 0 0 10 10 10 22 22 22 54 54 54
37113- 94 94 94 18 18 18 2 2 6 46 46 46
37114-234 234 234 221 221 221 190 190 190 190 190 190
37115-190 190 190 187 187 187 187 187 187 190 190 190
37116-190 190 190 195 195 195 214 214 214 242 242 242
37117-253 253 253 253 253 253 253 253 253 253 253 253
37118- 82 82 82 2 2 6 2 2 6 2 2 6
37119- 2 2 6 2 2 6 2 2 6 14 14 14
37120- 86 86 86 54 54 54 22 22 22 6 6 6
37121- 0 0 0 0 0 0 0 0 0 0 0 0
37122- 0 0 0 0 0 0 0 0 0 0 0 0
37123- 0 0 0 0 0 0 0 0 0 0 0 0
37124- 0 0 0 0 0 0 0 0 0 0 0 0
37125- 0 0 0 0 0 0 0 0 0 0 0 0
37126- 0 0 0 0 0 0 0 0 0 0 0 0
37127- 0 0 0 0 0 0 0 0 0 0 0 0
37128- 0 0 0 0 0 0 0 0 0 0 0 0
37129- 0 0 0 0 0 0 0 0 0 0 0 0
37130- 0 0 0 0 0 0 0 0 0 0 0 0
37131- 0 0 0 0 0 0 0 0 0 0 0 0
37132- 6 6 6 18 18 18 46 46 46 90 90 90
37133- 46 46 46 18 18 18 6 6 6 182 182 182
37134-253 253 253 246 246 246 206 206 206 190 190 190
37135-190 190 190 190 190 190 190 190 190 190 190 190
37136-206 206 206 231 231 231 250 250 250 253 253 253
37137-253 253 253 253 253 253 253 253 253 253 253 253
37138-202 202 202 14 14 14 2 2 6 2 2 6
37139- 2 2 6 2 2 6 2 2 6 2 2 6
37140- 42 42 42 86 86 86 42 42 42 18 18 18
37141- 6 6 6 0 0 0 0 0 0 0 0 0
37142- 0 0 0 0 0 0 0 0 0 0 0 0
37143- 0 0 0 0 0 0 0 0 0 0 0 0
37144- 0 0 0 0 0 0 0 0 0 0 0 0
37145- 0 0 0 0 0 0 0 0 0 0 0 0
37146- 0 0 0 0 0 0 0 0 0 0 0 0
37147- 0 0 0 0 0 0 0 0 0 0 0 0
37148- 0 0 0 0 0 0 0 0 0 0 0 0
37149- 0 0 0 0 0 0 0 0 0 0 0 0
37150- 0 0 0 0 0 0 0 0 0 0 0 0
37151- 0 0 0 0 0 0 0 0 0 6 6 6
37152- 14 14 14 38 38 38 74 74 74 66 66 66
37153- 2 2 6 6 6 6 90 90 90 250 250 250
37154-253 253 253 253 253 253 238 238 238 198 198 198
37155-190 190 190 190 190 190 195 195 195 221 221 221
37156-246 246 246 253 253 253 253 253 253 253 253 253
37157-253 253 253 253 253 253 253 253 253 253 253 253
37158-253 253 253 82 82 82 2 2 6 2 2 6
37159- 2 2 6 2 2 6 2 2 6 2 2 6
37160- 2 2 6 78 78 78 70 70 70 34 34 34
37161- 14 14 14 6 6 6 0 0 0 0 0 0
37162- 0 0 0 0 0 0 0 0 0 0 0 0
37163- 0 0 0 0 0 0 0 0 0 0 0 0
37164- 0 0 0 0 0 0 0 0 0 0 0 0
37165- 0 0 0 0 0 0 0 0 0 0 0 0
37166- 0 0 0 0 0 0 0 0 0 0 0 0
37167- 0 0 0 0 0 0 0 0 0 0 0 0
37168- 0 0 0 0 0 0 0 0 0 0 0 0
37169- 0 0 0 0 0 0 0 0 0 0 0 0
37170- 0 0 0 0 0 0 0 0 0 0 0 0
37171- 0 0 0 0 0 0 0 0 0 14 14 14
37172- 34 34 34 66 66 66 78 78 78 6 6 6
37173- 2 2 6 18 18 18 218 218 218 253 253 253
37174-253 253 253 253 253 253 253 253 253 246 246 246
37175-226 226 226 231 231 231 246 246 246 253 253 253
37176-253 253 253 253 253 253 253 253 253 253 253 253
37177-253 253 253 253 253 253 253 253 253 253 253 253
37178-253 253 253 178 178 178 2 2 6 2 2 6
37179- 2 2 6 2 2 6 2 2 6 2 2 6
37180- 2 2 6 18 18 18 90 90 90 62 62 62
37181- 30 30 30 10 10 10 0 0 0 0 0 0
37182- 0 0 0 0 0 0 0 0 0 0 0 0
37183- 0 0 0 0 0 0 0 0 0 0 0 0
37184- 0 0 0 0 0 0 0 0 0 0 0 0
37185- 0 0 0 0 0 0 0 0 0 0 0 0
37186- 0 0 0 0 0 0 0 0 0 0 0 0
37187- 0 0 0 0 0 0 0 0 0 0 0 0
37188- 0 0 0 0 0 0 0 0 0 0 0 0
37189- 0 0 0 0 0 0 0 0 0 0 0 0
37190- 0 0 0 0 0 0 0 0 0 0 0 0
37191- 0 0 0 0 0 0 10 10 10 26 26 26
37192- 58 58 58 90 90 90 18 18 18 2 2 6
37193- 2 2 6 110 110 110 253 253 253 253 253 253
37194-253 253 253 253 253 253 253 253 253 253 253 253
37195-250 250 250 253 253 253 253 253 253 253 253 253
37196-253 253 253 253 253 253 253 253 253 253 253 253
37197-253 253 253 253 253 253 253 253 253 253 253 253
37198-253 253 253 231 231 231 18 18 18 2 2 6
37199- 2 2 6 2 2 6 2 2 6 2 2 6
37200- 2 2 6 2 2 6 18 18 18 94 94 94
37201- 54 54 54 26 26 26 10 10 10 0 0 0
37202- 0 0 0 0 0 0 0 0 0 0 0 0
37203- 0 0 0 0 0 0 0 0 0 0 0 0
37204- 0 0 0 0 0 0 0 0 0 0 0 0
37205- 0 0 0 0 0 0 0 0 0 0 0 0
37206- 0 0 0 0 0 0 0 0 0 0 0 0
37207- 0 0 0 0 0 0 0 0 0 0 0 0
37208- 0 0 0 0 0 0 0 0 0 0 0 0
37209- 0 0 0 0 0 0 0 0 0 0 0 0
37210- 0 0 0 0 0 0 0 0 0 0 0 0
37211- 0 0 0 6 6 6 22 22 22 50 50 50
37212- 90 90 90 26 26 26 2 2 6 2 2 6
37213- 14 14 14 195 195 195 250 250 250 253 253 253
37214-253 253 253 253 253 253 253 253 253 253 253 253
37215-253 253 253 253 253 253 253 253 253 253 253 253
37216-253 253 253 253 253 253 253 253 253 253 253 253
37217-253 253 253 253 253 253 253 253 253 253 253 253
37218-250 250 250 242 242 242 54 54 54 2 2 6
37219- 2 2 6 2 2 6 2 2 6 2 2 6
37220- 2 2 6 2 2 6 2 2 6 38 38 38
37221- 86 86 86 50 50 50 22 22 22 6 6 6
37222- 0 0 0 0 0 0 0 0 0 0 0 0
37223- 0 0 0 0 0 0 0 0 0 0 0 0
37224- 0 0 0 0 0 0 0 0 0 0 0 0
37225- 0 0 0 0 0 0 0 0 0 0 0 0
37226- 0 0 0 0 0 0 0 0 0 0 0 0
37227- 0 0 0 0 0 0 0 0 0 0 0 0
37228- 0 0 0 0 0 0 0 0 0 0 0 0
37229- 0 0 0 0 0 0 0 0 0 0 0 0
37230- 0 0 0 0 0 0 0 0 0 0 0 0
37231- 6 6 6 14 14 14 38 38 38 82 82 82
37232- 34 34 34 2 2 6 2 2 6 2 2 6
37233- 42 42 42 195 195 195 246 246 246 253 253 253
37234-253 253 253 253 253 253 253 253 253 250 250 250
37235-242 242 242 242 242 242 250 250 250 253 253 253
37236-253 253 253 253 253 253 253 253 253 253 253 253
37237-253 253 253 250 250 250 246 246 246 238 238 238
37238-226 226 226 231 231 231 101 101 101 6 6 6
37239- 2 2 6 2 2 6 2 2 6 2 2 6
37240- 2 2 6 2 2 6 2 2 6 2 2 6
37241- 38 38 38 82 82 82 42 42 42 14 14 14
37242- 6 6 6 0 0 0 0 0 0 0 0 0
37243- 0 0 0 0 0 0 0 0 0 0 0 0
37244- 0 0 0 0 0 0 0 0 0 0 0 0
37245- 0 0 0 0 0 0 0 0 0 0 0 0
37246- 0 0 0 0 0 0 0 0 0 0 0 0
37247- 0 0 0 0 0 0 0 0 0 0 0 0
37248- 0 0 0 0 0 0 0 0 0 0 0 0
37249- 0 0 0 0 0 0 0 0 0 0 0 0
37250- 0 0 0 0 0 0 0 0 0 0 0 0
37251- 10 10 10 26 26 26 62 62 62 66 66 66
37252- 2 2 6 2 2 6 2 2 6 6 6 6
37253- 70 70 70 170 170 170 206 206 206 234 234 234
37254-246 246 246 250 250 250 250 250 250 238 238 238
37255-226 226 226 231 231 231 238 238 238 250 250 250
37256-250 250 250 250 250 250 246 246 246 231 231 231
37257-214 214 214 206 206 206 202 202 202 202 202 202
37258-198 198 198 202 202 202 182 182 182 18 18 18
37259- 2 2 6 2 2 6 2 2 6 2 2 6
37260- 2 2 6 2 2 6 2 2 6 2 2 6
37261- 2 2 6 62 62 62 66 66 66 30 30 30
37262- 10 10 10 0 0 0 0 0 0 0 0 0
37263- 0 0 0 0 0 0 0 0 0 0 0 0
37264- 0 0 0 0 0 0 0 0 0 0 0 0
37265- 0 0 0 0 0 0 0 0 0 0 0 0
37266- 0 0 0 0 0 0 0 0 0 0 0 0
37267- 0 0 0 0 0 0 0 0 0 0 0 0
37268- 0 0 0 0 0 0 0 0 0 0 0 0
37269- 0 0 0 0 0 0 0 0 0 0 0 0
37270- 0 0 0 0 0 0 0 0 0 0 0 0
37271- 14 14 14 42 42 42 82 82 82 18 18 18
37272- 2 2 6 2 2 6 2 2 6 10 10 10
37273- 94 94 94 182 182 182 218 218 218 242 242 242
37274-250 250 250 253 253 253 253 253 253 250 250 250
37275-234 234 234 253 253 253 253 253 253 253 253 253
37276-253 253 253 253 253 253 253 253 253 246 246 246
37277-238 238 238 226 226 226 210 210 210 202 202 202
37278-195 195 195 195 195 195 210 210 210 158 158 158
37279- 6 6 6 14 14 14 50 50 50 14 14 14
37280- 2 2 6 2 2 6 2 2 6 2 2 6
37281- 2 2 6 6 6 6 86 86 86 46 46 46
37282- 18 18 18 6 6 6 0 0 0 0 0 0
37283- 0 0 0 0 0 0 0 0 0 0 0 0
37284- 0 0 0 0 0 0 0 0 0 0 0 0
37285- 0 0 0 0 0 0 0 0 0 0 0 0
37286- 0 0 0 0 0 0 0 0 0 0 0 0
37287- 0 0 0 0 0 0 0 0 0 0 0 0
37288- 0 0 0 0 0 0 0 0 0 0 0 0
37289- 0 0 0 0 0 0 0 0 0 0 0 0
37290- 0 0 0 0 0 0 0 0 0 6 6 6
37291- 22 22 22 54 54 54 70 70 70 2 2 6
37292- 2 2 6 10 10 10 2 2 6 22 22 22
37293-166 166 166 231 231 231 250 250 250 253 253 253
37294-253 253 253 253 253 253 253 253 253 250 250 250
37295-242 242 242 253 253 253 253 253 253 253 253 253
37296-253 253 253 253 253 253 253 253 253 253 253 253
37297-253 253 253 253 253 253 253 253 253 246 246 246
37298-231 231 231 206 206 206 198 198 198 226 226 226
37299- 94 94 94 2 2 6 6 6 6 38 38 38
37300- 30 30 30 2 2 6 2 2 6 2 2 6
37301- 2 2 6 2 2 6 62 62 62 66 66 66
37302- 26 26 26 10 10 10 0 0 0 0 0 0
37303- 0 0 0 0 0 0 0 0 0 0 0 0
37304- 0 0 0 0 0 0 0 0 0 0 0 0
37305- 0 0 0 0 0 0 0 0 0 0 0 0
37306- 0 0 0 0 0 0 0 0 0 0 0 0
37307- 0 0 0 0 0 0 0 0 0 0 0 0
37308- 0 0 0 0 0 0 0 0 0 0 0 0
37309- 0 0 0 0 0 0 0 0 0 0 0 0
37310- 0 0 0 0 0 0 0 0 0 10 10 10
37311- 30 30 30 74 74 74 50 50 50 2 2 6
37312- 26 26 26 26 26 26 2 2 6 106 106 106
37313-238 238 238 253 253 253 253 253 253 253 253 253
37314-253 253 253 253 253 253 253 253 253 253 253 253
37315-253 253 253 253 253 253 253 253 253 253 253 253
37316-253 253 253 253 253 253 253 253 253 253 253 253
37317-253 253 253 253 253 253 253 253 253 253 253 253
37318-253 253 253 246 246 246 218 218 218 202 202 202
37319-210 210 210 14 14 14 2 2 6 2 2 6
37320- 30 30 30 22 22 22 2 2 6 2 2 6
37321- 2 2 6 2 2 6 18 18 18 86 86 86
37322- 42 42 42 14 14 14 0 0 0 0 0 0
37323- 0 0 0 0 0 0 0 0 0 0 0 0
37324- 0 0 0 0 0 0 0 0 0 0 0 0
37325- 0 0 0 0 0 0 0 0 0 0 0 0
37326- 0 0 0 0 0 0 0 0 0 0 0 0
37327- 0 0 0 0 0 0 0 0 0 0 0 0
37328- 0 0 0 0 0 0 0 0 0 0 0 0
37329- 0 0 0 0 0 0 0 0 0 0 0 0
37330- 0 0 0 0 0 0 0 0 0 14 14 14
37331- 42 42 42 90 90 90 22 22 22 2 2 6
37332- 42 42 42 2 2 6 18 18 18 218 218 218
37333-253 253 253 253 253 253 253 253 253 253 253 253
37334-253 253 253 253 253 253 253 253 253 253 253 253
37335-253 253 253 253 253 253 253 253 253 253 253 253
37336-253 253 253 253 253 253 253 253 253 253 253 253
37337-253 253 253 253 253 253 253 253 253 253 253 253
37338-253 253 253 253 253 253 250 250 250 221 221 221
37339-218 218 218 101 101 101 2 2 6 14 14 14
37340- 18 18 18 38 38 38 10 10 10 2 2 6
37341- 2 2 6 2 2 6 2 2 6 78 78 78
37342- 58 58 58 22 22 22 6 6 6 0 0 0
37343- 0 0 0 0 0 0 0 0 0 0 0 0
37344- 0 0 0 0 0 0 0 0 0 0 0 0
37345- 0 0 0 0 0 0 0 0 0 0 0 0
37346- 0 0 0 0 0 0 0 0 0 0 0 0
37347- 0 0 0 0 0 0 0 0 0 0 0 0
37348- 0 0 0 0 0 0 0 0 0 0 0 0
37349- 0 0 0 0 0 0 0 0 0 0 0 0
37350- 0 0 0 0 0 0 6 6 6 18 18 18
37351- 54 54 54 82 82 82 2 2 6 26 26 26
37352- 22 22 22 2 2 6 123 123 123 253 253 253
37353-253 253 253 253 253 253 253 253 253 253 253 253
37354-253 253 253 253 253 253 253 253 253 253 253 253
37355-253 253 253 253 253 253 253 253 253 253 253 253
37356-253 253 253 253 253 253 253 253 253 253 253 253
37357-253 253 253 253 253 253 253 253 253 253 253 253
37358-253 253 253 253 253 253 253 253 253 250 250 250
37359-238 238 238 198 198 198 6 6 6 38 38 38
37360- 58 58 58 26 26 26 38 38 38 2 2 6
37361- 2 2 6 2 2 6 2 2 6 46 46 46
37362- 78 78 78 30 30 30 10 10 10 0 0 0
37363- 0 0 0 0 0 0 0 0 0 0 0 0
37364- 0 0 0 0 0 0 0 0 0 0 0 0
37365- 0 0 0 0 0 0 0 0 0 0 0 0
37366- 0 0 0 0 0 0 0 0 0 0 0 0
37367- 0 0 0 0 0 0 0 0 0 0 0 0
37368- 0 0 0 0 0 0 0 0 0 0 0 0
37369- 0 0 0 0 0 0 0 0 0 0 0 0
37370- 0 0 0 0 0 0 10 10 10 30 30 30
37371- 74 74 74 58 58 58 2 2 6 42 42 42
37372- 2 2 6 22 22 22 231 231 231 253 253 253
37373-253 253 253 253 253 253 253 253 253 253 253 253
37374-253 253 253 253 253 253 253 253 253 250 250 250
37375-253 253 253 253 253 253 253 253 253 253 253 253
37376-253 253 253 253 253 253 253 253 253 253 253 253
37377-253 253 253 253 253 253 253 253 253 253 253 253
37378-253 253 253 253 253 253 253 253 253 253 253 253
37379-253 253 253 246 246 246 46 46 46 38 38 38
37380- 42 42 42 14 14 14 38 38 38 14 14 14
37381- 2 2 6 2 2 6 2 2 6 6 6 6
37382- 86 86 86 46 46 46 14 14 14 0 0 0
37383- 0 0 0 0 0 0 0 0 0 0 0 0
37384- 0 0 0 0 0 0 0 0 0 0 0 0
37385- 0 0 0 0 0 0 0 0 0 0 0 0
37386- 0 0 0 0 0 0 0 0 0 0 0 0
37387- 0 0 0 0 0 0 0 0 0 0 0 0
37388- 0 0 0 0 0 0 0 0 0 0 0 0
37389- 0 0 0 0 0 0 0 0 0 0 0 0
37390- 0 0 0 6 6 6 14 14 14 42 42 42
37391- 90 90 90 18 18 18 18 18 18 26 26 26
37392- 2 2 6 116 116 116 253 253 253 253 253 253
37393-253 253 253 253 253 253 253 253 253 253 253 253
37394-253 253 253 253 253 253 250 250 250 238 238 238
37395-253 253 253 253 253 253 253 253 253 253 253 253
37396-253 253 253 253 253 253 253 253 253 253 253 253
37397-253 253 253 253 253 253 253 253 253 253 253 253
37398-253 253 253 253 253 253 253 253 253 253 253 253
37399-253 253 253 253 253 253 94 94 94 6 6 6
37400- 2 2 6 2 2 6 10 10 10 34 34 34
37401- 2 2 6 2 2 6 2 2 6 2 2 6
37402- 74 74 74 58 58 58 22 22 22 6 6 6
37403- 0 0 0 0 0 0 0 0 0 0 0 0
37404- 0 0 0 0 0 0 0 0 0 0 0 0
37405- 0 0 0 0 0 0 0 0 0 0 0 0
37406- 0 0 0 0 0 0 0 0 0 0 0 0
37407- 0 0 0 0 0 0 0 0 0 0 0 0
37408- 0 0 0 0 0 0 0 0 0 0 0 0
37409- 0 0 0 0 0 0 0 0 0 0 0 0
37410- 0 0 0 10 10 10 26 26 26 66 66 66
37411- 82 82 82 2 2 6 38 38 38 6 6 6
37412- 14 14 14 210 210 210 253 253 253 253 253 253
37413-253 253 253 253 253 253 253 253 253 253 253 253
37414-253 253 253 253 253 253 246 246 246 242 242 242
37415-253 253 253 253 253 253 253 253 253 253 253 253
37416-253 253 253 253 253 253 253 253 253 253 253 253
37417-253 253 253 253 253 253 253 253 253 253 253 253
37418-253 253 253 253 253 253 253 253 253 253 253 253
37419-253 253 253 253 253 253 144 144 144 2 2 6
37420- 2 2 6 2 2 6 2 2 6 46 46 46
37421- 2 2 6 2 2 6 2 2 6 2 2 6
37422- 42 42 42 74 74 74 30 30 30 10 10 10
37423- 0 0 0 0 0 0 0 0 0 0 0 0
37424- 0 0 0 0 0 0 0 0 0 0 0 0
37425- 0 0 0 0 0 0 0 0 0 0 0 0
37426- 0 0 0 0 0 0 0 0 0 0 0 0
37427- 0 0 0 0 0 0 0 0 0 0 0 0
37428- 0 0 0 0 0 0 0 0 0 0 0 0
37429- 0 0 0 0 0 0 0 0 0 0 0 0
37430- 6 6 6 14 14 14 42 42 42 90 90 90
37431- 26 26 26 6 6 6 42 42 42 2 2 6
37432- 74 74 74 250 250 250 253 253 253 253 253 253
37433-253 253 253 253 253 253 253 253 253 253 253 253
37434-253 253 253 253 253 253 242 242 242 242 242 242
37435-253 253 253 253 253 253 253 253 253 253 253 253
37436-253 253 253 253 253 253 253 253 253 253 253 253
37437-253 253 253 253 253 253 253 253 253 253 253 253
37438-253 253 253 253 253 253 253 253 253 253 253 253
37439-253 253 253 253 253 253 182 182 182 2 2 6
37440- 2 2 6 2 2 6 2 2 6 46 46 46
37441- 2 2 6 2 2 6 2 2 6 2 2 6
37442- 10 10 10 86 86 86 38 38 38 10 10 10
37443- 0 0 0 0 0 0 0 0 0 0 0 0
37444- 0 0 0 0 0 0 0 0 0 0 0 0
37445- 0 0 0 0 0 0 0 0 0 0 0 0
37446- 0 0 0 0 0 0 0 0 0 0 0 0
37447- 0 0 0 0 0 0 0 0 0 0 0 0
37448- 0 0 0 0 0 0 0 0 0 0 0 0
37449- 0 0 0 0 0 0 0 0 0 0 0 0
37450- 10 10 10 26 26 26 66 66 66 82 82 82
37451- 2 2 6 22 22 22 18 18 18 2 2 6
37452-149 149 149 253 253 253 253 253 253 253 253 253
37453-253 253 253 253 253 253 253 253 253 253 253 253
37454-253 253 253 253 253 253 234 234 234 242 242 242
37455-253 253 253 253 253 253 253 253 253 253 253 253
37456-253 253 253 253 253 253 253 253 253 253 253 253
37457-253 253 253 253 253 253 253 253 253 253 253 253
37458-253 253 253 253 253 253 253 253 253 253 253 253
37459-253 253 253 253 253 253 206 206 206 2 2 6
37460- 2 2 6 2 2 6 2 2 6 38 38 38
37461- 2 2 6 2 2 6 2 2 6 2 2 6
37462- 6 6 6 86 86 86 46 46 46 14 14 14
37463- 0 0 0 0 0 0 0 0 0 0 0 0
37464- 0 0 0 0 0 0 0 0 0 0 0 0
37465- 0 0 0 0 0 0 0 0 0 0 0 0
37466- 0 0 0 0 0 0 0 0 0 0 0 0
37467- 0 0 0 0 0 0 0 0 0 0 0 0
37468- 0 0 0 0 0 0 0 0 0 0 0 0
37469- 0 0 0 0 0 0 0 0 0 6 6 6
37470- 18 18 18 46 46 46 86 86 86 18 18 18
37471- 2 2 6 34 34 34 10 10 10 6 6 6
37472-210 210 210 253 253 253 253 253 253 253 253 253
37473-253 253 253 253 253 253 253 253 253 253 253 253
37474-253 253 253 253 253 253 234 234 234 242 242 242
37475-253 253 253 253 253 253 253 253 253 253 253 253
37476-253 253 253 253 253 253 253 253 253 253 253 253
37477-253 253 253 253 253 253 253 253 253 253 253 253
37478-253 253 253 253 253 253 253 253 253 253 253 253
37479-253 253 253 253 253 253 221 221 221 6 6 6
37480- 2 2 6 2 2 6 6 6 6 30 30 30
37481- 2 2 6 2 2 6 2 2 6 2 2 6
37482- 2 2 6 82 82 82 54 54 54 18 18 18
37483- 6 6 6 0 0 0 0 0 0 0 0 0
37484- 0 0 0 0 0 0 0 0 0 0 0 0
37485- 0 0 0 0 0 0 0 0 0 0 0 0
37486- 0 0 0 0 0 0 0 0 0 0 0 0
37487- 0 0 0 0 0 0 0 0 0 0 0 0
37488- 0 0 0 0 0 0 0 0 0 0 0 0
37489- 0 0 0 0 0 0 0 0 0 10 10 10
37490- 26 26 26 66 66 66 62 62 62 2 2 6
37491- 2 2 6 38 38 38 10 10 10 26 26 26
37492-238 238 238 253 253 253 253 253 253 253 253 253
37493-253 253 253 253 253 253 253 253 253 253 253 253
37494-253 253 253 253 253 253 231 231 231 238 238 238
37495-253 253 253 253 253 253 253 253 253 253 253 253
37496-253 253 253 253 253 253 253 253 253 253 253 253
37497-253 253 253 253 253 253 253 253 253 253 253 253
37498-253 253 253 253 253 253 253 253 253 253 253 253
37499-253 253 253 253 253 253 231 231 231 6 6 6
37500- 2 2 6 2 2 6 10 10 10 30 30 30
37501- 2 2 6 2 2 6 2 2 6 2 2 6
37502- 2 2 6 66 66 66 58 58 58 22 22 22
37503- 6 6 6 0 0 0 0 0 0 0 0 0
37504- 0 0 0 0 0 0 0 0 0 0 0 0
37505- 0 0 0 0 0 0 0 0 0 0 0 0
37506- 0 0 0 0 0 0 0 0 0 0 0 0
37507- 0 0 0 0 0 0 0 0 0 0 0 0
37508- 0 0 0 0 0 0 0 0 0 0 0 0
37509- 0 0 0 0 0 0 0 0 0 10 10 10
37510- 38 38 38 78 78 78 6 6 6 2 2 6
37511- 2 2 6 46 46 46 14 14 14 42 42 42
37512-246 246 246 253 253 253 253 253 253 253 253 253
37513-253 253 253 253 253 253 253 253 253 253 253 253
37514-253 253 253 253 253 253 231 231 231 242 242 242
37515-253 253 253 253 253 253 253 253 253 253 253 253
37516-253 253 253 253 253 253 253 253 253 253 253 253
37517-253 253 253 253 253 253 253 253 253 253 253 253
37518-253 253 253 253 253 253 253 253 253 253 253 253
37519-253 253 253 253 253 253 234 234 234 10 10 10
37520- 2 2 6 2 2 6 22 22 22 14 14 14
37521- 2 2 6 2 2 6 2 2 6 2 2 6
37522- 2 2 6 66 66 66 62 62 62 22 22 22
37523- 6 6 6 0 0 0 0 0 0 0 0 0
37524- 0 0 0 0 0 0 0 0 0 0 0 0
37525- 0 0 0 0 0 0 0 0 0 0 0 0
37526- 0 0 0 0 0 0 0 0 0 0 0 0
37527- 0 0 0 0 0 0 0 0 0 0 0 0
37528- 0 0 0 0 0 0 0 0 0 0 0 0
37529- 0 0 0 0 0 0 6 6 6 18 18 18
37530- 50 50 50 74 74 74 2 2 6 2 2 6
37531- 14 14 14 70 70 70 34 34 34 62 62 62
37532-250 250 250 253 253 253 253 253 253 253 253 253
37533-253 253 253 253 253 253 253 253 253 253 253 253
37534-253 253 253 253 253 253 231 231 231 246 246 246
37535-253 253 253 253 253 253 253 253 253 253 253 253
37536-253 253 253 253 253 253 253 253 253 253 253 253
37537-253 253 253 253 253 253 253 253 253 253 253 253
37538-253 253 253 253 253 253 253 253 253 253 253 253
37539-253 253 253 253 253 253 234 234 234 14 14 14
37540- 2 2 6 2 2 6 30 30 30 2 2 6
37541- 2 2 6 2 2 6 2 2 6 2 2 6
37542- 2 2 6 66 66 66 62 62 62 22 22 22
37543- 6 6 6 0 0 0 0 0 0 0 0 0
37544- 0 0 0 0 0 0 0 0 0 0 0 0
37545- 0 0 0 0 0 0 0 0 0 0 0 0
37546- 0 0 0 0 0 0 0 0 0 0 0 0
37547- 0 0 0 0 0 0 0 0 0 0 0 0
37548- 0 0 0 0 0 0 0 0 0 0 0 0
37549- 0 0 0 0 0 0 6 6 6 18 18 18
37550- 54 54 54 62 62 62 2 2 6 2 2 6
37551- 2 2 6 30 30 30 46 46 46 70 70 70
37552-250 250 250 253 253 253 253 253 253 253 253 253
37553-253 253 253 253 253 253 253 253 253 253 253 253
37554-253 253 253 253 253 253 231 231 231 246 246 246
37555-253 253 253 253 253 253 253 253 253 253 253 253
37556-253 253 253 253 253 253 253 253 253 253 253 253
37557-253 253 253 253 253 253 253 253 253 253 253 253
37558-253 253 253 253 253 253 253 253 253 253 253 253
37559-253 253 253 253 253 253 226 226 226 10 10 10
37560- 2 2 6 6 6 6 30 30 30 2 2 6
37561- 2 2 6 2 2 6 2 2 6 2 2 6
37562- 2 2 6 66 66 66 58 58 58 22 22 22
37563- 6 6 6 0 0 0 0 0 0 0 0 0
37564- 0 0 0 0 0 0 0 0 0 0 0 0
37565- 0 0 0 0 0 0 0 0 0 0 0 0
37566- 0 0 0 0 0 0 0 0 0 0 0 0
37567- 0 0 0 0 0 0 0 0 0 0 0 0
37568- 0 0 0 0 0 0 0 0 0 0 0 0
37569- 0 0 0 0 0 0 6 6 6 22 22 22
37570- 58 58 58 62 62 62 2 2 6 2 2 6
37571- 2 2 6 2 2 6 30 30 30 78 78 78
37572-250 250 250 253 253 253 253 253 253 253 253 253
37573-253 253 253 253 253 253 253 253 253 253 253 253
37574-253 253 253 253 253 253 231 231 231 246 246 246
37575-253 253 253 253 253 253 253 253 253 253 253 253
37576-253 253 253 253 253 253 253 253 253 253 253 253
37577-253 253 253 253 253 253 253 253 253 253 253 253
37578-253 253 253 253 253 253 253 253 253 253 253 253
37579-253 253 253 253 253 253 206 206 206 2 2 6
37580- 22 22 22 34 34 34 18 14 6 22 22 22
37581- 26 26 26 18 18 18 6 6 6 2 2 6
37582- 2 2 6 82 82 82 54 54 54 18 18 18
37583- 6 6 6 0 0 0 0 0 0 0 0 0
37584- 0 0 0 0 0 0 0 0 0 0 0 0
37585- 0 0 0 0 0 0 0 0 0 0 0 0
37586- 0 0 0 0 0 0 0 0 0 0 0 0
37587- 0 0 0 0 0 0 0 0 0 0 0 0
37588- 0 0 0 0 0 0 0 0 0 0 0 0
37589- 0 0 0 0 0 0 6 6 6 26 26 26
37590- 62 62 62 106 106 106 74 54 14 185 133 11
37591-210 162 10 121 92 8 6 6 6 62 62 62
37592-238 238 238 253 253 253 253 253 253 253 253 253
37593-253 253 253 253 253 253 253 253 253 253 253 253
37594-253 253 253 253 253 253 231 231 231 246 246 246
37595-253 253 253 253 253 253 253 253 253 253 253 253
37596-253 253 253 253 253 253 253 253 253 253 253 253
37597-253 253 253 253 253 253 253 253 253 253 253 253
37598-253 253 253 253 253 253 253 253 253 253 253 253
37599-253 253 253 253 253 253 158 158 158 18 18 18
37600- 14 14 14 2 2 6 2 2 6 2 2 6
37601- 6 6 6 18 18 18 66 66 66 38 38 38
37602- 6 6 6 94 94 94 50 50 50 18 18 18
37603- 6 6 6 0 0 0 0 0 0 0 0 0
37604- 0 0 0 0 0 0 0 0 0 0 0 0
37605- 0 0 0 0 0 0 0 0 0 0 0 0
37606- 0 0 0 0 0 0 0 0 0 0 0 0
37607- 0 0 0 0 0 0 0 0 0 0 0 0
37608- 0 0 0 0 0 0 0 0 0 6 6 6
37609- 10 10 10 10 10 10 18 18 18 38 38 38
37610- 78 78 78 142 134 106 216 158 10 242 186 14
37611-246 190 14 246 190 14 156 118 10 10 10 10
37612- 90 90 90 238 238 238 253 253 253 253 253 253
37613-253 253 253 253 253 253 253 253 253 253 253 253
37614-253 253 253 253 253 253 231 231 231 250 250 250
37615-253 253 253 253 253 253 253 253 253 253 253 253
37616-253 253 253 253 253 253 253 253 253 253 253 253
37617-253 253 253 253 253 253 253 253 253 253 253 253
37618-253 253 253 253 253 253 253 253 253 246 230 190
37619-238 204 91 238 204 91 181 142 44 37 26 9
37620- 2 2 6 2 2 6 2 2 6 2 2 6
37621- 2 2 6 2 2 6 38 38 38 46 46 46
37622- 26 26 26 106 106 106 54 54 54 18 18 18
37623- 6 6 6 0 0 0 0 0 0 0 0 0
37624- 0 0 0 0 0 0 0 0 0 0 0 0
37625- 0 0 0 0 0 0 0 0 0 0 0 0
37626- 0 0 0 0 0 0 0 0 0 0 0 0
37627- 0 0 0 0 0 0 0 0 0 0 0 0
37628- 0 0 0 6 6 6 14 14 14 22 22 22
37629- 30 30 30 38 38 38 50 50 50 70 70 70
37630-106 106 106 190 142 34 226 170 11 242 186 14
37631-246 190 14 246 190 14 246 190 14 154 114 10
37632- 6 6 6 74 74 74 226 226 226 253 253 253
37633-253 253 253 253 253 253 253 253 253 253 253 253
37634-253 253 253 253 253 253 231 231 231 250 250 250
37635-253 253 253 253 253 253 253 253 253 253 253 253
37636-253 253 253 253 253 253 253 253 253 253 253 253
37637-253 253 253 253 253 253 253 253 253 253 253 253
37638-253 253 253 253 253 253 253 253 253 228 184 62
37639-241 196 14 241 208 19 232 195 16 38 30 10
37640- 2 2 6 2 2 6 2 2 6 2 2 6
37641- 2 2 6 6 6 6 30 30 30 26 26 26
37642-203 166 17 154 142 90 66 66 66 26 26 26
37643- 6 6 6 0 0 0 0 0 0 0 0 0
37644- 0 0 0 0 0 0 0 0 0 0 0 0
37645- 0 0 0 0 0 0 0 0 0 0 0 0
37646- 0 0 0 0 0 0 0 0 0 0 0 0
37647- 0 0 0 0 0 0 0 0 0 0 0 0
37648- 6 6 6 18 18 18 38 38 38 58 58 58
37649- 78 78 78 86 86 86 101 101 101 123 123 123
37650-175 146 61 210 150 10 234 174 13 246 186 14
37651-246 190 14 246 190 14 246 190 14 238 190 10
37652-102 78 10 2 2 6 46 46 46 198 198 198
37653-253 253 253 253 253 253 253 253 253 253 253 253
37654-253 253 253 253 253 253 234 234 234 242 242 242
37655-253 253 253 253 253 253 253 253 253 253 253 253
37656-253 253 253 253 253 253 253 253 253 253 253 253
37657-253 253 253 253 253 253 253 253 253 253 253 253
37658-253 253 253 253 253 253 253 253 253 224 178 62
37659-242 186 14 241 196 14 210 166 10 22 18 6
37660- 2 2 6 2 2 6 2 2 6 2 2 6
37661- 2 2 6 2 2 6 6 6 6 121 92 8
37662-238 202 15 232 195 16 82 82 82 34 34 34
37663- 10 10 10 0 0 0 0 0 0 0 0 0
37664- 0 0 0 0 0 0 0 0 0 0 0 0
37665- 0 0 0 0 0 0 0 0 0 0 0 0
37666- 0 0 0 0 0 0 0 0 0 0 0 0
37667- 0 0 0 0 0 0 0 0 0 0 0 0
37668- 14 14 14 38 38 38 70 70 70 154 122 46
37669-190 142 34 200 144 11 197 138 11 197 138 11
37670-213 154 11 226 170 11 242 186 14 246 190 14
37671-246 190 14 246 190 14 246 190 14 246 190 14
37672-225 175 15 46 32 6 2 2 6 22 22 22
37673-158 158 158 250 250 250 253 253 253 253 253 253
37674-253 253 253 253 253 253 253 253 253 253 253 253
37675-253 253 253 253 253 253 253 253 253 253 253 253
37676-253 253 253 253 253 253 253 253 253 253 253 253
37677-253 253 253 253 253 253 253 253 253 253 253 253
37678-253 253 253 250 250 250 242 242 242 224 178 62
37679-239 182 13 236 186 11 213 154 11 46 32 6
37680- 2 2 6 2 2 6 2 2 6 2 2 6
37681- 2 2 6 2 2 6 61 42 6 225 175 15
37682-238 190 10 236 186 11 112 100 78 42 42 42
37683- 14 14 14 0 0 0 0 0 0 0 0 0
37684- 0 0 0 0 0 0 0 0 0 0 0 0
37685- 0 0 0 0 0 0 0 0 0 0 0 0
37686- 0 0 0 0 0 0 0 0 0 0 0 0
37687- 0 0 0 0 0 0 0 0 0 6 6 6
37688- 22 22 22 54 54 54 154 122 46 213 154 11
37689-226 170 11 230 174 11 226 170 11 226 170 11
37690-236 178 12 242 186 14 246 190 14 246 190 14
37691-246 190 14 246 190 14 246 190 14 246 190 14
37692-241 196 14 184 144 12 10 10 10 2 2 6
37693- 6 6 6 116 116 116 242 242 242 253 253 253
37694-253 253 253 253 253 253 253 253 253 253 253 253
37695-253 253 253 253 253 253 253 253 253 253 253 253
37696-253 253 253 253 253 253 253 253 253 253 253 253
37697-253 253 253 253 253 253 253 253 253 253 253 253
37698-253 253 253 231 231 231 198 198 198 214 170 54
37699-236 178 12 236 178 12 210 150 10 137 92 6
37700- 18 14 6 2 2 6 2 2 6 2 2 6
37701- 6 6 6 70 47 6 200 144 11 236 178 12
37702-239 182 13 239 182 13 124 112 88 58 58 58
37703- 22 22 22 6 6 6 0 0 0 0 0 0
37704- 0 0 0 0 0 0 0 0 0 0 0 0
37705- 0 0 0 0 0 0 0 0 0 0 0 0
37706- 0 0 0 0 0 0 0 0 0 0 0 0
37707- 0 0 0 0 0 0 0 0 0 10 10 10
37708- 30 30 30 70 70 70 180 133 36 226 170 11
37709-239 182 13 242 186 14 242 186 14 246 186 14
37710-246 190 14 246 190 14 246 190 14 246 190 14
37711-246 190 14 246 190 14 246 190 14 246 190 14
37712-246 190 14 232 195 16 98 70 6 2 2 6
37713- 2 2 6 2 2 6 66 66 66 221 221 221
37714-253 253 253 253 253 253 253 253 253 253 253 253
37715-253 253 253 253 253 253 253 253 253 253 253 253
37716-253 253 253 253 253 253 253 253 253 253 253 253
37717-253 253 253 253 253 253 253 253 253 253 253 253
37718-253 253 253 206 206 206 198 198 198 214 166 58
37719-230 174 11 230 174 11 216 158 10 192 133 9
37720-163 110 8 116 81 8 102 78 10 116 81 8
37721-167 114 7 197 138 11 226 170 11 239 182 13
37722-242 186 14 242 186 14 162 146 94 78 78 78
37723- 34 34 34 14 14 14 6 6 6 0 0 0
37724- 0 0 0 0 0 0 0 0 0 0 0 0
37725- 0 0 0 0 0 0 0 0 0 0 0 0
37726- 0 0 0 0 0 0 0 0 0 0 0 0
37727- 0 0 0 0 0 0 0 0 0 6 6 6
37728- 30 30 30 78 78 78 190 142 34 226 170 11
37729-239 182 13 246 190 14 246 190 14 246 190 14
37730-246 190 14 246 190 14 246 190 14 246 190 14
37731-246 190 14 246 190 14 246 190 14 246 190 14
37732-246 190 14 241 196 14 203 166 17 22 18 6
37733- 2 2 6 2 2 6 2 2 6 38 38 38
37734-218 218 218 253 253 253 253 253 253 253 253 253
37735-253 253 253 253 253 253 253 253 253 253 253 253
37736-253 253 253 253 253 253 253 253 253 253 253 253
37737-253 253 253 253 253 253 253 253 253 253 253 253
37738-250 250 250 206 206 206 198 198 198 202 162 69
37739-226 170 11 236 178 12 224 166 10 210 150 10
37740-200 144 11 197 138 11 192 133 9 197 138 11
37741-210 150 10 226 170 11 242 186 14 246 190 14
37742-246 190 14 246 186 14 225 175 15 124 112 88
37743- 62 62 62 30 30 30 14 14 14 6 6 6
37744- 0 0 0 0 0 0 0 0 0 0 0 0
37745- 0 0 0 0 0 0 0 0 0 0 0 0
37746- 0 0 0 0 0 0 0 0 0 0 0 0
37747- 0 0 0 0 0 0 0 0 0 10 10 10
37748- 30 30 30 78 78 78 174 135 50 224 166 10
37749-239 182 13 246 190 14 246 190 14 246 190 14
37750-246 190 14 246 190 14 246 190 14 246 190 14
37751-246 190 14 246 190 14 246 190 14 246 190 14
37752-246 190 14 246 190 14 241 196 14 139 102 15
37753- 2 2 6 2 2 6 2 2 6 2 2 6
37754- 78 78 78 250 250 250 253 253 253 253 253 253
37755-253 253 253 253 253 253 253 253 253 253 253 253
37756-253 253 253 253 253 253 253 253 253 253 253 253
37757-253 253 253 253 253 253 253 253 253 253 253 253
37758-250 250 250 214 214 214 198 198 198 190 150 46
37759-219 162 10 236 178 12 234 174 13 224 166 10
37760-216 158 10 213 154 11 213 154 11 216 158 10
37761-226 170 11 239 182 13 246 190 14 246 190 14
37762-246 190 14 246 190 14 242 186 14 206 162 42
37763-101 101 101 58 58 58 30 30 30 14 14 14
37764- 6 6 6 0 0 0 0 0 0 0 0 0
37765- 0 0 0 0 0 0 0 0 0 0 0 0
37766- 0 0 0 0 0 0 0 0 0 0 0 0
37767- 0 0 0 0 0 0 0 0 0 10 10 10
37768- 30 30 30 74 74 74 174 135 50 216 158 10
37769-236 178 12 246 190 14 246 190 14 246 190 14
37770-246 190 14 246 190 14 246 190 14 246 190 14
37771-246 190 14 246 190 14 246 190 14 246 190 14
37772-246 190 14 246 190 14 241 196 14 226 184 13
37773- 61 42 6 2 2 6 2 2 6 2 2 6
37774- 22 22 22 238 238 238 253 253 253 253 253 253
37775-253 253 253 253 253 253 253 253 253 253 253 253
37776-253 253 253 253 253 253 253 253 253 253 253 253
37777-253 253 253 253 253 253 253 253 253 253 253 253
37778-253 253 253 226 226 226 187 187 187 180 133 36
37779-216 158 10 236 178 12 239 182 13 236 178 12
37780-230 174 11 226 170 11 226 170 11 230 174 11
37781-236 178 12 242 186 14 246 190 14 246 190 14
37782-246 190 14 246 190 14 246 186 14 239 182 13
37783-206 162 42 106 106 106 66 66 66 34 34 34
37784- 14 14 14 6 6 6 0 0 0 0 0 0
37785- 0 0 0 0 0 0 0 0 0 0 0 0
37786- 0 0 0 0 0 0 0 0 0 0 0 0
37787- 0 0 0 0 0 0 0 0 0 6 6 6
37788- 26 26 26 70 70 70 163 133 67 213 154 11
37789-236 178 12 246 190 14 246 190 14 246 190 14
37790-246 190 14 246 190 14 246 190 14 246 190 14
37791-246 190 14 246 190 14 246 190 14 246 190 14
37792-246 190 14 246 190 14 246 190 14 241 196 14
37793-190 146 13 18 14 6 2 2 6 2 2 6
37794- 46 46 46 246 246 246 253 253 253 253 253 253
37795-253 253 253 253 253 253 253 253 253 253 253 253
37796-253 253 253 253 253 253 253 253 253 253 253 253
37797-253 253 253 253 253 253 253 253 253 253 253 253
37798-253 253 253 221 221 221 86 86 86 156 107 11
37799-216 158 10 236 178 12 242 186 14 246 186 14
37800-242 186 14 239 182 13 239 182 13 242 186 14
37801-242 186 14 246 186 14 246 190 14 246 190 14
37802-246 190 14 246 190 14 246 190 14 246 190 14
37803-242 186 14 225 175 15 142 122 72 66 66 66
37804- 30 30 30 10 10 10 0 0 0 0 0 0
37805- 0 0 0 0 0 0 0 0 0 0 0 0
37806- 0 0 0 0 0 0 0 0 0 0 0 0
37807- 0 0 0 0 0 0 0 0 0 6 6 6
37808- 26 26 26 70 70 70 163 133 67 210 150 10
37809-236 178 12 246 190 14 246 190 14 246 190 14
37810-246 190 14 246 190 14 246 190 14 246 190 14
37811-246 190 14 246 190 14 246 190 14 246 190 14
37812-246 190 14 246 190 14 246 190 14 246 190 14
37813-232 195 16 121 92 8 34 34 34 106 106 106
37814-221 221 221 253 253 253 253 253 253 253 253 253
37815-253 253 253 253 253 253 253 253 253 253 253 253
37816-253 253 253 253 253 253 253 253 253 253 253 253
37817-253 253 253 253 253 253 253 253 253 253 253 253
37818-242 242 242 82 82 82 18 14 6 163 110 8
37819-216 158 10 236 178 12 242 186 14 246 190 14
37820-246 190 14 246 190 14 246 190 14 246 190 14
37821-246 190 14 246 190 14 246 190 14 246 190 14
37822-246 190 14 246 190 14 246 190 14 246 190 14
37823-246 190 14 246 190 14 242 186 14 163 133 67
37824- 46 46 46 18 18 18 6 6 6 0 0 0
37825- 0 0 0 0 0 0 0 0 0 0 0 0
37826- 0 0 0 0 0 0 0 0 0 0 0 0
37827- 0 0 0 0 0 0 0 0 0 10 10 10
37828- 30 30 30 78 78 78 163 133 67 210 150 10
37829-236 178 12 246 186 14 246 190 14 246 190 14
37830-246 190 14 246 190 14 246 190 14 246 190 14
37831-246 190 14 246 190 14 246 190 14 246 190 14
37832-246 190 14 246 190 14 246 190 14 246 190 14
37833-241 196 14 215 174 15 190 178 144 253 253 253
37834-253 253 253 253 253 253 253 253 253 253 253 253
37835-253 253 253 253 253 253 253 253 253 253 253 253
37836-253 253 253 253 253 253 253 253 253 253 253 253
37837-253 253 253 253 253 253 253 253 253 218 218 218
37838- 58 58 58 2 2 6 22 18 6 167 114 7
37839-216 158 10 236 178 12 246 186 14 246 190 14
37840-246 190 14 246 190 14 246 190 14 246 190 14
37841-246 190 14 246 190 14 246 190 14 246 190 14
37842-246 190 14 246 190 14 246 190 14 246 190 14
37843-246 190 14 246 186 14 242 186 14 190 150 46
37844- 54 54 54 22 22 22 6 6 6 0 0 0
37845- 0 0 0 0 0 0 0 0 0 0 0 0
37846- 0 0 0 0 0 0 0 0 0 0 0 0
37847- 0 0 0 0 0 0 0 0 0 14 14 14
37848- 38 38 38 86 86 86 180 133 36 213 154 11
37849-236 178 12 246 186 14 246 190 14 246 190 14
37850-246 190 14 246 190 14 246 190 14 246 190 14
37851-246 190 14 246 190 14 246 190 14 246 190 14
37852-246 190 14 246 190 14 246 190 14 246 190 14
37853-246 190 14 232 195 16 190 146 13 214 214 214
37854-253 253 253 253 253 253 253 253 253 253 253 253
37855-253 253 253 253 253 253 253 253 253 253 253 253
37856-253 253 253 253 253 253 253 253 253 253 253 253
37857-253 253 253 250 250 250 170 170 170 26 26 26
37858- 2 2 6 2 2 6 37 26 9 163 110 8
37859-219 162 10 239 182 13 246 186 14 246 190 14
37860-246 190 14 246 190 14 246 190 14 246 190 14
37861-246 190 14 246 190 14 246 190 14 246 190 14
37862-246 190 14 246 190 14 246 190 14 246 190 14
37863-246 186 14 236 178 12 224 166 10 142 122 72
37864- 46 46 46 18 18 18 6 6 6 0 0 0
37865- 0 0 0 0 0 0 0 0 0 0 0 0
37866- 0 0 0 0 0 0 0 0 0 0 0 0
37867- 0 0 0 0 0 0 6 6 6 18 18 18
37868- 50 50 50 109 106 95 192 133 9 224 166 10
37869-242 186 14 246 190 14 246 190 14 246 190 14
37870-246 190 14 246 190 14 246 190 14 246 190 14
37871-246 190 14 246 190 14 246 190 14 246 190 14
37872-246 190 14 246 190 14 246 190 14 246 190 14
37873-242 186 14 226 184 13 210 162 10 142 110 46
37874-226 226 226 253 253 253 253 253 253 253 253 253
37875-253 253 253 253 253 253 253 253 253 253 253 253
37876-253 253 253 253 253 253 253 253 253 253 253 253
37877-198 198 198 66 66 66 2 2 6 2 2 6
37878- 2 2 6 2 2 6 50 34 6 156 107 11
37879-219 162 10 239 182 13 246 186 14 246 190 14
37880-246 190 14 246 190 14 246 190 14 246 190 14
37881-246 190 14 246 190 14 246 190 14 246 190 14
37882-246 190 14 246 190 14 246 190 14 242 186 14
37883-234 174 13 213 154 11 154 122 46 66 66 66
37884- 30 30 30 10 10 10 0 0 0 0 0 0
37885- 0 0 0 0 0 0 0 0 0 0 0 0
37886- 0 0 0 0 0 0 0 0 0 0 0 0
37887- 0 0 0 0 0 0 6 6 6 22 22 22
37888- 58 58 58 154 121 60 206 145 10 234 174 13
37889-242 186 14 246 186 14 246 190 14 246 190 14
37890-246 190 14 246 190 14 246 190 14 246 190 14
37891-246 190 14 246 190 14 246 190 14 246 190 14
37892-246 190 14 246 190 14 246 190 14 246 190 14
37893-246 186 14 236 178 12 210 162 10 163 110 8
37894- 61 42 6 138 138 138 218 218 218 250 250 250
37895-253 253 253 253 253 253 253 253 253 250 250 250
37896-242 242 242 210 210 210 144 144 144 66 66 66
37897- 6 6 6 2 2 6 2 2 6 2 2 6
37898- 2 2 6 2 2 6 61 42 6 163 110 8
37899-216 158 10 236 178 12 246 190 14 246 190 14
37900-246 190 14 246 190 14 246 190 14 246 190 14
37901-246 190 14 246 190 14 246 190 14 246 190 14
37902-246 190 14 239 182 13 230 174 11 216 158 10
37903-190 142 34 124 112 88 70 70 70 38 38 38
37904- 18 18 18 6 6 6 0 0 0 0 0 0
37905- 0 0 0 0 0 0 0 0 0 0 0 0
37906- 0 0 0 0 0 0 0 0 0 0 0 0
37907- 0 0 0 0 0 0 6 6 6 22 22 22
37908- 62 62 62 168 124 44 206 145 10 224 166 10
37909-236 178 12 239 182 13 242 186 14 242 186 14
37910-246 186 14 246 190 14 246 190 14 246 190 14
37911-246 190 14 246 190 14 246 190 14 246 190 14
37912-246 190 14 246 190 14 246 190 14 246 190 14
37913-246 190 14 236 178 12 216 158 10 175 118 6
37914- 80 54 7 2 2 6 6 6 6 30 30 30
37915- 54 54 54 62 62 62 50 50 50 38 38 38
37916- 14 14 14 2 2 6 2 2 6 2 2 6
37917- 2 2 6 2 2 6 2 2 6 2 2 6
37918- 2 2 6 6 6 6 80 54 7 167 114 7
37919-213 154 11 236 178 12 246 190 14 246 190 14
37920-246 190 14 246 190 14 246 190 14 246 190 14
37921-246 190 14 242 186 14 239 182 13 239 182 13
37922-230 174 11 210 150 10 174 135 50 124 112 88
37923- 82 82 82 54 54 54 34 34 34 18 18 18
37924- 6 6 6 0 0 0 0 0 0 0 0 0
37925- 0 0 0 0 0 0 0 0 0 0 0 0
37926- 0 0 0 0 0 0 0 0 0 0 0 0
37927- 0 0 0 0 0 0 6 6 6 18 18 18
37928- 50 50 50 158 118 36 192 133 9 200 144 11
37929-216 158 10 219 162 10 224 166 10 226 170 11
37930-230 174 11 236 178 12 239 182 13 239 182 13
37931-242 186 14 246 186 14 246 190 14 246 190 14
37932-246 190 14 246 190 14 246 190 14 246 190 14
37933-246 186 14 230 174 11 210 150 10 163 110 8
37934-104 69 6 10 10 10 2 2 6 2 2 6
37935- 2 2 6 2 2 6 2 2 6 2 2 6
37936- 2 2 6 2 2 6 2 2 6 2 2 6
37937- 2 2 6 2 2 6 2 2 6 2 2 6
37938- 2 2 6 6 6 6 91 60 6 167 114 7
37939-206 145 10 230 174 11 242 186 14 246 190 14
37940-246 190 14 246 190 14 246 186 14 242 186 14
37941-239 182 13 230 174 11 224 166 10 213 154 11
37942-180 133 36 124 112 88 86 86 86 58 58 58
37943- 38 38 38 22 22 22 10 10 10 6 6 6
37944- 0 0 0 0 0 0 0 0 0 0 0 0
37945- 0 0 0 0 0 0 0 0 0 0 0 0
37946- 0 0 0 0 0 0 0 0 0 0 0 0
37947- 0 0 0 0 0 0 0 0 0 14 14 14
37948- 34 34 34 70 70 70 138 110 50 158 118 36
37949-167 114 7 180 123 7 192 133 9 197 138 11
37950-200 144 11 206 145 10 213 154 11 219 162 10
37951-224 166 10 230 174 11 239 182 13 242 186 14
37952-246 186 14 246 186 14 246 186 14 246 186 14
37953-239 182 13 216 158 10 185 133 11 152 99 6
37954-104 69 6 18 14 6 2 2 6 2 2 6
37955- 2 2 6 2 2 6 2 2 6 2 2 6
37956- 2 2 6 2 2 6 2 2 6 2 2 6
37957- 2 2 6 2 2 6 2 2 6 2 2 6
37958- 2 2 6 6 6 6 80 54 7 152 99 6
37959-192 133 9 219 162 10 236 178 12 239 182 13
37960-246 186 14 242 186 14 239 182 13 236 178 12
37961-224 166 10 206 145 10 192 133 9 154 121 60
37962- 94 94 94 62 62 62 42 42 42 22 22 22
37963- 14 14 14 6 6 6 0 0 0 0 0 0
37964- 0 0 0 0 0 0 0 0 0 0 0 0
37965- 0 0 0 0 0 0 0 0 0 0 0 0
37966- 0 0 0 0 0 0 0 0 0 0 0 0
37967- 0 0 0 0 0 0 0 0 0 6 6 6
37968- 18 18 18 34 34 34 58 58 58 78 78 78
37969-101 98 89 124 112 88 142 110 46 156 107 11
37970-163 110 8 167 114 7 175 118 6 180 123 7
37971-185 133 11 197 138 11 210 150 10 219 162 10
37972-226 170 11 236 178 12 236 178 12 234 174 13
37973-219 162 10 197 138 11 163 110 8 130 83 6
37974- 91 60 6 10 10 10 2 2 6 2 2 6
37975- 18 18 18 38 38 38 38 38 38 38 38 38
37976- 38 38 38 38 38 38 38 38 38 38 38 38
37977- 38 38 38 38 38 38 26 26 26 2 2 6
37978- 2 2 6 6 6 6 70 47 6 137 92 6
37979-175 118 6 200 144 11 219 162 10 230 174 11
37980-234 174 13 230 174 11 219 162 10 210 150 10
37981-192 133 9 163 110 8 124 112 88 82 82 82
37982- 50 50 50 30 30 30 14 14 14 6 6 6
37983- 0 0 0 0 0 0 0 0 0 0 0 0
37984- 0 0 0 0 0 0 0 0 0 0 0 0
37985- 0 0 0 0 0 0 0 0 0 0 0 0
37986- 0 0 0 0 0 0 0 0 0 0 0 0
37987- 0 0 0 0 0 0 0 0 0 0 0 0
37988- 6 6 6 14 14 14 22 22 22 34 34 34
37989- 42 42 42 58 58 58 74 74 74 86 86 86
37990-101 98 89 122 102 70 130 98 46 121 87 25
37991-137 92 6 152 99 6 163 110 8 180 123 7
37992-185 133 11 197 138 11 206 145 10 200 144 11
37993-180 123 7 156 107 11 130 83 6 104 69 6
37994- 50 34 6 54 54 54 110 110 110 101 98 89
37995- 86 86 86 82 82 82 78 78 78 78 78 78
37996- 78 78 78 78 78 78 78 78 78 78 78 78
37997- 78 78 78 82 82 82 86 86 86 94 94 94
37998-106 106 106 101 101 101 86 66 34 124 80 6
37999-156 107 11 180 123 7 192 133 9 200 144 11
38000-206 145 10 200 144 11 192 133 9 175 118 6
38001-139 102 15 109 106 95 70 70 70 42 42 42
38002- 22 22 22 10 10 10 0 0 0 0 0 0
38003- 0 0 0 0 0 0 0 0 0 0 0 0
38004- 0 0 0 0 0 0 0 0 0 0 0 0
38005- 0 0 0 0 0 0 0 0 0 0 0 0
38006- 0 0 0 0 0 0 0 0 0 0 0 0
38007- 0 0 0 0 0 0 0 0 0 0 0 0
38008- 0 0 0 0 0 0 6 6 6 10 10 10
38009- 14 14 14 22 22 22 30 30 30 38 38 38
38010- 50 50 50 62 62 62 74 74 74 90 90 90
38011-101 98 89 112 100 78 121 87 25 124 80 6
38012-137 92 6 152 99 6 152 99 6 152 99 6
38013-138 86 6 124 80 6 98 70 6 86 66 30
38014-101 98 89 82 82 82 58 58 58 46 46 46
38015- 38 38 38 34 34 34 34 34 34 34 34 34
38016- 34 34 34 34 34 34 34 34 34 34 34 34
38017- 34 34 34 34 34 34 38 38 38 42 42 42
38018- 54 54 54 82 82 82 94 86 76 91 60 6
38019-134 86 6 156 107 11 167 114 7 175 118 6
38020-175 118 6 167 114 7 152 99 6 121 87 25
38021-101 98 89 62 62 62 34 34 34 18 18 18
38022- 6 6 6 0 0 0 0 0 0 0 0 0
38023- 0 0 0 0 0 0 0 0 0 0 0 0
38024- 0 0 0 0 0 0 0 0 0 0 0 0
38025- 0 0 0 0 0 0 0 0 0 0 0 0
38026- 0 0 0 0 0 0 0 0 0 0 0 0
38027- 0 0 0 0 0 0 0 0 0 0 0 0
38028- 0 0 0 0 0 0 0 0 0 0 0 0
38029- 0 0 0 6 6 6 6 6 6 10 10 10
38030- 18 18 18 22 22 22 30 30 30 42 42 42
38031- 50 50 50 66 66 66 86 86 86 101 98 89
38032-106 86 58 98 70 6 104 69 6 104 69 6
38033-104 69 6 91 60 6 82 62 34 90 90 90
38034- 62 62 62 38 38 38 22 22 22 14 14 14
38035- 10 10 10 10 10 10 10 10 10 10 10 10
38036- 10 10 10 10 10 10 6 6 6 10 10 10
38037- 10 10 10 10 10 10 10 10 10 14 14 14
38038- 22 22 22 42 42 42 70 70 70 89 81 66
38039- 80 54 7 104 69 6 124 80 6 137 92 6
38040-134 86 6 116 81 8 100 82 52 86 86 86
38041- 58 58 58 30 30 30 14 14 14 6 6 6
38042- 0 0 0 0 0 0 0 0 0 0 0 0
38043- 0 0 0 0 0 0 0 0 0 0 0 0
38044- 0 0 0 0 0 0 0 0 0 0 0 0
38045- 0 0 0 0 0 0 0 0 0 0 0 0
38046- 0 0 0 0 0 0 0 0 0 0 0 0
38047- 0 0 0 0 0 0 0 0 0 0 0 0
38048- 0 0 0 0 0 0 0 0 0 0 0 0
38049- 0 0 0 0 0 0 0 0 0 0 0 0
38050- 0 0 0 6 6 6 10 10 10 14 14 14
38051- 18 18 18 26 26 26 38 38 38 54 54 54
38052- 70 70 70 86 86 86 94 86 76 89 81 66
38053- 89 81 66 86 86 86 74 74 74 50 50 50
38054- 30 30 30 14 14 14 6 6 6 0 0 0
38055- 0 0 0 0 0 0 0 0 0 0 0 0
38056- 0 0 0 0 0 0 0 0 0 0 0 0
38057- 0 0 0 0 0 0 0 0 0 0 0 0
38058- 6 6 6 18 18 18 34 34 34 58 58 58
38059- 82 82 82 89 81 66 89 81 66 89 81 66
38060- 94 86 66 94 86 76 74 74 74 50 50 50
38061- 26 26 26 14 14 14 6 6 6 0 0 0
38062- 0 0 0 0 0 0 0 0 0 0 0 0
38063- 0 0 0 0 0 0 0 0 0 0 0 0
38064- 0 0 0 0 0 0 0 0 0 0 0 0
38065- 0 0 0 0 0 0 0 0 0 0 0 0
38066- 0 0 0 0 0 0 0 0 0 0 0 0
38067- 0 0 0 0 0 0 0 0 0 0 0 0
38068- 0 0 0 0 0 0 0 0 0 0 0 0
38069- 0 0 0 0 0 0 0 0 0 0 0 0
38070- 0 0 0 0 0 0 0 0 0 0 0 0
38071- 6 6 6 6 6 6 14 14 14 18 18 18
38072- 30 30 30 38 38 38 46 46 46 54 54 54
38073- 50 50 50 42 42 42 30 30 30 18 18 18
38074- 10 10 10 0 0 0 0 0 0 0 0 0
38075- 0 0 0 0 0 0 0 0 0 0 0 0
38076- 0 0 0 0 0 0 0 0 0 0 0 0
38077- 0 0 0 0 0 0 0 0 0 0 0 0
38078- 0 0 0 6 6 6 14 14 14 26 26 26
38079- 38 38 38 50 50 50 58 58 58 58 58 58
38080- 54 54 54 42 42 42 30 30 30 18 18 18
38081- 10 10 10 0 0 0 0 0 0 0 0 0
38082- 0 0 0 0 0 0 0 0 0 0 0 0
38083- 0 0 0 0 0 0 0 0 0 0 0 0
38084- 0 0 0 0 0 0 0 0 0 0 0 0
38085- 0 0 0 0 0 0 0 0 0 0 0 0
38086- 0 0 0 0 0 0 0 0 0 0 0 0
38087- 0 0 0 0 0 0 0 0 0 0 0 0
38088- 0 0 0 0 0 0 0 0 0 0 0 0
38089- 0 0 0 0 0 0 0 0 0 0 0 0
38090- 0 0 0 0 0 0 0 0 0 0 0 0
38091- 0 0 0 0 0 0 0 0 0 6 6 6
38092- 6 6 6 10 10 10 14 14 14 18 18 18
38093- 18 18 18 14 14 14 10 10 10 6 6 6
38094- 0 0 0 0 0 0 0 0 0 0 0 0
38095- 0 0 0 0 0 0 0 0 0 0 0 0
38096- 0 0 0 0 0 0 0 0 0 0 0 0
38097- 0 0 0 0 0 0 0 0 0 0 0 0
38098- 0 0 0 0 0 0 0 0 0 6 6 6
38099- 14 14 14 18 18 18 22 22 22 22 22 22
38100- 18 18 18 14 14 14 10 10 10 6 6 6
38101- 0 0 0 0 0 0 0 0 0 0 0 0
38102- 0 0 0 0 0 0 0 0 0 0 0 0
38103- 0 0 0 0 0 0 0 0 0 0 0 0
38104- 0 0 0 0 0 0 0 0 0 0 0 0
38105- 0 0 0 0 0 0 0 0 0 0 0 0
38106+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38107+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38108+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38109+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38110+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38111+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38112+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38113+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38114+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38115+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38116+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38117+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38118+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38119+4 4 4 4 4 4
38120+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38121+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38122+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38123+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38124+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38125+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38126+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38127+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38128+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38129+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38130+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38131+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38132+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38133+4 4 4 4 4 4
38134+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38135+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38136+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38137+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38138+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38139+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38140+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38141+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38142+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38143+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38144+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38145+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38146+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38147+4 4 4 4 4 4
38148+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38149+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38150+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38151+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38152+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38153+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38154+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38155+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38156+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38157+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38158+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38159+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38160+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38161+4 4 4 4 4 4
38162+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38163+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38164+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38165+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38166+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38167+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38168+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38169+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38170+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38171+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38172+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38173+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38174+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38175+4 4 4 4 4 4
38176+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38177+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38178+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38179+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38180+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38181+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38182+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38183+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38184+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38185+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38186+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38187+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38188+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38189+4 4 4 4 4 4
38190+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38191+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38192+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38193+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38194+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
38195+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
38196+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38197+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38198+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38199+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
38200+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38201+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
38202+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38203+4 4 4 4 4 4
38204+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38205+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38206+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38207+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38208+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
38209+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
38210+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38211+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38212+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38213+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
38214+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
38215+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
38216+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38217+4 4 4 4 4 4
38218+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38219+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38220+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38221+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38222+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
38223+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
38224+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38225+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38226+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38227+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
38228+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
38229+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
38230+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
38231+4 4 4 4 4 4
38232+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38234+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38235+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
38236+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
38237+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
38238+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
38239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38240+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38241+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
38242+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
38243+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
38244+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
38245+4 4 4 4 4 4
38246+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38249+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
38250+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
38251+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
38252+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
38253+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38254+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
38255+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
38256+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
38257+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
38258+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
38259+4 4 4 4 4 4
38260+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38263+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
38264+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
38265+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
38266+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
38267+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
38268+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
38269+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
38270+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
38271+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
38272+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
38273+4 4 4 4 4 4
38274+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38276+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
38277+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
38278+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
38279+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
38280+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
38281+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
38282+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
38283+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
38284+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
38285+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
38286+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
38287+4 4 4 4 4 4
38288+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38290+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
38291+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
38292+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
38293+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
38294+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
38295+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
38296+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
38297+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
38298+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
38299+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
38300+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
38301+4 4 4 4 4 4
38302+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38304+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
38305+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
38306+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
38307+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
38308+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
38309+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
38310+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
38311+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
38312+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
38313+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
38314+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38315+4 4 4 4 4 4
38316+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38317+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38318+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
38319+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
38320+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
38321+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
38322+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
38323+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
38324+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
38325+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
38326+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
38327+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
38328+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
38329+4 4 4 4 4 4
38330+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38331+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
38332+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
38333+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
38334+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
38335+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
38336+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
38337+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
38338+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
38339+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
38340+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
38341+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
38342+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
38343+4 4 4 4 4 4
38344+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38345+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
38346+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
38347+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
38348+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
38349+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
38350+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
38351+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
38352+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
38353+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
38354+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
38355+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
38356+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
38357+0 0 0 4 4 4
38358+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38359+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
38360+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
38361+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
38362+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
38363+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
38364+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
38365+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
38366+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
38367+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
38368+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
38369+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
38370+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
38371+2 0 0 0 0 0
38372+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
38373+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
38374+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
38375+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
38376+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
38377+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
38378+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
38379+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
38380+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
38381+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
38382+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
38383+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
38384+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
38385+37 38 37 0 0 0
38386+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38387+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
38388+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
38389+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
38390+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
38391+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
38392+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
38393+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
38394+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
38395+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
38396+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
38397+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
38398+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
38399+85 115 134 4 0 0
38400+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
38401+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
38402+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
38403+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
38404+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
38405+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
38406+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
38407+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
38408+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
38409+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
38410+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
38411+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
38412+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
38413+60 73 81 4 0 0
38414+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
38415+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
38416+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
38417+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
38418+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
38419+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
38420+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
38421+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
38422+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
38423+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
38424+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
38425+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
38426+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
38427+16 19 21 4 0 0
38428+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
38429+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
38430+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
38431+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
38432+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
38433+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
38434+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
38435+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
38436+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
38437+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
38438+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
38439+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
38440+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
38441+4 0 0 4 3 3
38442+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
38443+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
38444+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
38445+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
38446+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
38447+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
38448+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
38449+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
38450+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
38451+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
38452+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
38453+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
38454+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
38455+3 2 2 4 4 4
38456+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
38457+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
38458+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
38459+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38460+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
38461+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
38462+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
38463+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
38464+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
38465+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
38466+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
38467+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
38468+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
38469+4 4 4 4 4 4
38470+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
38471+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
38472+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
38473+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
38474+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
38475+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
38476+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
38477+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
38478+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
38479+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
38480+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
38481+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
38482+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
38483+4 4 4 4 4 4
38484+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
38485+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
38486+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
38487+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
38488+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
38489+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38490+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
38491+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
38492+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
38493+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
38494+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
38495+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
38496+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
38497+5 5 5 5 5 5
38498+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
38499+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
38500+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
38501+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
38502+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
38503+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38504+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
38505+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
38506+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
38507+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
38508+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
38509+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
38510+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
38511+5 5 5 4 4 4
38512+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
38513+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
38514+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
38515+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
38516+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38517+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
38518+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
38519+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
38520+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
38521+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
38522+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
38523+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38524+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38525+4 4 4 4 4 4
38526+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
38527+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
38528+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
38529+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
38530+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
38531+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38532+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38533+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
38534+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
38535+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
38536+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
38537+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
38538+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38539+4 4 4 4 4 4
38540+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
38541+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
38542+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
38543+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
38544+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38545+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
38546+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
38547+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
38548+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
38549+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
38550+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
38551+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38552+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38553+4 4 4 4 4 4
38554+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
38555+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
38556+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
38557+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
38558+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38559+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38560+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38561+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
38562+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
38563+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
38564+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
38565+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38566+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38567+4 4 4 4 4 4
38568+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
38569+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
38570+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
38571+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
38572+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38573+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
38574+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38575+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
38576+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
38577+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
38578+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38579+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38580+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38581+4 4 4 4 4 4
38582+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
38583+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
38584+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
38585+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
38586+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38587+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
38588+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
38589+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
38590+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
38591+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
38592+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
38593+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38594+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38595+4 4 4 4 4 4
38596+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
38597+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
38598+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
38599+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
38600+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38601+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
38602+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
38603+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
38604+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
38605+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
38606+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
38607+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38608+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38609+4 4 4 4 4 4
38610+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
38611+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
38612+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
38613+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38614+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
38615+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
38616+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
38617+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
38618+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
38619+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
38620+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38621+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38622+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38623+4 4 4 4 4 4
38624+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
38625+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
38626+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
38627+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38628+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38629+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
38630+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
38631+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
38632+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
38633+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
38634+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38635+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38636+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38637+4 4 4 4 4 4
38638+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
38639+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
38640+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38641+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38642+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38643+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
38644+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
38645+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
38646+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
38647+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
38648+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38649+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38650+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38651+4 4 4 4 4 4
38652+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
38653+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
38654+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38655+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38656+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38657+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
38658+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
38659+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
38660+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38661+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38662+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38663+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38664+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38665+4 4 4 4 4 4
38666+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38667+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
38668+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38669+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
38670+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
38671+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
38672+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
38673+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
38674+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38675+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38676+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38677+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38678+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38679+4 4 4 4 4 4
38680+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38681+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
38682+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38683+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
38684+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38685+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
38686+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
38687+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
38688+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38689+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38690+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38691+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38692+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38693+4 4 4 4 4 4
38694+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
38695+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
38696+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38697+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
38698+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
38699+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
38700+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
38701+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
38702+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38703+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38704+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38705+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38706+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38707+4 4 4 4 4 4
38708+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
38709+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
38710+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38711+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
38712+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
38713+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
38714+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
38715+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
38716+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38717+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38718+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38719+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38720+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38721+4 4 4 4 4 4
38722+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38723+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
38724+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38725+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
38726+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
38727+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
38728+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
38729+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
38730+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38731+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38732+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38733+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38734+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38735+4 4 4 4 4 4
38736+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
38737+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
38738+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38739+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
38740+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
38741+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
38742+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
38743+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
38744+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
38745+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38746+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38747+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38748+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38749+4 4 4 4 4 4
38750+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38751+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
38752+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
38753+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
38754+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
38755+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
38756+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
38757+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
38758+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38759+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38760+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38761+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38762+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38763+4 4 4 4 4 4
38764+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38765+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
38766+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38767+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
38768+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
38769+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
38770+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
38771+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
38772+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38773+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38774+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38775+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38776+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38777+4 4 4 4 4 4
38778+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38779+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
38780+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
38781+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
38782+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
38783+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
38784+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38785+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
38786+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38787+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38788+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38789+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38790+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38791+4 4 4 4 4 4
38792+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38793+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
38794+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
38795+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38796+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
38797+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
38798+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38799+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
38800+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38801+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38802+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38803+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38804+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38805+4 4 4 4 4 4
38806+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38807+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
38808+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
38809+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
38810+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
38811+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
38812+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
38813+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
38814+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
38815+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38816+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38817+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38818+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38819+4 4 4 4 4 4
38820+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38821+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
38822+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
38823+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
38824+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
38825+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
38826+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
38827+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
38828+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
38829+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38830+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38831+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38833+4 4 4 4 4 4
38834+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
38835+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
38836+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
38837+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
38838+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38839+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
38840+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
38841+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
38842+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
38843+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38844+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38845+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38846+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38847+4 4 4 4 4 4
38848+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38849+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
38850+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
38851+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
38852+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
38853+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
38854+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
38855+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
38856+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
38857+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38858+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38859+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38860+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38861+4 4 4 4 4 4
38862+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
38863+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
38864+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
38865+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
38866+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
38867+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
38868+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
38869+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
38870+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
38871+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
38872+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38873+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38874+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38875+4 4 4 4 4 4
38876+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
38877+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
38878+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
38879+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
38880+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
38881+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
38882+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
38883+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
38884+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
38885+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
38886+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38887+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38888+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38889+4 4 4 4 4 4
38890+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
38891+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
38892+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
38893+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
38894+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
38895+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
38896+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38897+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
38898+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
38899+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
38900+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38901+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38902+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38903+4 4 4 4 4 4
38904+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
38905+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
38906+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
38907+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
38908+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
38909+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
38910+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
38911+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
38912+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
38913+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
38914+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38915+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38916+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38917+4 4 4 4 4 4
38918+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
38919+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
38920+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
38921+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
38922+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
38923+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
38924+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
38925+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
38926+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
38927+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
38928+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38929+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38930+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38931+4 4 4 4 4 4
38932+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38933+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
38934+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
38935+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
38936+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
38937+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
38938+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
38939+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
38940+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
38941+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
38942+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38943+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38944+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38945+4 4 4 4 4 4
38946+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
38947+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
38948+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
38949+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
38950+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
38951+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
38952+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
38953+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
38954+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
38955+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38956+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38957+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38958+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38959+4 4 4 4 4 4
38960+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
38961+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
38962+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
38963+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
38964+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
38965+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
38966+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
38967+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
38968+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
38969+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38970+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38971+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38972+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38973+4 4 4 4 4 4
38974+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
38975+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38976+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
38977+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
38978+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
38979+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
38980+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
38981+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
38982+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
38983+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38984+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38985+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38986+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38987+4 4 4 4 4 4
38988+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
38989+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
38990+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
38991+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
38992+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
38993+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
38994+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
38995+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
38996+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
38997+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38998+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38999+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39000+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39001+4 4 4 4 4 4
39002+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
39003+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
39004+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
39005+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
39006+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
39007+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
39008+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
39009+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
39010+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39011+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39012+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39013+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39014+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39015+4 4 4 4 4 4
39016+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
39017+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
39018+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
39019+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
39020+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
39021+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
39022+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
39023+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
39024+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39025+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39026+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39027+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39028+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39029+4 4 4 4 4 4
39030+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
39031+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
39032+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
39033+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
39034+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
39035+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
39036+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
39037+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
39038+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39039+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39040+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39041+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39042+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39043+4 4 4 4 4 4
39044+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39045+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
39046+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39047+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
39048+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
39049+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
39050+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
39051+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
39052+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39053+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39054+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39055+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39057+4 4 4 4 4 4
39058+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39059+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
39060+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
39061+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
39062+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
39063+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
39064+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
39065+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
39066+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39067+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39068+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39069+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39071+4 4 4 4 4 4
39072+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39073+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
39074+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
39075+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
39076+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
39077+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
39078+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
39079+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39080+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39081+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39082+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39083+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39084+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39085+4 4 4 4 4 4
39086+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39087+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39088+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
39089+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
39090+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
39091+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
39092+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
39093+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39094+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39095+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39096+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39097+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39098+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39099+4 4 4 4 4 4
39100+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39101+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39102+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39103+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39104+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
39105+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
39106+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
39107+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39108+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39109+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39110+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39111+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39112+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39113+4 4 4 4 4 4
39114+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39115+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39116+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39117+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
39118+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
39119+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
39120+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
39121+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39122+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39123+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39124+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39125+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39126+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39127+4 4 4 4 4 4
39128+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39129+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39130+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39131+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
39132+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
39133+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
39134+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
39135+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39136+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39137+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39138+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39139+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39140+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39141+4 4 4 4 4 4
39142+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39143+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39144+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39145+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
39146+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
39147+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
39148+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
39149+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39150+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39151+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39152+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39153+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39154+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39155+4 4 4 4 4 4
39156+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39157+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39158+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39159+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39160+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
39161+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39162+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39163+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39164+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39165+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39166+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39167+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39168+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39169+4 4 4 4 4 4
39170+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39171+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39172+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39173+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39174+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
39175+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
39176+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
39177+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39178+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39179+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39180+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39181+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39182+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39183+4 4 4 4 4 4
39184+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39185+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39186+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39187+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39188+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
39189+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
39190+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39191+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39192+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39193+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39194+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39195+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39196+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39197+4 4 4 4 4 4
39198+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39199+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39200+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39201+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39202+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
39203+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
39204+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39205+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39206+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39207+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39208+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39209+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39210+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39211+4 4 4 4 4 4
39212+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39213+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39214+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39215+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39216+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
39217+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
39218+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39219+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39220+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39221+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39222+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39223+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39224+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39225+4 4 4 4 4 4
39226diff -urNp linux-3.1.1/drivers/video/udlfb.c linux-3.1.1/drivers/video/udlfb.c
39227--- linux-3.1.1/drivers/video/udlfb.c 2011-11-11 15:19:27.000000000 -0500
39228+++ linux-3.1.1/drivers/video/udlfb.c 2011-11-16 18:39:08.000000000 -0500
39229@@ -585,11 +585,11 @@ int dlfb_handle_damage(struct dlfb_data
39230 dlfb_urb_completion(urb);
39231
39232 error:
39233- atomic_add(bytes_sent, &dev->bytes_sent);
39234- atomic_add(bytes_identical, &dev->bytes_identical);
39235- atomic_add(width*height*2, &dev->bytes_rendered);
39236+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39237+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39238+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
39239 end_cycles = get_cycles();
39240- atomic_add(((unsigned int) ((end_cycles - start_cycles)
39241+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39242 >> 10)), /* Kcycles */
39243 &dev->cpu_kcycles_used);
39244
39245@@ -710,11 +710,11 @@ static void dlfb_dpy_deferred_io(struct
39246 dlfb_urb_completion(urb);
39247
39248 error:
39249- atomic_add(bytes_sent, &dev->bytes_sent);
39250- atomic_add(bytes_identical, &dev->bytes_identical);
39251- atomic_add(bytes_rendered, &dev->bytes_rendered);
39252+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39253+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39254+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
39255 end_cycles = get_cycles();
39256- atomic_add(((unsigned int) ((end_cycles - start_cycles)
39257+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39258 >> 10)), /* Kcycles */
39259 &dev->cpu_kcycles_used);
39260 }
39261@@ -1306,7 +1306,7 @@ static ssize_t metrics_bytes_rendered_sh
39262 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39263 struct dlfb_data *dev = fb_info->par;
39264 return snprintf(buf, PAGE_SIZE, "%u\n",
39265- atomic_read(&dev->bytes_rendered));
39266+ atomic_read_unchecked(&dev->bytes_rendered));
39267 }
39268
39269 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
39270@@ -1314,7 +1314,7 @@ static ssize_t metrics_bytes_identical_s
39271 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39272 struct dlfb_data *dev = fb_info->par;
39273 return snprintf(buf, PAGE_SIZE, "%u\n",
39274- atomic_read(&dev->bytes_identical));
39275+ atomic_read_unchecked(&dev->bytes_identical));
39276 }
39277
39278 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
39279@@ -1322,7 +1322,7 @@ static ssize_t metrics_bytes_sent_show(s
39280 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39281 struct dlfb_data *dev = fb_info->par;
39282 return snprintf(buf, PAGE_SIZE, "%u\n",
39283- atomic_read(&dev->bytes_sent));
39284+ atomic_read_unchecked(&dev->bytes_sent));
39285 }
39286
39287 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
39288@@ -1330,7 +1330,7 @@ static ssize_t metrics_cpu_kcycles_used_
39289 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39290 struct dlfb_data *dev = fb_info->par;
39291 return snprintf(buf, PAGE_SIZE, "%u\n",
39292- atomic_read(&dev->cpu_kcycles_used));
39293+ atomic_read_unchecked(&dev->cpu_kcycles_used));
39294 }
39295
39296 static ssize_t edid_show(
39297@@ -1387,10 +1387,10 @@ static ssize_t metrics_reset_store(struc
39298 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39299 struct dlfb_data *dev = fb_info->par;
39300
39301- atomic_set(&dev->bytes_rendered, 0);
39302- atomic_set(&dev->bytes_identical, 0);
39303- atomic_set(&dev->bytes_sent, 0);
39304- atomic_set(&dev->cpu_kcycles_used, 0);
39305+ atomic_set_unchecked(&dev->bytes_rendered, 0);
39306+ atomic_set_unchecked(&dev->bytes_identical, 0);
39307+ atomic_set_unchecked(&dev->bytes_sent, 0);
39308+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
39309
39310 return count;
39311 }
39312diff -urNp linux-3.1.1/drivers/video/uvesafb.c linux-3.1.1/drivers/video/uvesafb.c
39313--- linux-3.1.1/drivers/video/uvesafb.c 2011-11-11 15:19:27.000000000 -0500
39314+++ linux-3.1.1/drivers/video/uvesafb.c 2011-11-16 18:39:08.000000000 -0500
39315@@ -19,6 +19,7 @@
39316 #include <linux/io.h>
39317 #include <linux/mutex.h>
39318 #include <linux/slab.h>
39319+#include <linux/moduleloader.h>
39320 #include <video/edid.h>
39321 #include <video/uvesafb.h>
39322 #ifdef CONFIG_X86
39323@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
39324 NULL,
39325 };
39326
39327- return call_usermodehelper(v86d_path, argv, envp, 1);
39328+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
39329 }
39330
39331 /*
39332@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
39333 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
39334 par->pmi_setpal = par->ypan = 0;
39335 } else {
39336+
39337+#ifdef CONFIG_PAX_KERNEXEC
39338+#ifdef CONFIG_MODULES
39339+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
39340+#endif
39341+ if (!par->pmi_code) {
39342+ par->pmi_setpal = par->ypan = 0;
39343+ return 0;
39344+ }
39345+#endif
39346+
39347 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
39348 + task->t.regs.edi);
39349+
39350+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39351+ pax_open_kernel();
39352+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
39353+ pax_close_kernel();
39354+
39355+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
39356+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
39357+#else
39358 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
39359 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
39360+#endif
39361+
39362 printk(KERN_INFO "uvesafb: protected mode interface info at "
39363 "%04x:%04x\n",
39364 (u16)task->t.regs.es, (u16)task->t.regs.edi);
39365@@ -1821,6 +1844,11 @@ out:
39366 if (par->vbe_modes)
39367 kfree(par->vbe_modes);
39368
39369+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39370+ if (par->pmi_code)
39371+ module_free_exec(NULL, par->pmi_code);
39372+#endif
39373+
39374 framebuffer_release(info);
39375 return err;
39376 }
39377@@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
39378 kfree(par->vbe_state_orig);
39379 if (par->vbe_state_saved)
39380 kfree(par->vbe_state_saved);
39381+
39382+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39383+ if (par->pmi_code)
39384+ module_free_exec(NULL, par->pmi_code);
39385+#endif
39386+
39387 }
39388
39389 framebuffer_release(info);
39390diff -urNp linux-3.1.1/drivers/video/vesafb.c linux-3.1.1/drivers/video/vesafb.c
39391--- linux-3.1.1/drivers/video/vesafb.c 2011-11-11 15:19:27.000000000 -0500
39392+++ linux-3.1.1/drivers/video/vesafb.c 2011-11-16 18:39:08.000000000 -0500
39393@@ -9,6 +9,7 @@
39394 */
39395
39396 #include <linux/module.h>
39397+#include <linux/moduleloader.h>
39398 #include <linux/kernel.h>
39399 #include <linux/errno.h>
39400 #include <linux/string.h>
39401@@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
39402 static int vram_total __initdata; /* Set total amount of memory */
39403 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
39404 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
39405-static void (*pmi_start)(void) __read_mostly;
39406-static void (*pmi_pal) (void) __read_mostly;
39407+static void (*pmi_start)(void) __read_only;
39408+static void (*pmi_pal) (void) __read_only;
39409 static int depth __read_mostly;
39410 static int vga_compat __read_mostly;
39411 /* --------------------------------------------------------------------- */
39412@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
39413 unsigned int size_vmode;
39414 unsigned int size_remap;
39415 unsigned int size_total;
39416+ void *pmi_code = NULL;
39417
39418 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
39419 return -ENODEV;
39420@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
39421 size_remap = size_total;
39422 vesafb_fix.smem_len = size_remap;
39423
39424-#ifndef __i386__
39425- screen_info.vesapm_seg = 0;
39426-#endif
39427-
39428 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
39429 printk(KERN_WARNING
39430 "vesafb: cannot reserve video memory at 0x%lx\n",
39431@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct pl
39432 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
39433 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
39434
39435+#ifdef __i386__
39436+
39437+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39438+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
39439+ if (!pmi_code)
39440+#elif !defined(CONFIG_PAX_KERNEXEC)
39441+ if (0)
39442+#endif
39443+
39444+#endif
39445+ screen_info.vesapm_seg = 0;
39446+
39447 if (screen_info.vesapm_seg) {
39448- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
39449- screen_info.vesapm_seg,screen_info.vesapm_off);
39450+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
39451+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
39452 }
39453
39454 if (screen_info.vesapm_seg < 0xc000)
39455@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct pl
39456
39457 if (ypan || pmi_setpal) {
39458 unsigned short *pmi_base;
39459+
39460 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
39461- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
39462- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
39463+
39464+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39465+ pax_open_kernel();
39466+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
39467+#else
39468+ pmi_code = pmi_base;
39469+#endif
39470+
39471+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
39472+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
39473+
39474+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39475+ pmi_start = ktva_ktla(pmi_start);
39476+ pmi_pal = ktva_ktla(pmi_pal);
39477+ pax_close_kernel();
39478+#endif
39479+
39480 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
39481 if (pmi_base[3]) {
39482 printk(KERN_INFO "vesafb: pmi: ports = ");
39483@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct pl
39484 info->node, info->fix.id);
39485 return 0;
39486 err:
39487+
39488+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39489+ module_free_exec(NULL, pmi_code);
39490+#endif
39491+
39492 if (info->screen_base)
39493 iounmap(info->screen_base);
39494 framebuffer_release(info);
39495diff -urNp linux-3.1.1/drivers/video/via/via_clock.h linux-3.1.1/drivers/video/via/via_clock.h
39496--- linux-3.1.1/drivers/video/via/via_clock.h 2011-11-11 15:19:27.000000000 -0500
39497+++ linux-3.1.1/drivers/video/via/via_clock.h 2011-11-16 18:39:08.000000000 -0500
39498@@ -56,7 +56,7 @@ struct via_clock {
39499
39500 void (*set_engine_pll_state)(u8 state);
39501 void (*set_engine_pll)(struct via_pll_config config);
39502-};
39503+} __no_const;
39504
39505
39506 static inline u32 get_pll_internal_frequency(u32 ref_freq,
39507diff -urNp linux-3.1.1/drivers/virtio/virtio_balloon.c linux-3.1.1/drivers/virtio/virtio_balloon.c
39508--- linux-3.1.1/drivers/virtio/virtio_balloon.c 2011-11-11 15:19:27.000000000 -0500
39509+++ linux-3.1.1/drivers/virtio/virtio_balloon.c 2011-11-16 18:40:29.000000000 -0500
39510@@ -174,6 +174,8 @@ static void update_balloon_stats(struct
39511 struct sysinfo i;
39512 int idx = 0;
39513
39514+ pax_track_stack();
39515+
39516 all_vm_events(events);
39517 si_meminfo(&i);
39518
39519diff -urNp linux-3.1.1/drivers/xen/xen-pciback/conf_space.h linux-3.1.1/drivers/xen/xen-pciback/conf_space.h
39520--- linux-3.1.1/drivers/xen/xen-pciback/conf_space.h 2011-11-11 15:19:27.000000000 -0500
39521+++ linux-3.1.1/drivers/xen/xen-pciback/conf_space.h 2011-11-16 18:39:08.000000000 -0500
39522@@ -44,15 +44,15 @@ struct config_field {
39523 struct {
39524 conf_dword_write write;
39525 conf_dword_read read;
39526- } dw;
39527+ } __no_const dw;
39528 struct {
39529 conf_word_write write;
39530 conf_word_read read;
39531- } w;
39532+ } __no_const w;
39533 struct {
39534 conf_byte_write write;
39535 conf_byte_read read;
39536- } b;
39537+ } __no_const b;
39538 } u;
39539 struct list_head list;
39540 };
39541diff -urNp linux-3.1.1/fs/9p/vfs_inode.c linux-3.1.1/fs/9p/vfs_inode.c
39542--- linux-3.1.1/fs/9p/vfs_inode.c 2011-11-11 15:19:27.000000000 -0500
39543+++ linux-3.1.1/fs/9p/vfs_inode.c 2011-11-16 18:39:08.000000000 -0500
39544@@ -1288,7 +1288,7 @@ static void *v9fs_vfs_follow_link(struct
39545 void
39546 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39547 {
39548- char *s = nd_get_link(nd);
39549+ const char *s = nd_get_link(nd);
39550
39551 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
39552 IS_ERR(s) ? "<error>" : s);
39553diff -urNp linux-3.1.1/fs/aio.c linux-3.1.1/fs/aio.c
39554--- linux-3.1.1/fs/aio.c 2011-11-11 15:19:27.000000000 -0500
39555+++ linux-3.1.1/fs/aio.c 2011-11-16 18:40:29.000000000 -0500
39556@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx
39557 size += sizeof(struct io_event) * nr_events;
39558 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
39559
39560- if (nr_pages < 0)
39561+ if (nr_pages <= 0)
39562 return -EINVAL;
39563
39564 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
39565@@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
39566 struct aio_timeout to;
39567 int retry = 0;
39568
39569+ pax_track_stack();
39570+
39571 /* needed to zero any padding within an entry (there shouldn't be
39572 * any, but C is fun!
39573 */
39574@@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
39575 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
39576 {
39577 ssize_t ret;
39578+ struct iovec iovstack;
39579
39580 #ifdef CONFIG_COMPAT
39581 if (compat)
39582 ret = compat_rw_copy_check_uvector(type,
39583 (struct compat_iovec __user *)kiocb->ki_buf,
39584- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39585+ kiocb->ki_nbytes, 1, &iovstack,
39586 &kiocb->ki_iovec);
39587 else
39588 #endif
39589 ret = rw_copy_check_uvector(type,
39590 (struct iovec __user *)kiocb->ki_buf,
39591- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39592+ kiocb->ki_nbytes, 1, &iovstack,
39593 &kiocb->ki_iovec);
39594 if (ret < 0)
39595 goto out;
39596
39597+ if (kiocb->ki_iovec == &iovstack) {
39598+ kiocb->ki_inline_vec = iovstack;
39599+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
39600+ }
39601 kiocb->ki_nr_segs = kiocb->ki_nbytes;
39602 kiocb->ki_cur_seg = 0;
39603 /* ki_nbytes/left now reflect bytes instead of segs */
39604diff -urNp linux-3.1.1/fs/attr.c linux-3.1.1/fs/attr.c
39605--- linux-3.1.1/fs/attr.c 2011-11-11 15:19:27.000000000 -0500
39606+++ linux-3.1.1/fs/attr.c 2011-11-16 18:40:29.000000000 -0500
39607@@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode
39608 unsigned long limit;
39609
39610 limit = rlimit(RLIMIT_FSIZE);
39611+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
39612 if (limit != RLIM_INFINITY && offset > limit)
39613 goto out_sig;
39614 if (offset > inode->i_sb->s_maxbytes)
39615diff -urNp linux-3.1.1/fs/autofs4/waitq.c linux-3.1.1/fs/autofs4/waitq.c
39616--- linux-3.1.1/fs/autofs4/waitq.c 2011-11-11 15:19:27.000000000 -0500
39617+++ linux-3.1.1/fs/autofs4/waitq.c 2011-11-16 18:39:08.000000000 -0500
39618@@ -60,7 +60,7 @@ static int autofs4_write(struct file *fi
39619 {
39620 unsigned long sigpipe, flags;
39621 mm_segment_t fs;
39622- const char *data = (const char *)addr;
39623+ const char __user *data = (const char __force_user *)addr;
39624 ssize_t wr = 0;
39625
39626 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
39627diff -urNp linux-3.1.1/fs/befs/linuxvfs.c linux-3.1.1/fs/befs/linuxvfs.c
39628--- linux-3.1.1/fs/befs/linuxvfs.c 2011-11-11 15:19:27.000000000 -0500
39629+++ linux-3.1.1/fs/befs/linuxvfs.c 2011-11-16 18:39:08.000000000 -0500
39630@@ -503,7 +503,7 @@ static void befs_put_link(struct dentry
39631 {
39632 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
39633 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
39634- char *link = nd_get_link(nd);
39635+ const char *link = nd_get_link(nd);
39636 if (!IS_ERR(link))
39637 kfree(link);
39638 }
39639diff -urNp linux-3.1.1/fs/binfmt_aout.c linux-3.1.1/fs/binfmt_aout.c
39640--- linux-3.1.1/fs/binfmt_aout.c 2011-11-11 15:19:27.000000000 -0500
39641+++ linux-3.1.1/fs/binfmt_aout.c 2011-11-16 18:40:29.000000000 -0500
39642@@ -16,6 +16,7 @@
39643 #include <linux/string.h>
39644 #include <linux/fs.h>
39645 #include <linux/file.h>
39646+#include <linux/security.h>
39647 #include <linux/stat.h>
39648 #include <linux/fcntl.h>
39649 #include <linux/ptrace.h>
39650@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
39651 #endif
39652 # define START_STACK(u) ((void __user *)u.start_stack)
39653
39654+ memset(&dump, 0, sizeof(dump));
39655+
39656 fs = get_fs();
39657 set_fs(KERNEL_DS);
39658 has_dumped = 1;
39659@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
39660
39661 /* If the size of the dump file exceeds the rlimit, then see what would happen
39662 if we wrote the stack, but not the data area. */
39663+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
39664 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
39665 dump.u_dsize = 0;
39666
39667 /* Make sure we have enough room to write the stack and data areas. */
39668+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
39669 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
39670 dump.u_ssize = 0;
39671
39672@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
39673 rlim = rlimit(RLIMIT_DATA);
39674 if (rlim >= RLIM_INFINITY)
39675 rlim = ~0;
39676+
39677+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
39678 if (ex.a_data + ex.a_bss > rlim)
39679 return -ENOMEM;
39680
39681@@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
39682 install_exec_creds(bprm);
39683 current->flags &= ~PF_FORKNOEXEC;
39684
39685+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39686+ current->mm->pax_flags = 0UL;
39687+#endif
39688+
39689+#ifdef CONFIG_PAX_PAGEEXEC
39690+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
39691+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
39692+
39693+#ifdef CONFIG_PAX_EMUTRAMP
39694+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
39695+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
39696+#endif
39697+
39698+#ifdef CONFIG_PAX_MPROTECT
39699+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
39700+ current->mm->pax_flags |= MF_PAX_MPROTECT;
39701+#endif
39702+
39703+ }
39704+#endif
39705+
39706 if (N_MAGIC(ex) == OMAGIC) {
39707 unsigned long text_addr, map_size;
39708 loff_t pos;
39709@@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
39710
39711 down_write(&current->mm->mmap_sem);
39712 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
39713- PROT_READ | PROT_WRITE | PROT_EXEC,
39714+ PROT_READ | PROT_WRITE,
39715 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
39716 fd_offset + ex.a_text);
39717 up_write(&current->mm->mmap_sem);
39718diff -urNp linux-3.1.1/fs/binfmt_elf.c linux-3.1.1/fs/binfmt_elf.c
39719--- linux-3.1.1/fs/binfmt_elf.c 2011-11-11 15:19:27.000000000 -0500
39720+++ linux-3.1.1/fs/binfmt_elf.c 2011-11-16 18:40:29.000000000 -0500
39721@@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
39722 #define elf_core_dump NULL
39723 #endif
39724
39725+#ifdef CONFIG_PAX_MPROTECT
39726+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
39727+#endif
39728+
39729 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
39730 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
39731 #else
39732@@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
39733 .load_binary = load_elf_binary,
39734 .load_shlib = load_elf_library,
39735 .core_dump = elf_core_dump,
39736+
39737+#ifdef CONFIG_PAX_MPROTECT
39738+ .handle_mprotect= elf_handle_mprotect,
39739+#endif
39740+
39741 .min_coredump = ELF_EXEC_PAGESIZE,
39742 };
39743
39744@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
39745
39746 static int set_brk(unsigned long start, unsigned long end)
39747 {
39748+ unsigned long e = end;
39749+
39750 start = ELF_PAGEALIGN(start);
39751 end = ELF_PAGEALIGN(end);
39752 if (end > start) {
39753@@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
39754 if (BAD_ADDR(addr))
39755 return addr;
39756 }
39757- current->mm->start_brk = current->mm->brk = end;
39758+ current->mm->start_brk = current->mm->brk = e;
39759 return 0;
39760 }
39761
39762@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
39763 elf_addr_t __user *u_rand_bytes;
39764 const char *k_platform = ELF_PLATFORM;
39765 const char *k_base_platform = ELF_BASE_PLATFORM;
39766- unsigned char k_rand_bytes[16];
39767+ u32 k_rand_bytes[4];
39768 int items;
39769 elf_addr_t *elf_info;
39770 int ei_index = 0;
39771 const struct cred *cred = current_cred();
39772 struct vm_area_struct *vma;
39773+ unsigned long saved_auxv[AT_VECTOR_SIZE];
39774+
39775+ pax_track_stack();
39776
39777 /*
39778 * In some cases (e.g. Hyper-Threading), we want to avoid L1
39779@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
39780 * Generate 16 random bytes for userspace PRNG seeding.
39781 */
39782 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
39783- u_rand_bytes = (elf_addr_t __user *)
39784- STACK_ALLOC(p, sizeof(k_rand_bytes));
39785+ srandom32(k_rand_bytes[0] ^ random32());
39786+ srandom32(k_rand_bytes[1] ^ random32());
39787+ srandom32(k_rand_bytes[2] ^ random32());
39788+ srandom32(k_rand_bytes[3] ^ random32());
39789+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
39790+ u_rand_bytes = (elf_addr_t __user *) p;
39791 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
39792 return -EFAULT;
39793
39794@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
39795 return -EFAULT;
39796 current->mm->env_end = p;
39797
39798+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
39799+
39800 /* Put the elf_info on the stack in the right place. */
39801 sp = (elf_addr_t __user *)envp + 1;
39802- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
39803+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
39804 return -EFAULT;
39805 return 0;
39806 }
39807@@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
39808 {
39809 struct elf_phdr *elf_phdata;
39810 struct elf_phdr *eppnt;
39811- unsigned long load_addr = 0;
39812+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
39813 int load_addr_set = 0;
39814 unsigned long last_bss = 0, elf_bss = 0;
39815- unsigned long error = ~0UL;
39816+ unsigned long error = -EINVAL;
39817 unsigned long total_size;
39818 int retval, i, size;
39819
39820@@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
39821 goto out_close;
39822 }
39823
39824+#ifdef CONFIG_PAX_SEGMEXEC
39825+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
39826+ pax_task_size = SEGMEXEC_TASK_SIZE;
39827+#endif
39828+
39829 eppnt = elf_phdata;
39830 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
39831 if (eppnt->p_type == PT_LOAD) {
39832@@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
39833 k = load_addr + eppnt->p_vaddr;
39834 if (BAD_ADDR(k) ||
39835 eppnt->p_filesz > eppnt->p_memsz ||
39836- eppnt->p_memsz > TASK_SIZE ||
39837- TASK_SIZE - eppnt->p_memsz < k) {
39838+ eppnt->p_memsz > pax_task_size ||
39839+ pax_task_size - eppnt->p_memsz < k) {
39840 error = -ENOMEM;
39841 goto out_close;
39842 }
39843@@ -528,6 +553,193 @@ out:
39844 return error;
39845 }
39846
39847+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
39848+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
39849+{
39850+ unsigned long pax_flags = 0UL;
39851+
39852+#ifdef CONFIG_PAX_PAGEEXEC
39853+ if (elf_phdata->p_flags & PF_PAGEEXEC)
39854+ pax_flags |= MF_PAX_PAGEEXEC;
39855+#endif
39856+
39857+#ifdef CONFIG_PAX_SEGMEXEC
39858+ if (elf_phdata->p_flags & PF_SEGMEXEC)
39859+ pax_flags |= MF_PAX_SEGMEXEC;
39860+#endif
39861+
39862+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39863+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39864+ if ((__supported_pte_mask & _PAGE_NX))
39865+ pax_flags &= ~MF_PAX_SEGMEXEC;
39866+ else
39867+ pax_flags &= ~MF_PAX_PAGEEXEC;
39868+ }
39869+#endif
39870+
39871+#ifdef CONFIG_PAX_EMUTRAMP
39872+ if (elf_phdata->p_flags & PF_EMUTRAMP)
39873+ pax_flags |= MF_PAX_EMUTRAMP;
39874+#endif
39875+
39876+#ifdef CONFIG_PAX_MPROTECT
39877+ if (elf_phdata->p_flags & PF_MPROTECT)
39878+ pax_flags |= MF_PAX_MPROTECT;
39879+#endif
39880+
39881+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39882+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
39883+ pax_flags |= MF_PAX_RANDMMAP;
39884+#endif
39885+
39886+ return pax_flags;
39887+}
39888+#endif
39889+
39890+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39891+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
39892+{
39893+ unsigned long pax_flags = 0UL;
39894+
39895+#ifdef CONFIG_PAX_PAGEEXEC
39896+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
39897+ pax_flags |= MF_PAX_PAGEEXEC;
39898+#endif
39899+
39900+#ifdef CONFIG_PAX_SEGMEXEC
39901+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
39902+ pax_flags |= MF_PAX_SEGMEXEC;
39903+#endif
39904+
39905+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39906+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39907+ if ((__supported_pte_mask & _PAGE_NX))
39908+ pax_flags &= ~MF_PAX_SEGMEXEC;
39909+ else
39910+ pax_flags &= ~MF_PAX_PAGEEXEC;
39911+ }
39912+#endif
39913+
39914+#ifdef CONFIG_PAX_EMUTRAMP
39915+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
39916+ pax_flags |= MF_PAX_EMUTRAMP;
39917+#endif
39918+
39919+#ifdef CONFIG_PAX_MPROTECT
39920+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
39921+ pax_flags |= MF_PAX_MPROTECT;
39922+#endif
39923+
39924+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39925+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
39926+ pax_flags |= MF_PAX_RANDMMAP;
39927+#endif
39928+
39929+ return pax_flags;
39930+}
39931+#endif
39932+
39933+#ifdef CONFIG_PAX_EI_PAX
39934+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
39935+{
39936+ unsigned long pax_flags = 0UL;
39937+
39938+#ifdef CONFIG_PAX_PAGEEXEC
39939+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
39940+ pax_flags |= MF_PAX_PAGEEXEC;
39941+#endif
39942+
39943+#ifdef CONFIG_PAX_SEGMEXEC
39944+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
39945+ pax_flags |= MF_PAX_SEGMEXEC;
39946+#endif
39947+
39948+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39949+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39950+ if ((__supported_pte_mask & _PAGE_NX))
39951+ pax_flags &= ~MF_PAX_SEGMEXEC;
39952+ else
39953+ pax_flags &= ~MF_PAX_PAGEEXEC;
39954+ }
39955+#endif
39956+
39957+#ifdef CONFIG_PAX_EMUTRAMP
39958+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
39959+ pax_flags |= MF_PAX_EMUTRAMP;
39960+#endif
39961+
39962+#ifdef CONFIG_PAX_MPROTECT
39963+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
39964+ pax_flags |= MF_PAX_MPROTECT;
39965+#endif
39966+
39967+#ifdef CONFIG_PAX_ASLR
39968+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
39969+ pax_flags |= MF_PAX_RANDMMAP;
39970+#endif
39971+
39972+ return pax_flags;
39973+}
39974+#endif
39975+
39976+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39977+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
39978+{
39979+ unsigned long pax_flags = 0UL;
39980+
39981+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39982+ unsigned long i;
39983+ int found_flags = 0;
39984+#endif
39985+
39986+#ifdef CONFIG_PAX_EI_PAX
39987+ pax_flags = pax_parse_ei_pax(elf_ex);
39988+#endif
39989+
39990+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39991+ for (i = 0UL; i < elf_ex->e_phnum; i++)
39992+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
39993+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
39994+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
39995+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
39996+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
39997+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
39998+ return -EINVAL;
39999+
40000+#ifdef CONFIG_PAX_SOFTMODE
40001+ if (pax_softmode)
40002+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
40003+ else
40004+#endif
40005+
40006+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
40007+ found_flags = 1;
40008+ break;
40009+ }
40010+#endif
40011+
40012+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
40013+ if (found_flags == 0) {
40014+ struct elf_phdr phdr;
40015+ memset(&phdr, 0, sizeof(phdr));
40016+ phdr.p_flags = PF_NOEMUTRAMP;
40017+#ifdef CONFIG_PAX_SOFTMODE
40018+ if (pax_softmode)
40019+ pax_flags = pax_parse_softmode(&phdr);
40020+ else
40021+#endif
40022+ pax_flags = pax_parse_hardmode(&phdr);
40023+ }
40024+#endif
40025+
40026+ if (0 > pax_check_flags(&pax_flags))
40027+ return -EINVAL;
40028+
40029+ current->mm->pax_flags = pax_flags;
40030+ return 0;
40031+}
40032+#endif
40033+
40034 /*
40035 * These are the functions used to load ELF style executables and shared
40036 * libraries. There is no binary dependent code anywhere else.
40037@@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
40038 {
40039 unsigned int random_variable = 0;
40040
40041+#ifdef CONFIG_PAX_RANDUSTACK
40042+ if (randomize_va_space)
40043+ return stack_top - current->mm->delta_stack;
40044+#endif
40045+
40046 if ((current->flags & PF_RANDOMIZE) &&
40047 !(current->personality & ADDR_NO_RANDOMIZE)) {
40048 random_variable = get_random_int() & STACK_RND_MASK;
40049@@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
40050 unsigned long load_addr = 0, load_bias = 0;
40051 int load_addr_set = 0;
40052 char * elf_interpreter = NULL;
40053- unsigned long error;
40054+ unsigned long error = 0;
40055 struct elf_phdr *elf_ppnt, *elf_phdata;
40056 unsigned long elf_bss, elf_brk;
40057 int retval, i;
40058@@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
40059 unsigned long start_code, end_code, start_data, end_data;
40060 unsigned long reloc_func_desc __maybe_unused = 0;
40061 int executable_stack = EXSTACK_DEFAULT;
40062- unsigned long def_flags = 0;
40063 struct {
40064 struct elfhdr elf_ex;
40065 struct elfhdr interp_elf_ex;
40066 } *loc;
40067+ unsigned long pax_task_size = TASK_SIZE;
40068
40069 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
40070 if (!loc) {
40071@@ -713,11 +930,81 @@ static int load_elf_binary(struct linux_
40072
40073 /* OK, This is the point of no return */
40074 current->flags &= ~PF_FORKNOEXEC;
40075- current->mm->def_flags = def_flags;
40076+
40077+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
40078+ current->mm->pax_flags = 0UL;
40079+#endif
40080+
40081+#ifdef CONFIG_PAX_DLRESOLVE
40082+ current->mm->call_dl_resolve = 0UL;
40083+#endif
40084+
40085+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
40086+ current->mm->call_syscall = 0UL;
40087+#endif
40088+
40089+#ifdef CONFIG_PAX_ASLR
40090+ current->mm->delta_mmap = 0UL;
40091+ current->mm->delta_stack = 0UL;
40092+#endif
40093+
40094+ current->mm->def_flags = 0;
40095+
40096+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
40097+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
40098+ send_sig(SIGKILL, current, 0);
40099+ goto out_free_dentry;
40100+ }
40101+#endif
40102+
40103+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
40104+ pax_set_initial_flags(bprm);
40105+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
40106+ if (pax_set_initial_flags_func)
40107+ (pax_set_initial_flags_func)(bprm);
40108+#endif
40109+
40110+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40111+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
40112+ current->mm->context.user_cs_limit = PAGE_SIZE;
40113+ current->mm->def_flags |= VM_PAGEEXEC;
40114+ }
40115+#endif
40116+
40117+#ifdef CONFIG_PAX_SEGMEXEC
40118+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
40119+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
40120+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
40121+ pax_task_size = SEGMEXEC_TASK_SIZE;
40122+ current->mm->def_flags |= VM_NOHUGEPAGE;
40123+ }
40124+#endif
40125+
40126+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
40127+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40128+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
40129+ put_cpu();
40130+ }
40131+#endif
40132
40133 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
40134 may depend on the personality. */
40135 SET_PERSONALITY(loc->elf_ex);
40136+
40137+#ifdef CONFIG_PAX_ASLR
40138+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
40139+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
40140+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
40141+ }
40142+#endif
40143+
40144+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40145+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40146+ executable_stack = EXSTACK_DISABLE_X;
40147+ current->personality &= ~READ_IMPLIES_EXEC;
40148+ } else
40149+#endif
40150+
40151 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
40152 current->personality |= READ_IMPLIES_EXEC;
40153
40154@@ -808,6 +1095,20 @@ static int load_elf_binary(struct linux_
40155 #else
40156 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
40157 #endif
40158+
40159+#ifdef CONFIG_PAX_RANDMMAP
40160+ /* PaX: randomize base address at the default exe base if requested */
40161+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
40162+#ifdef CONFIG_SPARC64
40163+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
40164+#else
40165+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
40166+#endif
40167+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
40168+ elf_flags |= MAP_FIXED;
40169+ }
40170+#endif
40171+
40172 }
40173
40174 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
40175@@ -840,9 +1141,9 @@ static int load_elf_binary(struct linux_
40176 * allowed task size. Note that p_filesz must always be
40177 * <= p_memsz so it is only necessary to check p_memsz.
40178 */
40179- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40180- elf_ppnt->p_memsz > TASK_SIZE ||
40181- TASK_SIZE - elf_ppnt->p_memsz < k) {
40182+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40183+ elf_ppnt->p_memsz > pax_task_size ||
40184+ pax_task_size - elf_ppnt->p_memsz < k) {
40185 /* set_brk can never work. Avoid overflows. */
40186 send_sig(SIGKILL, current, 0);
40187 retval = -EINVAL;
40188@@ -870,6 +1171,11 @@ static int load_elf_binary(struct linux_
40189 start_data += load_bias;
40190 end_data += load_bias;
40191
40192+#ifdef CONFIG_PAX_RANDMMAP
40193+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
40194+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
40195+#endif
40196+
40197 /* Calling set_brk effectively mmaps the pages that we need
40198 * for the bss and break sections. We must do this before
40199 * mapping in the interpreter, to make sure it doesn't wind
40200@@ -881,9 +1187,11 @@ static int load_elf_binary(struct linux_
40201 goto out_free_dentry;
40202 }
40203 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
40204- send_sig(SIGSEGV, current, 0);
40205- retval = -EFAULT; /* Nobody gets to see this, but.. */
40206- goto out_free_dentry;
40207+ /*
40208+ * This bss-zeroing can fail if the ELF
40209+ * file specifies odd protections. So
40210+ * we don't check the return value
40211+ */
40212 }
40213
40214 if (elf_interpreter) {
40215@@ -1098,7 +1406,7 @@ out:
40216 * Decide what to dump of a segment, part, all or none.
40217 */
40218 static unsigned long vma_dump_size(struct vm_area_struct *vma,
40219- unsigned long mm_flags)
40220+ unsigned long mm_flags, long signr)
40221 {
40222 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
40223
40224@@ -1132,7 +1440,7 @@ static unsigned long vma_dump_size(struc
40225 if (vma->vm_file == NULL)
40226 return 0;
40227
40228- if (FILTER(MAPPED_PRIVATE))
40229+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
40230 goto whole;
40231
40232 /*
40233@@ -1354,9 +1662,9 @@ static void fill_auxv_note(struct memelf
40234 {
40235 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
40236 int i = 0;
40237- do
40238+ do {
40239 i += 2;
40240- while (auxv[i - 2] != AT_NULL);
40241+ } while (auxv[i - 2] != AT_NULL);
40242 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
40243 }
40244
40245@@ -1862,14 +2170,14 @@ static void fill_extnum_info(struct elfh
40246 }
40247
40248 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
40249- unsigned long mm_flags)
40250+ struct coredump_params *cprm)
40251 {
40252 struct vm_area_struct *vma;
40253 size_t size = 0;
40254
40255 for (vma = first_vma(current, gate_vma); vma != NULL;
40256 vma = next_vma(vma, gate_vma))
40257- size += vma_dump_size(vma, mm_flags);
40258+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40259 return size;
40260 }
40261
40262@@ -1963,7 +2271,7 @@ static int elf_core_dump(struct coredump
40263
40264 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
40265
40266- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
40267+ offset += elf_core_vma_data_size(gate_vma, cprm);
40268 offset += elf_core_extra_data_size();
40269 e_shoff = offset;
40270
40271@@ -1977,10 +2285,12 @@ static int elf_core_dump(struct coredump
40272 offset = dataoff;
40273
40274 size += sizeof(*elf);
40275+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40276 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
40277 goto end_coredump;
40278
40279 size += sizeof(*phdr4note);
40280+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40281 if (size > cprm->limit
40282 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
40283 goto end_coredump;
40284@@ -1994,7 +2304,7 @@ static int elf_core_dump(struct coredump
40285 phdr.p_offset = offset;
40286 phdr.p_vaddr = vma->vm_start;
40287 phdr.p_paddr = 0;
40288- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
40289+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40290 phdr.p_memsz = vma->vm_end - vma->vm_start;
40291 offset += phdr.p_filesz;
40292 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
40293@@ -2005,6 +2315,7 @@ static int elf_core_dump(struct coredump
40294 phdr.p_align = ELF_EXEC_PAGESIZE;
40295
40296 size += sizeof(phdr);
40297+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40298 if (size > cprm->limit
40299 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
40300 goto end_coredump;
40301@@ -2029,7 +2340,7 @@ static int elf_core_dump(struct coredump
40302 unsigned long addr;
40303 unsigned long end;
40304
40305- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
40306+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40307
40308 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
40309 struct page *page;
40310@@ -2038,6 +2349,7 @@ static int elf_core_dump(struct coredump
40311 page = get_dump_page(addr);
40312 if (page) {
40313 void *kaddr = kmap(page);
40314+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
40315 stop = ((size += PAGE_SIZE) > cprm->limit) ||
40316 !dump_write(cprm->file, kaddr,
40317 PAGE_SIZE);
40318@@ -2055,6 +2367,7 @@ static int elf_core_dump(struct coredump
40319
40320 if (e_phnum == PN_XNUM) {
40321 size += sizeof(*shdr4extnum);
40322+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40323 if (size > cprm->limit
40324 || !dump_write(cprm->file, shdr4extnum,
40325 sizeof(*shdr4extnum)))
40326@@ -2075,6 +2388,97 @@ out:
40327
40328 #endif /* CONFIG_ELF_CORE */
40329
40330+#ifdef CONFIG_PAX_MPROTECT
40331+/* PaX: non-PIC ELF libraries need relocations on their executable segments
40332+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
40333+ * we'll remove VM_MAYWRITE for good on RELRO segments.
40334+ *
40335+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
40336+ * basis because we want to allow the common case and not the special ones.
40337+ */
40338+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
40339+{
40340+ struct elfhdr elf_h;
40341+ struct elf_phdr elf_p;
40342+ unsigned long i;
40343+ unsigned long oldflags;
40344+ bool is_textrel_rw, is_textrel_rx, is_relro;
40345+
40346+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
40347+ return;
40348+
40349+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
40350+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
40351+
40352+#ifdef CONFIG_PAX_ELFRELOCS
40353+ /* possible TEXTREL */
40354+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
40355+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
40356+#else
40357+ is_textrel_rw = false;
40358+ is_textrel_rx = false;
40359+#endif
40360+
40361+ /* possible RELRO */
40362+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
40363+
40364+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
40365+ return;
40366+
40367+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
40368+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
40369+
40370+#ifdef CONFIG_PAX_ETEXECRELOCS
40371+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40372+#else
40373+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
40374+#endif
40375+
40376+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40377+ !elf_check_arch(&elf_h) ||
40378+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
40379+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
40380+ return;
40381+
40382+ for (i = 0UL; i < elf_h.e_phnum; i++) {
40383+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
40384+ return;
40385+ switch (elf_p.p_type) {
40386+ case PT_DYNAMIC:
40387+ if (!is_textrel_rw && !is_textrel_rx)
40388+ continue;
40389+ i = 0UL;
40390+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
40391+ elf_dyn dyn;
40392+
40393+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
40394+ return;
40395+ if (dyn.d_tag == DT_NULL)
40396+ return;
40397+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
40398+ gr_log_textrel(vma);
40399+ if (is_textrel_rw)
40400+ vma->vm_flags |= VM_MAYWRITE;
40401+ else
40402+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
40403+ vma->vm_flags &= ~VM_MAYWRITE;
40404+ return;
40405+ }
40406+ i++;
40407+ }
40408+ return;
40409+
40410+ case PT_GNU_RELRO:
40411+ if (!is_relro)
40412+ continue;
40413+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
40414+ vma->vm_flags &= ~VM_MAYWRITE;
40415+ return;
40416+ }
40417+ }
40418+}
40419+#endif
40420+
40421 static int __init init_elf_binfmt(void)
40422 {
40423 return register_binfmt(&elf_format);
40424diff -urNp linux-3.1.1/fs/binfmt_flat.c linux-3.1.1/fs/binfmt_flat.c
40425--- linux-3.1.1/fs/binfmt_flat.c 2011-11-11 15:19:27.000000000 -0500
40426+++ linux-3.1.1/fs/binfmt_flat.c 2011-11-16 18:39:08.000000000 -0500
40427@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
40428 realdatastart = (unsigned long) -ENOMEM;
40429 printk("Unable to allocate RAM for process data, errno %d\n",
40430 (int)-realdatastart);
40431+ down_write(&current->mm->mmap_sem);
40432 do_munmap(current->mm, textpos, text_len);
40433+ up_write(&current->mm->mmap_sem);
40434 ret = realdatastart;
40435 goto err;
40436 }
40437@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
40438 }
40439 if (IS_ERR_VALUE(result)) {
40440 printk("Unable to read data+bss, errno %d\n", (int)-result);
40441+ down_write(&current->mm->mmap_sem);
40442 do_munmap(current->mm, textpos, text_len);
40443 do_munmap(current->mm, realdatastart, len);
40444+ up_write(&current->mm->mmap_sem);
40445 ret = result;
40446 goto err;
40447 }
40448@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
40449 }
40450 if (IS_ERR_VALUE(result)) {
40451 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
40452+ down_write(&current->mm->mmap_sem);
40453 do_munmap(current->mm, textpos, text_len + data_len + extra +
40454 MAX_SHARED_LIBS * sizeof(unsigned long));
40455+ up_write(&current->mm->mmap_sem);
40456 ret = result;
40457 goto err;
40458 }
40459diff -urNp linux-3.1.1/fs/bio.c linux-3.1.1/fs/bio.c
40460--- linux-3.1.1/fs/bio.c 2011-11-11 15:19:27.000000000 -0500
40461+++ linux-3.1.1/fs/bio.c 2011-11-16 18:39:08.000000000 -0500
40462@@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
40463 const int read = bio_data_dir(bio) == READ;
40464 struct bio_map_data *bmd = bio->bi_private;
40465 int i;
40466- char *p = bmd->sgvecs[0].iov_base;
40467+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
40468
40469 __bio_for_each_segment(bvec, bio, i, 0) {
40470 char *addr = page_address(bvec->bv_page);
40471diff -urNp linux-3.1.1/fs/block_dev.c linux-3.1.1/fs/block_dev.c
40472--- linux-3.1.1/fs/block_dev.c 2011-11-11 15:19:27.000000000 -0500
40473+++ linux-3.1.1/fs/block_dev.c 2011-11-16 18:39:08.000000000 -0500
40474@@ -681,7 +681,7 @@ static bool bd_may_claim(struct block_de
40475 else if (bdev->bd_contains == bdev)
40476 return true; /* is a whole device which isn't held */
40477
40478- else if (whole->bd_holder == bd_may_claim)
40479+ else if (whole->bd_holder == (void *)bd_may_claim)
40480 return true; /* is a partition of a device that is being partitioned */
40481 else if (whole->bd_holder != NULL)
40482 return false; /* is a partition of a held device */
40483diff -urNp linux-3.1.1/fs/btrfs/ctree.c linux-3.1.1/fs/btrfs/ctree.c
40484--- linux-3.1.1/fs/btrfs/ctree.c 2011-11-11 15:19:27.000000000 -0500
40485+++ linux-3.1.1/fs/btrfs/ctree.c 2011-11-16 18:39:08.000000000 -0500
40486@@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(st
40487 free_extent_buffer(buf);
40488 add_root_to_dirty_list(root);
40489 } else {
40490- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
40491- parent_start = parent->start;
40492- else
40493+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
40494+ if (parent)
40495+ parent_start = parent->start;
40496+ else
40497+ parent_start = 0;
40498+ } else
40499 parent_start = 0;
40500
40501 WARN_ON(trans->transid != btrfs_header_generation(parent));
40502diff -urNp linux-3.1.1/fs/btrfs/inode.c linux-3.1.1/fs/btrfs/inode.c
40503--- linux-3.1.1/fs/btrfs/inode.c 2011-11-11 15:19:27.000000000 -0500
40504+++ linux-3.1.1/fs/btrfs/inode.c 2011-11-16 18:40:29.000000000 -0500
40505@@ -6922,7 +6922,7 @@ fail:
40506 return -ENOMEM;
40507 }
40508
40509-static int btrfs_getattr(struct vfsmount *mnt,
40510+int btrfs_getattr(struct vfsmount *mnt,
40511 struct dentry *dentry, struct kstat *stat)
40512 {
40513 struct inode *inode = dentry->d_inode;
40514@@ -6934,6 +6934,14 @@ static int btrfs_getattr(struct vfsmount
40515 return 0;
40516 }
40517
40518+EXPORT_SYMBOL(btrfs_getattr);
40519+
40520+dev_t get_btrfs_dev_from_inode(struct inode *inode)
40521+{
40522+ return BTRFS_I(inode)->root->anon_super.s_dev;
40523+}
40524+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
40525+
40526 /*
40527 * If a file is moved, it will inherit the cow and compression flags of the new
40528 * directory.
40529diff -urNp linux-3.1.1/fs/btrfs/ioctl.c linux-3.1.1/fs/btrfs/ioctl.c
40530--- linux-3.1.1/fs/btrfs/ioctl.c 2011-11-11 15:19:27.000000000 -0500
40531+++ linux-3.1.1/fs/btrfs/ioctl.c 2011-11-16 18:40:29.000000000 -0500
40532@@ -2704,9 +2704,12 @@ long btrfs_ioctl_space_info(struct btrfs
40533 for (i = 0; i < num_types; i++) {
40534 struct btrfs_space_info *tmp;
40535
40536+ /* Don't copy in more than we allocated */
40537 if (!slot_count)
40538 break;
40539
40540+ slot_count--;
40541+
40542 info = NULL;
40543 rcu_read_lock();
40544 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
40545@@ -2728,15 +2731,12 @@ long btrfs_ioctl_space_info(struct btrfs
40546 memcpy(dest, &space, sizeof(space));
40547 dest++;
40548 space_args.total_spaces++;
40549- slot_count--;
40550 }
40551- if (!slot_count)
40552- break;
40553 }
40554 up_read(&info->groups_sem);
40555 }
40556
40557- user_dest = (struct btrfs_ioctl_space_info *)
40558+ user_dest = (struct btrfs_ioctl_space_info __user *)
40559 (arg + sizeof(struct btrfs_ioctl_space_args));
40560
40561 if (copy_to_user(user_dest, dest_orig, alloc_size))
40562diff -urNp linux-3.1.1/fs/btrfs/relocation.c linux-3.1.1/fs/btrfs/relocation.c
40563--- linux-3.1.1/fs/btrfs/relocation.c 2011-11-11 15:19:27.000000000 -0500
40564+++ linux-3.1.1/fs/btrfs/relocation.c 2011-11-16 18:39:08.000000000 -0500
40565@@ -1242,7 +1242,7 @@ static int __update_reloc_root(struct bt
40566 }
40567 spin_unlock(&rc->reloc_root_tree.lock);
40568
40569- BUG_ON((struct btrfs_root *)node->data != root);
40570+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
40571
40572 if (!del) {
40573 spin_lock(&rc->reloc_root_tree.lock);
40574diff -urNp linux-3.1.1/fs/cachefiles/bind.c linux-3.1.1/fs/cachefiles/bind.c
40575--- linux-3.1.1/fs/cachefiles/bind.c 2011-11-11 15:19:27.000000000 -0500
40576+++ linux-3.1.1/fs/cachefiles/bind.c 2011-11-16 18:39:08.000000000 -0500
40577@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
40578 args);
40579
40580 /* start by checking things over */
40581- ASSERT(cache->fstop_percent >= 0 &&
40582- cache->fstop_percent < cache->fcull_percent &&
40583+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
40584 cache->fcull_percent < cache->frun_percent &&
40585 cache->frun_percent < 100);
40586
40587- ASSERT(cache->bstop_percent >= 0 &&
40588- cache->bstop_percent < cache->bcull_percent &&
40589+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
40590 cache->bcull_percent < cache->brun_percent &&
40591 cache->brun_percent < 100);
40592
40593diff -urNp linux-3.1.1/fs/cachefiles/daemon.c linux-3.1.1/fs/cachefiles/daemon.c
40594--- linux-3.1.1/fs/cachefiles/daemon.c 2011-11-11 15:19:27.000000000 -0500
40595+++ linux-3.1.1/fs/cachefiles/daemon.c 2011-11-16 18:39:08.000000000 -0500
40596@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
40597 if (n > buflen)
40598 return -EMSGSIZE;
40599
40600- if (copy_to_user(_buffer, buffer, n) != 0)
40601+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
40602 return -EFAULT;
40603
40604 return n;
40605@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
40606 if (test_bit(CACHEFILES_DEAD, &cache->flags))
40607 return -EIO;
40608
40609- if (datalen < 0 || datalen > PAGE_SIZE - 1)
40610+ if (datalen > PAGE_SIZE - 1)
40611 return -EOPNOTSUPP;
40612
40613 /* drag the command string into the kernel so we can parse it */
40614@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
40615 if (args[0] != '%' || args[1] != '\0')
40616 return -EINVAL;
40617
40618- if (fstop < 0 || fstop >= cache->fcull_percent)
40619+ if (fstop >= cache->fcull_percent)
40620 return cachefiles_daemon_range_error(cache, args);
40621
40622 cache->fstop_percent = fstop;
40623@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
40624 if (args[0] != '%' || args[1] != '\0')
40625 return -EINVAL;
40626
40627- if (bstop < 0 || bstop >= cache->bcull_percent)
40628+ if (bstop >= cache->bcull_percent)
40629 return cachefiles_daemon_range_error(cache, args);
40630
40631 cache->bstop_percent = bstop;
40632diff -urNp linux-3.1.1/fs/cachefiles/internal.h linux-3.1.1/fs/cachefiles/internal.h
40633--- linux-3.1.1/fs/cachefiles/internal.h 2011-11-11 15:19:27.000000000 -0500
40634+++ linux-3.1.1/fs/cachefiles/internal.h 2011-11-16 18:39:08.000000000 -0500
40635@@ -57,7 +57,7 @@ struct cachefiles_cache {
40636 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
40637 struct rb_root active_nodes; /* active nodes (can't be culled) */
40638 rwlock_t active_lock; /* lock for active_nodes */
40639- atomic_t gravecounter; /* graveyard uniquifier */
40640+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
40641 unsigned frun_percent; /* when to stop culling (% files) */
40642 unsigned fcull_percent; /* when to start culling (% files) */
40643 unsigned fstop_percent; /* when to stop allocating (% files) */
40644@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
40645 * proc.c
40646 */
40647 #ifdef CONFIG_CACHEFILES_HISTOGRAM
40648-extern atomic_t cachefiles_lookup_histogram[HZ];
40649-extern atomic_t cachefiles_mkdir_histogram[HZ];
40650-extern atomic_t cachefiles_create_histogram[HZ];
40651+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40652+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40653+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
40654
40655 extern int __init cachefiles_proc_init(void);
40656 extern void cachefiles_proc_cleanup(void);
40657 static inline
40658-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
40659+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
40660 {
40661 unsigned long jif = jiffies - start_jif;
40662 if (jif >= HZ)
40663 jif = HZ - 1;
40664- atomic_inc(&histogram[jif]);
40665+ atomic_inc_unchecked(&histogram[jif]);
40666 }
40667
40668 #else
40669diff -urNp linux-3.1.1/fs/cachefiles/namei.c linux-3.1.1/fs/cachefiles/namei.c
40670--- linux-3.1.1/fs/cachefiles/namei.c 2011-11-11 15:19:27.000000000 -0500
40671+++ linux-3.1.1/fs/cachefiles/namei.c 2011-11-16 18:39:08.000000000 -0500
40672@@ -318,7 +318,7 @@ try_again:
40673 /* first step is to make up a grave dentry in the graveyard */
40674 sprintf(nbuffer, "%08x%08x",
40675 (uint32_t) get_seconds(),
40676- (uint32_t) atomic_inc_return(&cache->gravecounter));
40677+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
40678
40679 /* do the multiway lock magic */
40680 trap = lock_rename(cache->graveyard, dir);
40681diff -urNp linux-3.1.1/fs/cachefiles/proc.c linux-3.1.1/fs/cachefiles/proc.c
40682--- linux-3.1.1/fs/cachefiles/proc.c 2011-11-11 15:19:27.000000000 -0500
40683+++ linux-3.1.1/fs/cachefiles/proc.c 2011-11-16 18:39:08.000000000 -0500
40684@@ -14,9 +14,9 @@
40685 #include <linux/seq_file.h>
40686 #include "internal.h"
40687
40688-atomic_t cachefiles_lookup_histogram[HZ];
40689-atomic_t cachefiles_mkdir_histogram[HZ];
40690-atomic_t cachefiles_create_histogram[HZ];
40691+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40692+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40693+atomic_unchecked_t cachefiles_create_histogram[HZ];
40694
40695 /*
40696 * display the latency histogram
40697@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
40698 return 0;
40699 default:
40700 index = (unsigned long) v - 3;
40701- x = atomic_read(&cachefiles_lookup_histogram[index]);
40702- y = atomic_read(&cachefiles_mkdir_histogram[index]);
40703- z = atomic_read(&cachefiles_create_histogram[index]);
40704+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
40705+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
40706+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
40707 if (x == 0 && y == 0 && z == 0)
40708 return 0;
40709
40710diff -urNp linux-3.1.1/fs/cachefiles/rdwr.c linux-3.1.1/fs/cachefiles/rdwr.c
40711--- linux-3.1.1/fs/cachefiles/rdwr.c 2011-11-11 15:19:27.000000000 -0500
40712+++ linux-3.1.1/fs/cachefiles/rdwr.c 2011-11-16 18:39:08.000000000 -0500
40713@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
40714 old_fs = get_fs();
40715 set_fs(KERNEL_DS);
40716 ret = file->f_op->write(
40717- file, (const void __user *) data, len, &pos);
40718+ file, (const void __force_user *) data, len, &pos);
40719 set_fs(old_fs);
40720 kunmap(page);
40721 if (ret != len)
40722diff -urNp linux-3.1.1/fs/ceph/dir.c linux-3.1.1/fs/ceph/dir.c
40723--- linux-3.1.1/fs/ceph/dir.c 2011-11-11 15:19:27.000000000 -0500
40724+++ linux-3.1.1/fs/ceph/dir.c 2011-11-16 18:39:08.000000000 -0500
40725@@ -244,7 +244,7 @@ static int ceph_readdir(struct file *fil
40726 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
40727 struct ceph_mds_client *mdsc = fsc->mdsc;
40728 unsigned frag = fpos_frag(filp->f_pos);
40729- int off = fpos_off(filp->f_pos);
40730+ unsigned int off = fpos_off(filp->f_pos);
40731 int err;
40732 u32 ftype;
40733 struct ceph_mds_reply_info_parsed *rinfo;
40734diff -urNp linux-3.1.1/fs/cifs/cifs_debug.c linux-3.1.1/fs/cifs/cifs_debug.c
40735--- linux-3.1.1/fs/cifs/cifs_debug.c 2011-11-11 15:19:27.000000000 -0500
40736+++ linux-3.1.1/fs/cifs/cifs_debug.c 2011-11-16 18:39:08.000000000 -0500
40737@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(str
40738
40739 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
40740 #ifdef CONFIG_CIFS_STATS2
40741- atomic_set(&totBufAllocCount, 0);
40742- atomic_set(&totSmBufAllocCount, 0);
40743+ atomic_set_unchecked(&totBufAllocCount, 0);
40744+ atomic_set_unchecked(&totSmBufAllocCount, 0);
40745 #endif /* CONFIG_CIFS_STATS2 */
40746 spin_lock(&cifs_tcp_ses_lock);
40747 list_for_each(tmp1, &cifs_tcp_ses_list) {
40748@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
40749 tcon = list_entry(tmp3,
40750 struct cifs_tcon,
40751 tcon_list);
40752- atomic_set(&tcon->num_smbs_sent, 0);
40753- atomic_set(&tcon->num_writes, 0);
40754- atomic_set(&tcon->num_reads, 0);
40755- atomic_set(&tcon->num_oplock_brks, 0);
40756- atomic_set(&tcon->num_opens, 0);
40757- atomic_set(&tcon->num_posixopens, 0);
40758- atomic_set(&tcon->num_posixmkdirs, 0);
40759- atomic_set(&tcon->num_closes, 0);
40760- atomic_set(&tcon->num_deletes, 0);
40761- atomic_set(&tcon->num_mkdirs, 0);
40762- atomic_set(&tcon->num_rmdirs, 0);
40763- atomic_set(&tcon->num_renames, 0);
40764- atomic_set(&tcon->num_t2renames, 0);
40765- atomic_set(&tcon->num_ffirst, 0);
40766- atomic_set(&tcon->num_fnext, 0);
40767- atomic_set(&tcon->num_fclose, 0);
40768- atomic_set(&tcon->num_hardlinks, 0);
40769- atomic_set(&tcon->num_symlinks, 0);
40770- atomic_set(&tcon->num_locks, 0);
40771+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
40772+ atomic_set_unchecked(&tcon->num_writes, 0);
40773+ atomic_set_unchecked(&tcon->num_reads, 0);
40774+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
40775+ atomic_set_unchecked(&tcon->num_opens, 0);
40776+ atomic_set_unchecked(&tcon->num_posixopens, 0);
40777+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
40778+ atomic_set_unchecked(&tcon->num_closes, 0);
40779+ atomic_set_unchecked(&tcon->num_deletes, 0);
40780+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
40781+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
40782+ atomic_set_unchecked(&tcon->num_renames, 0);
40783+ atomic_set_unchecked(&tcon->num_t2renames, 0);
40784+ atomic_set_unchecked(&tcon->num_ffirst, 0);
40785+ atomic_set_unchecked(&tcon->num_fnext, 0);
40786+ atomic_set_unchecked(&tcon->num_fclose, 0);
40787+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
40788+ atomic_set_unchecked(&tcon->num_symlinks, 0);
40789+ atomic_set_unchecked(&tcon->num_locks, 0);
40790 }
40791 }
40792 }
40793@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct s
40794 smBufAllocCount.counter, cifs_min_small);
40795 #ifdef CONFIG_CIFS_STATS2
40796 seq_printf(m, "Total Large %d Small %d Allocations\n",
40797- atomic_read(&totBufAllocCount),
40798- atomic_read(&totSmBufAllocCount));
40799+ atomic_read_unchecked(&totBufAllocCount),
40800+ atomic_read_unchecked(&totSmBufAllocCount));
40801 #endif /* CONFIG_CIFS_STATS2 */
40802
40803 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
40804@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
40805 if (tcon->need_reconnect)
40806 seq_puts(m, "\tDISCONNECTED ");
40807 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
40808- atomic_read(&tcon->num_smbs_sent),
40809- atomic_read(&tcon->num_oplock_brks));
40810+ atomic_read_unchecked(&tcon->num_smbs_sent),
40811+ atomic_read_unchecked(&tcon->num_oplock_brks));
40812 seq_printf(m, "\nReads: %d Bytes: %lld",
40813- atomic_read(&tcon->num_reads),
40814+ atomic_read_unchecked(&tcon->num_reads),
40815 (long long)(tcon->bytes_read));
40816 seq_printf(m, "\nWrites: %d Bytes: %lld",
40817- atomic_read(&tcon->num_writes),
40818+ atomic_read_unchecked(&tcon->num_writes),
40819 (long long)(tcon->bytes_written));
40820 seq_printf(m, "\nFlushes: %d",
40821- atomic_read(&tcon->num_flushes));
40822+ atomic_read_unchecked(&tcon->num_flushes));
40823 seq_printf(m, "\nLocks: %d HardLinks: %d "
40824 "Symlinks: %d",
40825- atomic_read(&tcon->num_locks),
40826- atomic_read(&tcon->num_hardlinks),
40827- atomic_read(&tcon->num_symlinks));
40828+ atomic_read_unchecked(&tcon->num_locks),
40829+ atomic_read_unchecked(&tcon->num_hardlinks),
40830+ atomic_read_unchecked(&tcon->num_symlinks));
40831 seq_printf(m, "\nOpens: %d Closes: %d "
40832 "Deletes: %d",
40833- atomic_read(&tcon->num_opens),
40834- atomic_read(&tcon->num_closes),
40835- atomic_read(&tcon->num_deletes));
40836+ atomic_read_unchecked(&tcon->num_opens),
40837+ atomic_read_unchecked(&tcon->num_closes),
40838+ atomic_read_unchecked(&tcon->num_deletes));
40839 seq_printf(m, "\nPosix Opens: %d "
40840 "Posix Mkdirs: %d",
40841- atomic_read(&tcon->num_posixopens),
40842- atomic_read(&tcon->num_posixmkdirs));
40843+ atomic_read_unchecked(&tcon->num_posixopens),
40844+ atomic_read_unchecked(&tcon->num_posixmkdirs));
40845 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
40846- atomic_read(&tcon->num_mkdirs),
40847- atomic_read(&tcon->num_rmdirs));
40848+ atomic_read_unchecked(&tcon->num_mkdirs),
40849+ atomic_read_unchecked(&tcon->num_rmdirs));
40850 seq_printf(m, "\nRenames: %d T2 Renames %d",
40851- atomic_read(&tcon->num_renames),
40852- atomic_read(&tcon->num_t2renames));
40853+ atomic_read_unchecked(&tcon->num_renames),
40854+ atomic_read_unchecked(&tcon->num_t2renames));
40855 seq_printf(m, "\nFindFirst: %d FNext %d "
40856 "FClose %d",
40857- atomic_read(&tcon->num_ffirst),
40858- atomic_read(&tcon->num_fnext),
40859- atomic_read(&tcon->num_fclose));
40860+ atomic_read_unchecked(&tcon->num_ffirst),
40861+ atomic_read_unchecked(&tcon->num_fnext),
40862+ atomic_read_unchecked(&tcon->num_fclose));
40863 }
40864 }
40865 }
40866diff -urNp linux-3.1.1/fs/cifs/cifsfs.c linux-3.1.1/fs/cifs/cifsfs.c
40867--- linux-3.1.1/fs/cifs/cifsfs.c 2011-11-11 15:19:27.000000000 -0500
40868+++ linux-3.1.1/fs/cifs/cifsfs.c 2011-11-16 18:39:08.000000000 -0500
40869@@ -981,7 +981,7 @@ cifs_init_request_bufs(void)
40870 cifs_req_cachep = kmem_cache_create("cifs_request",
40871 CIFSMaxBufSize +
40872 MAX_CIFS_HDR_SIZE, 0,
40873- SLAB_HWCACHE_ALIGN, NULL);
40874+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
40875 if (cifs_req_cachep == NULL)
40876 return -ENOMEM;
40877
40878@@ -1008,7 +1008,7 @@ cifs_init_request_bufs(void)
40879 efficient to alloc 1 per page off the slab compared to 17K (5page)
40880 alloc of large cifs buffers even when page debugging is on */
40881 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
40882- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
40883+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
40884 NULL);
40885 if (cifs_sm_req_cachep == NULL) {
40886 mempool_destroy(cifs_req_poolp);
40887@@ -1093,8 +1093,8 @@ init_cifs(void)
40888 atomic_set(&bufAllocCount, 0);
40889 atomic_set(&smBufAllocCount, 0);
40890 #ifdef CONFIG_CIFS_STATS2
40891- atomic_set(&totBufAllocCount, 0);
40892- atomic_set(&totSmBufAllocCount, 0);
40893+ atomic_set_unchecked(&totBufAllocCount, 0);
40894+ atomic_set_unchecked(&totSmBufAllocCount, 0);
40895 #endif /* CONFIG_CIFS_STATS2 */
40896
40897 atomic_set(&midCount, 0);
40898diff -urNp linux-3.1.1/fs/cifs/cifsglob.h linux-3.1.1/fs/cifs/cifsglob.h
40899--- linux-3.1.1/fs/cifs/cifsglob.h 2011-11-11 15:19:27.000000000 -0500
40900+++ linux-3.1.1/fs/cifs/cifsglob.h 2011-11-16 18:39:08.000000000 -0500
40901@@ -381,28 +381,28 @@ struct cifs_tcon {
40902 __u16 Flags; /* optional support bits */
40903 enum statusEnum tidStatus;
40904 #ifdef CONFIG_CIFS_STATS
40905- atomic_t num_smbs_sent;
40906- atomic_t num_writes;
40907- atomic_t num_reads;
40908- atomic_t num_flushes;
40909- atomic_t num_oplock_brks;
40910- atomic_t num_opens;
40911- atomic_t num_closes;
40912- atomic_t num_deletes;
40913- atomic_t num_mkdirs;
40914- atomic_t num_posixopens;
40915- atomic_t num_posixmkdirs;
40916- atomic_t num_rmdirs;
40917- atomic_t num_renames;
40918- atomic_t num_t2renames;
40919- atomic_t num_ffirst;
40920- atomic_t num_fnext;
40921- atomic_t num_fclose;
40922- atomic_t num_hardlinks;
40923- atomic_t num_symlinks;
40924- atomic_t num_locks;
40925- atomic_t num_acl_get;
40926- atomic_t num_acl_set;
40927+ atomic_unchecked_t num_smbs_sent;
40928+ atomic_unchecked_t num_writes;
40929+ atomic_unchecked_t num_reads;
40930+ atomic_unchecked_t num_flushes;
40931+ atomic_unchecked_t num_oplock_brks;
40932+ atomic_unchecked_t num_opens;
40933+ atomic_unchecked_t num_closes;
40934+ atomic_unchecked_t num_deletes;
40935+ atomic_unchecked_t num_mkdirs;
40936+ atomic_unchecked_t num_posixopens;
40937+ atomic_unchecked_t num_posixmkdirs;
40938+ atomic_unchecked_t num_rmdirs;
40939+ atomic_unchecked_t num_renames;
40940+ atomic_unchecked_t num_t2renames;
40941+ atomic_unchecked_t num_ffirst;
40942+ atomic_unchecked_t num_fnext;
40943+ atomic_unchecked_t num_fclose;
40944+ atomic_unchecked_t num_hardlinks;
40945+ atomic_unchecked_t num_symlinks;
40946+ atomic_unchecked_t num_locks;
40947+ atomic_unchecked_t num_acl_get;
40948+ atomic_unchecked_t num_acl_set;
40949 #ifdef CONFIG_CIFS_STATS2
40950 unsigned long long time_writes;
40951 unsigned long long time_reads;
40952@@ -613,7 +613,7 @@ convert_delimiter(char *path, char delim
40953 }
40954
40955 #ifdef CONFIG_CIFS_STATS
40956-#define cifs_stats_inc atomic_inc
40957+#define cifs_stats_inc atomic_inc_unchecked
40958
40959 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
40960 unsigned int bytes)
40961@@ -953,8 +953,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnect
40962 /* Various Debug counters */
40963 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
40964 #ifdef CONFIG_CIFS_STATS2
40965-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
40966-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
40967+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
40968+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
40969 #endif
40970 GLOBAL_EXTERN atomic_t smBufAllocCount;
40971 GLOBAL_EXTERN atomic_t midCount;
40972diff -urNp linux-3.1.1/fs/cifs/link.c linux-3.1.1/fs/cifs/link.c
40973--- linux-3.1.1/fs/cifs/link.c 2011-11-11 15:19:27.000000000 -0500
40974+++ linux-3.1.1/fs/cifs/link.c 2011-11-16 18:39:08.000000000 -0500
40975@@ -593,7 +593,7 @@ symlink_exit:
40976
40977 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
40978 {
40979- char *p = nd_get_link(nd);
40980+ const char *p = nd_get_link(nd);
40981 if (!IS_ERR(p))
40982 kfree(p);
40983 }
40984diff -urNp linux-3.1.1/fs/cifs/misc.c linux-3.1.1/fs/cifs/misc.c
40985--- linux-3.1.1/fs/cifs/misc.c 2011-11-11 15:19:27.000000000 -0500
40986+++ linux-3.1.1/fs/cifs/misc.c 2011-11-16 18:39:08.000000000 -0500
40987@@ -156,7 +156,7 @@ cifs_buf_get(void)
40988 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
40989 atomic_inc(&bufAllocCount);
40990 #ifdef CONFIG_CIFS_STATS2
40991- atomic_inc(&totBufAllocCount);
40992+ atomic_inc_unchecked(&totBufAllocCount);
40993 #endif /* CONFIG_CIFS_STATS2 */
40994 }
40995
40996@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
40997 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
40998 atomic_inc(&smBufAllocCount);
40999 #ifdef CONFIG_CIFS_STATS2
41000- atomic_inc(&totSmBufAllocCount);
41001+ atomic_inc_unchecked(&totSmBufAllocCount);
41002 #endif /* CONFIG_CIFS_STATS2 */
41003
41004 }
41005diff -urNp linux-3.1.1/fs/coda/cache.c linux-3.1.1/fs/coda/cache.c
41006--- linux-3.1.1/fs/coda/cache.c 2011-11-11 15:19:27.000000000 -0500
41007+++ linux-3.1.1/fs/coda/cache.c 2011-11-16 18:39:08.000000000 -0500
41008@@ -24,7 +24,7 @@
41009 #include "coda_linux.h"
41010 #include "coda_cache.h"
41011
41012-static atomic_t permission_epoch = ATOMIC_INIT(0);
41013+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
41014
41015 /* replace or extend an acl cache hit */
41016 void coda_cache_enter(struct inode *inode, int mask)
41017@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
41018 struct coda_inode_info *cii = ITOC(inode);
41019
41020 spin_lock(&cii->c_lock);
41021- cii->c_cached_epoch = atomic_read(&permission_epoch);
41022+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
41023 if (cii->c_uid != current_fsuid()) {
41024 cii->c_uid = current_fsuid();
41025 cii->c_cached_perm = mask;
41026@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
41027 {
41028 struct coda_inode_info *cii = ITOC(inode);
41029 spin_lock(&cii->c_lock);
41030- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
41031+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
41032 spin_unlock(&cii->c_lock);
41033 }
41034
41035 /* remove all acl caches */
41036 void coda_cache_clear_all(struct super_block *sb)
41037 {
41038- atomic_inc(&permission_epoch);
41039+ atomic_inc_unchecked(&permission_epoch);
41040 }
41041
41042
41043@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
41044 spin_lock(&cii->c_lock);
41045 hit = (mask & cii->c_cached_perm) == mask &&
41046 cii->c_uid == current_fsuid() &&
41047- cii->c_cached_epoch == atomic_read(&permission_epoch);
41048+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
41049 spin_unlock(&cii->c_lock);
41050
41051 return hit;
41052diff -urNp linux-3.1.1/fs/compat_binfmt_elf.c linux-3.1.1/fs/compat_binfmt_elf.c
41053--- linux-3.1.1/fs/compat_binfmt_elf.c 2011-11-11 15:19:27.000000000 -0500
41054+++ linux-3.1.1/fs/compat_binfmt_elf.c 2011-11-16 18:39:08.000000000 -0500
41055@@ -30,11 +30,13 @@
41056 #undef elf_phdr
41057 #undef elf_shdr
41058 #undef elf_note
41059+#undef elf_dyn
41060 #undef elf_addr_t
41061 #define elfhdr elf32_hdr
41062 #define elf_phdr elf32_phdr
41063 #define elf_shdr elf32_shdr
41064 #define elf_note elf32_note
41065+#define elf_dyn Elf32_Dyn
41066 #define elf_addr_t Elf32_Addr
41067
41068 /*
41069diff -urNp linux-3.1.1/fs/compat.c linux-3.1.1/fs/compat.c
41070--- linux-3.1.1/fs/compat.c 2011-11-11 15:19:27.000000000 -0500
41071+++ linux-3.1.1/fs/compat.c 2011-11-16 18:40:29.000000000 -0500
41072@@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(const
41073 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
41074 {
41075 compat_ino_t ino = stat->ino;
41076- typeof(ubuf->st_uid) uid = 0;
41077- typeof(ubuf->st_gid) gid = 0;
41078+ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
41079+ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
41080 int err;
41081
41082 SET_UID(uid, stat->uid);
41083@@ -508,7 +508,7 @@ compat_sys_io_setup(unsigned nr_reqs, u3
41084
41085 set_fs(KERNEL_DS);
41086 /* The __user pointer cast is valid because of the set_fs() */
41087- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
41088+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
41089 set_fs(oldfs);
41090 /* truncating is ok because it's a user address */
41091 if (!ret)
41092@@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
41093 goto out;
41094
41095 ret = -EINVAL;
41096- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
41097+ if (nr_segs > UIO_MAXIOV)
41098 goto out;
41099 if (nr_segs > fast_segs) {
41100 ret = -ENOMEM;
41101@@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
41102
41103 struct compat_readdir_callback {
41104 struct compat_old_linux_dirent __user *dirent;
41105+ struct file * file;
41106 int result;
41107 };
41108
41109@@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
41110 buf->result = -EOVERFLOW;
41111 return -EOVERFLOW;
41112 }
41113+
41114+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41115+ return 0;
41116+
41117 buf->result++;
41118 dirent = buf->dirent;
41119 if (!access_ok(VERIFY_WRITE, dirent,
41120@@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
41121
41122 buf.result = 0;
41123 buf.dirent = dirent;
41124+ buf.file = file;
41125
41126 error = vfs_readdir(file, compat_fillonedir, &buf);
41127 if (buf.result)
41128@@ -917,6 +923,7 @@ struct compat_linux_dirent {
41129 struct compat_getdents_callback {
41130 struct compat_linux_dirent __user *current_dir;
41131 struct compat_linux_dirent __user *previous;
41132+ struct file * file;
41133 int count;
41134 int error;
41135 };
41136@@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
41137 buf->error = -EOVERFLOW;
41138 return -EOVERFLOW;
41139 }
41140+
41141+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41142+ return 0;
41143+
41144 dirent = buf->previous;
41145 if (dirent) {
41146 if (__put_user(offset, &dirent->d_off))
41147@@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
41148 buf.previous = NULL;
41149 buf.count = count;
41150 buf.error = 0;
41151+ buf.file = file;
41152
41153 error = vfs_readdir(file, compat_filldir, &buf);
41154 if (error >= 0)
41155@@ -1006,6 +1018,7 @@ out:
41156 struct compat_getdents_callback64 {
41157 struct linux_dirent64 __user *current_dir;
41158 struct linux_dirent64 __user *previous;
41159+ struct file * file;
41160 int count;
41161 int error;
41162 };
41163@@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
41164 buf->error = -EINVAL; /* only used if we fail.. */
41165 if (reclen > buf->count)
41166 return -EINVAL;
41167+
41168+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41169+ return 0;
41170+
41171 dirent = buf->previous;
41172
41173 if (dirent) {
41174@@ -1073,13 +1090,14 @@ asmlinkage long compat_sys_getdents64(un
41175 buf.previous = NULL;
41176 buf.count = count;
41177 buf.error = 0;
41178+ buf.file = file;
41179
41180 error = vfs_readdir(file, compat_filldir64, &buf);
41181 if (error >= 0)
41182 error = buf.error;
41183 lastdirent = buf.previous;
41184 if (lastdirent) {
41185- typeof(lastdirent->d_off) d_off = file->f_pos;
41186+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
41187 if (__put_user_unaligned(d_off, &lastdirent->d_off))
41188 error = -EFAULT;
41189 else
41190@@ -1446,6 +1464,8 @@ int compat_core_sys_select(int n, compat
41191 struct fdtable *fdt;
41192 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
41193
41194+ pax_track_stack();
41195+
41196 if (n < 0)
41197 goto out_nofds;
41198
41199diff -urNp linux-3.1.1/fs/compat_ioctl.c linux-3.1.1/fs/compat_ioctl.c
41200--- linux-3.1.1/fs/compat_ioctl.c 2011-11-11 15:19:27.000000000 -0500
41201+++ linux-3.1.1/fs/compat_ioctl.c 2011-11-16 18:39:08.000000000 -0500
41202@@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsi
41203
41204 err = get_user(palp, &up->palette);
41205 err |= get_user(length, &up->length);
41206+ if (err)
41207+ return -EFAULT;
41208
41209 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
41210 err = put_user(compat_ptr(palp), &up_native->palette);
41211@@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned
41212 return -EFAULT;
41213 if (__get_user(udata, &ss32->iomem_base))
41214 return -EFAULT;
41215- ss.iomem_base = compat_ptr(udata);
41216+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
41217 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
41218 __get_user(ss.port_high, &ss32->port_high))
41219 return -EFAULT;
41220@@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(stru
41221 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
41222 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
41223 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
41224- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41225+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41226 return -EFAULT;
41227
41228 return ioctl_preallocate(file, p);
41229@@ -1644,8 +1646,8 @@ asmlinkage long compat_sys_ioctl(unsigne
41230 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
41231 {
41232 unsigned int a, b;
41233- a = *(unsigned int *)p;
41234- b = *(unsigned int *)q;
41235+ a = *(const unsigned int *)p;
41236+ b = *(const unsigned int *)q;
41237 if (a > b)
41238 return 1;
41239 if (a < b)
41240diff -urNp linux-3.1.1/fs/configfs/dir.c linux-3.1.1/fs/configfs/dir.c
41241--- linux-3.1.1/fs/configfs/dir.c 2011-11-11 15:19:27.000000000 -0500
41242+++ linux-3.1.1/fs/configfs/dir.c 2011-11-16 18:39:08.000000000 -0500
41243@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file
41244 }
41245 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
41246 struct configfs_dirent *next;
41247- const char * name;
41248+ const unsigned char * name;
41249+ char d_name[sizeof(next->s_dentry->d_iname)];
41250 int len;
41251 struct inode *inode = NULL;
41252
41253@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file
41254 continue;
41255
41256 name = configfs_get_name(next);
41257- len = strlen(name);
41258+ if (next->s_dentry && name == next->s_dentry->d_iname) {
41259+ len = next->s_dentry->d_name.len;
41260+ memcpy(d_name, name, len);
41261+ name = d_name;
41262+ } else
41263+ len = strlen(name);
41264
41265 /*
41266 * We'll have a dentry and an inode for
41267diff -urNp linux-3.1.1/fs/dcache.c linux-3.1.1/fs/dcache.c
41268--- linux-3.1.1/fs/dcache.c 2011-11-11 15:19:27.000000000 -0500
41269+++ linux-3.1.1/fs/dcache.c 2011-11-16 18:39:08.000000000 -0500
41270@@ -2998,7 +2998,7 @@ void __init vfs_caches_init(unsigned lon
41271 mempages -= reserve;
41272
41273 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
41274- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
41275+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
41276
41277 dcache_init();
41278 inode_init();
41279diff -urNp linux-3.1.1/fs/ecryptfs/inode.c linux-3.1.1/fs/ecryptfs/inode.c
41280--- linux-3.1.1/fs/ecryptfs/inode.c 2011-11-11 15:19:27.000000000 -0500
41281+++ linux-3.1.1/fs/ecryptfs/inode.c 2011-11-16 18:39:08.000000000 -0500
41282@@ -681,7 +681,7 @@ static int ecryptfs_readlink_lower(struc
41283 old_fs = get_fs();
41284 set_fs(get_ds());
41285 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
41286- (char __user *)lower_buf,
41287+ (char __force_user *)lower_buf,
41288 lower_bufsiz);
41289 set_fs(old_fs);
41290 if (rc < 0)
41291@@ -727,7 +727,7 @@ static void *ecryptfs_follow_link(struct
41292 }
41293 old_fs = get_fs();
41294 set_fs(get_ds());
41295- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
41296+ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
41297 set_fs(old_fs);
41298 if (rc < 0) {
41299 kfree(buf);
41300@@ -742,7 +742,7 @@ out:
41301 static void
41302 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
41303 {
41304- char *buf = nd_get_link(nd);
41305+ const char *buf = nd_get_link(nd);
41306 if (!IS_ERR(buf)) {
41307 /* Free the char* */
41308 kfree(buf);
41309diff -urNp linux-3.1.1/fs/ecryptfs/miscdev.c linux-3.1.1/fs/ecryptfs/miscdev.c
41310--- linux-3.1.1/fs/ecryptfs/miscdev.c 2011-11-11 15:19:27.000000000 -0500
41311+++ linux-3.1.1/fs/ecryptfs/miscdev.c 2011-11-16 18:39:08.000000000 -0500
41312@@ -328,7 +328,7 @@ check_list:
41313 goto out_unlock_msg_ctx;
41314 i = 5;
41315 if (msg_ctx->msg) {
41316- if (copy_to_user(&buf[i], packet_length, packet_length_size))
41317+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
41318 goto out_unlock_msg_ctx;
41319 i += packet_length_size;
41320 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
41321diff -urNp linux-3.1.1/fs/ecryptfs/read_write.c linux-3.1.1/fs/ecryptfs/read_write.c
41322--- linux-3.1.1/fs/ecryptfs/read_write.c 2011-11-11 15:19:27.000000000 -0500
41323+++ linux-3.1.1/fs/ecryptfs/read_write.c 2011-11-16 18:39:08.000000000 -0500
41324@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *e
41325 return -EIO;
41326 fs_save = get_fs();
41327 set_fs(get_ds());
41328- rc = vfs_write(lower_file, data, size, &offset);
41329+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
41330 set_fs(fs_save);
41331 mark_inode_dirty_sync(ecryptfs_inode);
41332 return rc;
41333@@ -235,7 +235,7 @@ int ecryptfs_read_lower(char *data, loff
41334 return -EIO;
41335 fs_save = get_fs();
41336 set_fs(get_ds());
41337- rc = vfs_read(lower_file, data, size, &offset);
41338+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
41339 set_fs(fs_save);
41340 return rc;
41341 }
41342diff -urNp linux-3.1.1/fs/exec.c linux-3.1.1/fs/exec.c
41343--- linux-3.1.1/fs/exec.c 2011-11-11 15:19:27.000000000 -0500
41344+++ linux-3.1.1/fs/exec.c 2011-11-16 23:41:58.000000000 -0500
41345@@ -55,12 +55,24 @@
41346 #include <linux/pipe_fs_i.h>
41347 #include <linux/oom.h>
41348 #include <linux/compat.h>
41349+#include <linux/random.h>
41350+#include <linux/seq_file.h>
41351+
41352+#ifdef CONFIG_PAX_REFCOUNT
41353+#include <linux/kallsyms.h>
41354+#include <linux/kdebug.h>
41355+#endif
41356
41357 #include <asm/uaccess.h>
41358 #include <asm/mmu_context.h>
41359 #include <asm/tlb.h>
41360 #include "internal.h"
41361
41362+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
41363+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
41364+EXPORT_SYMBOL(pax_set_initial_flags_func);
41365+#endif
41366+
41367 int core_uses_pid;
41368 char core_pattern[CORENAME_MAX_SIZE] = "core";
41369 unsigned int core_pipe_limit;
41370@@ -70,7 +82,7 @@ struct core_name {
41371 char *corename;
41372 int used, size;
41373 };
41374-static atomic_t call_count = ATOMIC_INIT(1);
41375+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
41376
41377 /* The maximal length of core_pattern is also specified in sysctl.c */
41378
41379@@ -188,18 +200,10 @@ static struct page *get_arg_page(struct
41380 int write)
41381 {
41382 struct page *page;
41383- int ret;
41384
41385-#ifdef CONFIG_STACK_GROWSUP
41386- if (write) {
41387- ret = expand_downwards(bprm->vma, pos);
41388- if (ret < 0)
41389- return NULL;
41390- }
41391-#endif
41392- ret = get_user_pages(current, bprm->mm, pos,
41393- 1, write, 1, &page, NULL);
41394- if (ret <= 0)
41395+ if (0 > expand_downwards(bprm->vma, pos))
41396+ return NULL;
41397+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
41398 return NULL;
41399
41400 if (write) {
41401@@ -274,6 +278,11 @@ static int __bprm_mm_init(struct linux_b
41402 vma->vm_end = STACK_TOP_MAX;
41403 vma->vm_start = vma->vm_end - PAGE_SIZE;
41404 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
41405+
41406+#ifdef CONFIG_PAX_SEGMEXEC
41407+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
41408+#endif
41409+
41410 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
41411 INIT_LIST_HEAD(&vma->anon_vma_chain);
41412
41413@@ -288,6 +297,12 @@ static int __bprm_mm_init(struct linux_b
41414 mm->stack_vm = mm->total_vm = 1;
41415 up_write(&mm->mmap_sem);
41416 bprm->p = vma->vm_end - sizeof(void *);
41417+
41418+#ifdef CONFIG_PAX_RANDUSTACK
41419+ if (randomize_va_space)
41420+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
41421+#endif
41422+
41423 return 0;
41424 err:
41425 up_write(&mm->mmap_sem);
41426@@ -396,19 +411,7 @@ err:
41427 return err;
41428 }
41429
41430-struct user_arg_ptr {
41431-#ifdef CONFIG_COMPAT
41432- bool is_compat;
41433-#endif
41434- union {
41435- const char __user *const __user *native;
41436-#ifdef CONFIG_COMPAT
41437- compat_uptr_t __user *compat;
41438-#endif
41439- } ptr;
41440-};
41441-
41442-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41443+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41444 {
41445 const char __user *native;
41446
41447@@ -417,14 +420,14 @@ static const char __user *get_user_arg_p
41448 compat_uptr_t compat;
41449
41450 if (get_user(compat, argv.ptr.compat + nr))
41451- return ERR_PTR(-EFAULT);
41452+ return (const char __force_user *)ERR_PTR(-EFAULT);
41453
41454 return compat_ptr(compat);
41455 }
41456 #endif
41457
41458 if (get_user(native, argv.ptr.native + nr))
41459- return ERR_PTR(-EFAULT);
41460+ return (const char __force_user *)ERR_PTR(-EFAULT);
41461
41462 return native;
41463 }
41464@@ -443,7 +446,7 @@ static int count(struct user_arg_ptr arg
41465 if (!p)
41466 break;
41467
41468- if (IS_ERR(p))
41469+ if (IS_ERR((const char __force_kernel *)p))
41470 return -EFAULT;
41471
41472 if (i++ >= max)
41473@@ -477,7 +480,7 @@ static int copy_strings(int argc, struct
41474
41475 ret = -EFAULT;
41476 str = get_user_arg_ptr(argv, argc);
41477- if (IS_ERR(str))
41478+ if (IS_ERR((const char __force_kernel *)str))
41479 goto out;
41480
41481 len = strnlen_user(str, MAX_ARG_STRLEN);
41482@@ -559,7 +562,7 @@ int copy_strings_kernel(int argc, const
41483 int r;
41484 mm_segment_t oldfs = get_fs();
41485 struct user_arg_ptr argv = {
41486- .ptr.native = (const char __user *const __user *)__argv,
41487+ .ptr.native = (const char __force_user *const __force_user *)__argv,
41488 };
41489
41490 set_fs(KERNEL_DS);
41491@@ -594,7 +597,8 @@ static int shift_arg_pages(struct vm_are
41492 unsigned long new_end = old_end - shift;
41493 struct mmu_gather tlb;
41494
41495- BUG_ON(new_start > new_end);
41496+ if (new_start >= new_end || new_start < mmap_min_addr)
41497+ return -ENOMEM;
41498
41499 /*
41500 * ensure there are no vmas between where we want to go
41501@@ -603,6 +607,10 @@ static int shift_arg_pages(struct vm_are
41502 if (vma != find_vma(mm, new_start))
41503 return -EFAULT;
41504
41505+#ifdef CONFIG_PAX_SEGMEXEC
41506+ BUG_ON(pax_find_mirror_vma(vma));
41507+#endif
41508+
41509 /*
41510 * cover the whole range: [new_start, old_end)
41511 */
41512@@ -683,10 +691,6 @@ int setup_arg_pages(struct linux_binprm
41513 stack_top = arch_align_stack(stack_top);
41514 stack_top = PAGE_ALIGN(stack_top);
41515
41516- if (unlikely(stack_top < mmap_min_addr) ||
41517- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
41518- return -ENOMEM;
41519-
41520 stack_shift = vma->vm_end - stack_top;
41521
41522 bprm->p -= stack_shift;
41523@@ -698,8 +702,28 @@ int setup_arg_pages(struct linux_binprm
41524 bprm->exec -= stack_shift;
41525
41526 down_write(&mm->mmap_sem);
41527+
41528+ /* Move stack pages down in memory. */
41529+ if (stack_shift) {
41530+ ret = shift_arg_pages(vma, stack_shift);
41531+ if (ret)
41532+ goto out_unlock;
41533+ }
41534+
41535 vm_flags = VM_STACK_FLAGS;
41536
41537+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41538+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41539+ vm_flags &= ~VM_EXEC;
41540+
41541+#ifdef CONFIG_PAX_MPROTECT
41542+ if (mm->pax_flags & MF_PAX_MPROTECT)
41543+ vm_flags &= ~VM_MAYEXEC;
41544+#endif
41545+
41546+ }
41547+#endif
41548+
41549 /*
41550 * Adjust stack execute permissions; explicitly enable for
41551 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
41552@@ -718,13 +742,6 @@ int setup_arg_pages(struct linux_binprm
41553 goto out_unlock;
41554 BUG_ON(prev != vma);
41555
41556- /* Move stack pages down in memory. */
41557- if (stack_shift) {
41558- ret = shift_arg_pages(vma, stack_shift);
41559- if (ret)
41560- goto out_unlock;
41561- }
41562-
41563 /* mprotect_fixup is overkill to remove the temporary stack flags */
41564 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
41565
41566@@ -805,7 +822,7 @@ int kernel_read(struct file *file, loff_
41567 old_fs = get_fs();
41568 set_fs(get_ds());
41569 /* The cast to a user pointer is valid due to the set_fs() */
41570- result = vfs_read(file, (void __user *)addr, count, &pos);
41571+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
41572 set_fs(old_fs);
41573 return result;
41574 }
41575@@ -1251,7 +1268,7 @@ int check_unsafe_exec(struct linux_binpr
41576 }
41577 rcu_read_unlock();
41578
41579- if (p->fs->users > n_fs) {
41580+ if (atomic_read(&p->fs->users) > n_fs) {
41581 bprm->unsafe |= LSM_UNSAFE_SHARE;
41582 } else {
41583 res = -EAGAIN;
41584@@ -1454,6 +1471,11 @@ static int do_execve_common(const char *
41585 struct user_arg_ptr envp,
41586 struct pt_regs *regs)
41587 {
41588+#ifdef CONFIG_GRKERNSEC
41589+ struct file *old_exec_file;
41590+ struct acl_subject_label *old_acl;
41591+ struct rlimit old_rlim[RLIM_NLIMITS];
41592+#endif
41593 struct linux_binprm *bprm;
41594 struct file *file;
41595 struct files_struct *displaced;
41596@@ -1461,6 +1483,8 @@ static int do_execve_common(const char *
41597 int retval;
41598 const struct cred *cred = current_cred();
41599
41600+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
41601+
41602 /*
41603 * We move the actual failure in case of RLIMIT_NPROC excess from
41604 * set*uid() to execve() because too many poorly written programs
41605@@ -1507,6 +1531,16 @@ static int do_execve_common(const char *
41606 bprm->filename = filename;
41607 bprm->interp = filename;
41608
41609+ if (gr_process_user_ban()) {
41610+ retval = -EPERM;
41611+ goto out_file;
41612+ }
41613+
41614+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
41615+ retval = -EACCES;
41616+ goto out_file;
41617+ }
41618+
41619 retval = bprm_mm_init(bprm);
41620 if (retval)
41621 goto out_file;
41622@@ -1536,9 +1570,40 @@ static int do_execve_common(const char *
41623 if (retval < 0)
41624 goto out;
41625
41626+ if (!gr_tpe_allow(file)) {
41627+ retval = -EACCES;
41628+ goto out;
41629+ }
41630+
41631+ if (gr_check_crash_exec(file)) {
41632+ retval = -EACCES;
41633+ goto out;
41634+ }
41635+
41636+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
41637+
41638+ gr_handle_exec_args(bprm, argv);
41639+
41640+#ifdef CONFIG_GRKERNSEC
41641+ old_acl = current->acl;
41642+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
41643+ old_exec_file = current->exec_file;
41644+ get_file(file);
41645+ current->exec_file = file;
41646+#endif
41647+
41648+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
41649+ bprm->unsafe & LSM_UNSAFE_SHARE);
41650+ if (retval < 0)
41651+ goto out_fail;
41652+
41653 retval = search_binary_handler(bprm,regs);
41654 if (retval < 0)
41655- goto out;
41656+ goto out_fail;
41657+#ifdef CONFIG_GRKERNSEC
41658+ if (old_exec_file)
41659+ fput(old_exec_file);
41660+#endif
41661
41662 /* execve succeeded */
41663 current->fs->in_exec = 0;
41664@@ -1549,6 +1614,14 @@ static int do_execve_common(const char *
41665 put_files_struct(displaced);
41666 return retval;
41667
41668+out_fail:
41669+#ifdef CONFIG_GRKERNSEC
41670+ current->acl = old_acl;
41671+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
41672+ fput(current->exec_file);
41673+ current->exec_file = old_exec_file;
41674+#endif
41675+
41676 out:
41677 if (bprm->mm) {
41678 acct_arg_size(bprm, 0);
41679@@ -1622,7 +1695,7 @@ static int expand_corename(struct core_n
41680 {
41681 char *old_corename = cn->corename;
41682
41683- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
41684+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
41685 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
41686
41687 if (!cn->corename) {
41688@@ -1719,7 +1792,7 @@ static int format_corename(struct core_n
41689 int pid_in_pattern = 0;
41690 int err = 0;
41691
41692- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
41693+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
41694 cn->corename = kmalloc(cn->size, GFP_KERNEL);
41695 cn->used = 0;
41696
41697@@ -1816,6 +1889,219 @@ out:
41698 return ispipe;
41699 }
41700
41701+int pax_check_flags(unsigned long *flags)
41702+{
41703+ int retval = 0;
41704+
41705+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
41706+ if (*flags & MF_PAX_SEGMEXEC)
41707+ {
41708+ *flags &= ~MF_PAX_SEGMEXEC;
41709+ retval = -EINVAL;
41710+ }
41711+#endif
41712+
41713+ if ((*flags & MF_PAX_PAGEEXEC)
41714+
41715+#ifdef CONFIG_PAX_PAGEEXEC
41716+ && (*flags & MF_PAX_SEGMEXEC)
41717+#endif
41718+
41719+ )
41720+ {
41721+ *flags &= ~MF_PAX_PAGEEXEC;
41722+ retval = -EINVAL;
41723+ }
41724+
41725+ if ((*flags & MF_PAX_MPROTECT)
41726+
41727+#ifdef CONFIG_PAX_MPROTECT
41728+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41729+#endif
41730+
41731+ )
41732+ {
41733+ *flags &= ~MF_PAX_MPROTECT;
41734+ retval = -EINVAL;
41735+ }
41736+
41737+ if ((*flags & MF_PAX_EMUTRAMP)
41738+
41739+#ifdef CONFIG_PAX_EMUTRAMP
41740+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41741+#endif
41742+
41743+ )
41744+ {
41745+ *flags &= ~MF_PAX_EMUTRAMP;
41746+ retval = -EINVAL;
41747+ }
41748+
41749+ return retval;
41750+}
41751+
41752+EXPORT_SYMBOL(pax_check_flags);
41753+
41754+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41755+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
41756+{
41757+ struct task_struct *tsk = current;
41758+ struct mm_struct *mm = current->mm;
41759+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
41760+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
41761+ char *path_exec = NULL;
41762+ char *path_fault = NULL;
41763+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
41764+
41765+ if (buffer_exec && buffer_fault) {
41766+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
41767+
41768+ down_read(&mm->mmap_sem);
41769+ vma = mm->mmap;
41770+ while (vma && (!vma_exec || !vma_fault)) {
41771+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
41772+ vma_exec = vma;
41773+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
41774+ vma_fault = vma;
41775+ vma = vma->vm_next;
41776+ }
41777+ if (vma_exec) {
41778+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
41779+ if (IS_ERR(path_exec))
41780+ path_exec = "<path too long>";
41781+ else {
41782+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
41783+ if (path_exec) {
41784+ *path_exec = 0;
41785+ path_exec = buffer_exec;
41786+ } else
41787+ path_exec = "<path too long>";
41788+ }
41789+ }
41790+ if (vma_fault) {
41791+ start = vma_fault->vm_start;
41792+ end = vma_fault->vm_end;
41793+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
41794+ if (vma_fault->vm_file) {
41795+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
41796+ if (IS_ERR(path_fault))
41797+ path_fault = "<path too long>";
41798+ else {
41799+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
41800+ if (path_fault) {
41801+ *path_fault = 0;
41802+ path_fault = buffer_fault;
41803+ } else
41804+ path_fault = "<path too long>";
41805+ }
41806+ } else
41807+ path_fault = "<anonymous mapping>";
41808+ }
41809+ up_read(&mm->mmap_sem);
41810+ }
41811+ if (tsk->signal->curr_ip)
41812+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
41813+ else
41814+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
41815+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
41816+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
41817+ task_uid(tsk), task_euid(tsk), pc, sp);
41818+ free_page((unsigned long)buffer_exec);
41819+ free_page((unsigned long)buffer_fault);
41820+ pax_report_insns(regs, pc, sp);
41821+ do_coredump(SIGKILL, SIGKILL, regs);
41822+}
41823+#endif
41824+
41825+#ifdef CONFIG_PAX_REFCOUNT
41826+void pax_report_refcount_overflow(struct pt_regs *regs)
41827+{
41828+ if (current->signal->curr_ip)
41829+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41830+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
41831+ else
41832+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41833+ current->comm, task_pid_nr(current), current_uid(), current_euid());
41834+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
41835+ show_regs(regs);
41836+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
41837+}
41838+#endif
41839+
41840+#ifdef CONFIG_PAX_USERCOPY
41841+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
41842+int object_is_on_stack(const void *obj, unsigned long len)
41843+{
41844+ const void * const stack = task_stack_page(current);
41845+ const void * const stackend = stack + THREAD_SIZE;
41846+
41847+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41848+ const void *frame = NULL;
41849+ const void *oldframe;
41850+#endif
41851+
41852+ if (obj + len < obj)
41853+ return -1;
41854+
41855+ if (obj + len <= stack || stackend <= obj)
41856+ return 0;
41857+
41858+ if (obj < stack || stackend < obj + len)
41859+ return -1;
41860+
41861+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41862+ oldframe = __builtin_frame_address(1);
41863+ if (oldframe)
41864+ frame = __builtin_frame_address(2);
41865+ /*
41866+ low ----------------------------------------------> high
41867+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
41868+ ^----------------^
41869+ allow copies only within here
41870+ */
41871+ while (stack <= frame && frame < stackend) {
41872+ /* if obj + len extends past the last frame, this
41873+ check won't pass and the next frame will be 0,
41874+ causing us to bail out and correctly report
41875+ the copy as invalid
41876+ */
41877+ if (obj + len <= frame)
41878+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
41879+ oldframe = frame;
41880+ frame = *(const void * const *)frame;
41881+ }
41882+ return -1;
41883+#else
41884+ return 1;
41885+#endif
41886+}
41887+
41888+
41889+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
41890+{
41891+ if (current->signal->curr_ip)
41892+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41893+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41894+ else
41895+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41896+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41897+ dump_stack();
41898+ gr_handle_kernel_exploit();
41899+ do_group_exit(SIGKILL);
41900+}
41901+#endif
41902+
41903+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
41904+void pax_track_stack(void)
41905+{
41906+ unsigned long sp = (unsigned long)&sp;
41907+ if (sp < current_thread_info()->lowest_stack &&
41908+ sp > (unsigned long)task_stack_page(current))
41909+ current_thread_info()->lowest_stack = sp;
41910+}
41911+EXPORT_SYMBOL(pax_track_stack);
41912+#endif
41913+
41914 static int zap_process(struct task_struct *start, int exit_code)
41915 {
41916 struct task_struct *t;
41917@@ -2027,17 +2313,17 @@ static void wait_for_dump_helpers(struct
41918 pipe = file->f_path.dentry->d_inode->i_pipe;
41919
41920 pipe_lock(pipe);
41921- pipe->readers++;
41922- pipe->writers--;
41923+ atomic_inc(&pipe->readers);
41924+ atomic_dec(&pipe->writers);
41925
41926- while ((pipe->readers > 1) && (!signal_pending(current))) {
41927+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
41928 wake_up_interruptible_sync(&pipe->wait);
41929 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
41930 pipe_wait(pipe);
41931 }
41932
41933- pipe->readers--;
41934- pipe->writers++;
41935+ atomic_dec(&pipe->readers);
41936+ atomic_inc(&pipe->writers);
41937 pipe_unlock(pipe);
41938
41939 }
41940@@ -2098,7 +2384,7 @@ void do_coredump(long signr, int exit_co
41941 int retval = 0;
41942 int flag = 0;
41943 int ispipe;
41944- static atomic_t core_dump_count = ATOMIC_INIT(0);
41945+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
41946 struct coredump_params cprm = {
41947 .signr = signr,
41948 .regs = regs,
41949@@ -2113,6 +2399,9 @@ void do_coredump(long signr, int exit_co
41950
41951 audit_core_dumps(signr);
41952
41953+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
41954+ gr_handle_brute_attach(current, cprm.mm_flags);
41955+
41956 binfmt = mm->binfmt;
41957 if (!binfmt || !binfmt->core_dump)
41958 goto fail;
41959@@ -2180,7 +2469,7 @@ void do_coredump(long signr, int exit_co
41960 }
41961 cprm.limit = RLIM_INFINITY;
41962
41963- dump_count = atomic_inc_return(&core_dump_count);
41964+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
41965 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
41966 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
41967 task_tgid_vnr(current), current->comm);
41968@@ -2207,6 +2496,8 @@ void do_coredump(long signr, int exit_co
41969 } else {
41970 struct inode *inode;
41971
41972+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
41973+
41974 if (cprm.limit < binfmt->min_coredump)
41975 goto fail_unlock;
41976
41977@@ -2250,7 +2541,7 @@ close_fail:
41978 filp_close(cprm.file, NULL);
41979 fail_dropcount:
41980 if (ispipe)
41981- atomic_dec(&core_dump_count);
41982+ atomic_dec_unchecked(&core_dump_count);
41983 fail_unlock:
41984 kfree(cn.corename);
41985 fail_corename:
41986@@ -2269,7 +2560,7 @@ fail:
41987 */
41988 int dump_write(struct file *file, const void *addr, int nr)
41989 {
41990- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
41991+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
41992 }
41993 EXPORT_SYMBOL(dump_write);
41994
41995diff -urNp linux-3.1.1/fs/ext2/balloc.c linux-3.1.1/fs/ext2/balloc.c
41996--- linux-3.1.1/fs/ext2/balloc.c 2011-11-11 15:19:27.000000000 -0500
41997+++ linux-3.1.1/fs/ext2/balloc.c 2011-11-16 18:40:29.000000000 -0500
41998@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
41999
42000 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42001 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42002- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42003+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
42004 sbi->s_resuid != current_fsuid() &&
42005 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42006 return 0;
42007diff -urNp linux-3.1.1/fs/ext3/balloc.c linux-3.1.1/fs/ext3/balloc.c
42008--- linux-3.1.1/fs/ext3/balloc.c 2011-11-11 15:19:27.000000000 -0500
42009+++ linux-3.1.1/fs/ext3/balloc.c 2011-11-16 18:40:29.000000000 -0500
42010@@ -1446,7 +1446,7 @@ static int ext3_has_free_blocks(struct e
42011
42012 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42013 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42014- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42015+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
42016 sbi->s_resuid != current_fsuid() &&
42017 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42018 return 0;
42019diff -urNp linux-3.1.1/fs/ext4/balloc.c linux-3.1.1/fs/ext4/balloc.c
42020--- linux-3.1.1/fs/ext4/balloc.c 2011-11-11 15:19:27.000000000 -0500
42021+++ linux-3.1.1/fs/ext4/balloc.c 2011-11-16 18:40:29.000000000 -0500
42022@@ -394,8 +394,8 @@ static int ext4_has_free_blocks(struct e
42023 /* Hm, nope. Are (enough) root reserved blocks available? */
42024 if (sbi->s_resuid == current_fsuid() ||
42025 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
42026- capable(CAP_SYS_RESOURCE) ||
42027- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
42028+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
42029+ capable_nolog(CAP_SYS_RESOURCE)) {
42030
42031 if (free_blocks >= (nblocks + dirty_blocks))
42032 return 1;
42033diff -urNp linux-3.1.1/fs/ext4/ext4.h linux-3.1.1/fs/ext4/ext4.h
42034--- linux-3.1.1/fs/ext4/ext4.h 2011-11-11 15:19:27.000000000 -0500
42035+++ linux-3.1.1/fs/ext4/ext4.h 2011-11-16 18:39:08.000000000 -0500
42036@@ -1180,19 +1180,19 @@ struct ext4_sb_info {
42037 unsigned long s_mb_last_start;
42038
42039 /* stats for buddy allocator */
42040- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
42041- atomic_t s_bal_success; /* we found long enough chunks */
42042- atomic_t s_bal_allocated; /* in blocks */
42043- atomic_t s_bal_ex_scanned; /* total extents scanned */
42044- atomic_t s_bal_goals; /* goal hits */
42045- atomic_t s_bal_breaks; /* too long searches */
42046- atomic_t s_bal_2orders; /* 2^order hits */
42047+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
42048+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
42049+ atomic_unchecked_t s_bal_allocated; /* in blocks */
42050+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
42051+ atomic_unchecked_t s_bal_goals; /* goal hits */
42052+ atomic_unchecked_t s_bal_breaks; /* too long searches */
42053+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
42054 spinlock_t s_bal_lock;
42055 unsigned long s_mb_buddies_generated;
42056 unsigned long long s_mb_generation_time;
42057- atomic_t s_mb_lost_chunks;
42058- atomic_t s_mb_preallocated;
42059- atomic_t s_mb_discarded;
42060+ atomic_unchecked_t s_mb_lost_chunks;
42061+ atomic_unchecked_t s_mb_preallocated;
42062+ atomic_unchecked_t s_mb_discarded;
42063 atomic_t s_lock_busy;
42064
42065 /* locality groups */
42066diff -urNp linux-3.1.1/fs/ext4/file.c linux-3.1.1/fs/ext4/file.c
42067--- linux-3.1.1/fs/ext4/file.c 2011-11-11 15:19:27.000000000 -0500
42068+++ linux-3.1.1/fs/ext4/file.c 2011-11-16 18:40:29.000000000 -0500
42069@@ -181,8 +181,8 @@ static int ext4_file_open(struct inode *
42070 path.dentry = mnt->mnt_root;
42071 cp = d_path(&path, buf, sizeof(buf));
42072 if (!IS_ERR(cp)) {
42073- memcpy(sbi->s_es->s_last_mounted, cp,
42074- sizeof(sbi->s_es->s_last_mounted));
42075+ strlcpy(sbi->s_es->s_last_mounted, cp,
42076+ sizeof(sbi->s_es->s_last_mounted));
42077 ext4_mark_super_dirty(sb);
42078 }
42079 }
42080diff -urNp linux-3.1.1/fs/ext4/ioctl.c linux-3.1.1/fs/ext4/ioctl.c
42081--- linux-3.1.1/fs/ext4/ioctl.c 2011-11-11 15:19:27.000000000 -0500
42082+++ linux-3.1.1/fs/ext4/ioctl.c 2011-11-16 18:39:08.000000000 -0500
42083@@ -348,7 +348,7 @@ mext_out:
42084 if (!blk_queue_discard(q))
42085 return -EOPNOTSUPP;
42086
42087- if (copy_from_user(&range, (struct fstrim_range *)arg,
42088+ if (copy_from_user(&range, (struct fstrim_range __user *)arg,
42089 sizeof(range)))
42090 return -EFAULT;
42091
42092@@ -358,7 +358,7 @@ mext_out:
42093 if (ret < 0)
42094 return ret;
42095
42096- if (copy_to_user((struct fstrim_range *)arg, &range,
42097+ if (copy_to_user((struct fstrim_range __user *)arg, &range,
42098 sizeof(range)))
42099 return -EFAULT;
42100
42101diff -urNp linux-3.1.1/fs/ext4/mballoc.c linux-3.1.1/fs/ext4/mballoc.c
42102--- linux-3.1.1/fs/ext4/mballoc.c 2011-11-11 15:19:27.000000000 -0500
42103+++ linux-3.1.1/fs/ext4/mballoc.c 2011-11-16 18:40:29.000000000 -0500
42104@@ -1795,7 +1795,7 @@ void ext4_mb_simple_scan_group(struct ex
42105 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
42106
42107 if (EXT4_SB(sb)->s_mb_stats)
42108- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
42109+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
42110
42111 break;
42112 }
42113@@ -2089,7 +2089,7 @@ repeat:
42114 ac->ac_status = AC_STATUS_CONTINUE;
42115 ac->ac_flags |= EXT4_MB_HINT_FIRST;
42116 cr = 3;
42117- atomic_inc(&sbi->s_mb_lost_chunks);
42118+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
42119 goto repeat;
42120 }
42121 }
42122@@ -2132,6 +2132,8 @@ static int ext4_mb_seq_groups_show(struc
42123 ext4_grpblk_t counters[16];
42124 } sg;
42125
42126+ pax_track_stack();
42127+
42128 group--;
42129 if (group == 0)
42130 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
42131@@ -2573,25 +2575,25 @@ int ext4_mb_release(struct super_block *
42132 if (sbi->s_mb_stats) {
42133 ext4_msg(sb, KERN_INFO,
42134 "mballoc: %u blocks %u reqs (%u success)",
42135- atomic_read(&sbi->s_bal_allocated),
42136- atomic_read(&sbi->s_bal_reqs),
42137- atomic_read(&sbi->s_bal_success));
42138+ atomic_read_unchecked(&sbi->s_bal_allocated),
42139+ atomic_read_unchecked(&sbi->s_bal_reqs),
42140+ atomic_read_unchecked(&sbi->s_bal_success));
42141 ext4_msg(sb, KERN_INFO,
42142 "mballoc: %u extents scanned, %u goal hits, "
42143 "%u 2^N hits, %u breaks, %u lost",
42144- atomic_read(&sbi->s_bal_ex_scanned),
42145- atomic_read(&sbi->s_bal_goals),
42146- atomic_read(&sbi->s_bal_2orders),
42147- atomic_read(&sbi->s_bal_breaks),
42148- atomic_read(&sbi->s_mb_lost_chunks));
42149+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
42150+ atomic_read_unchecked(&sbi->s_bal_goals),
42151+ atomic_read_unchecked(&sbi->s_bal_2orders),
42152+ atomic_read_unchecked(&sbi->s_bal_breaks),
42153+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
42154 ext4_msg(sb, KERN_INFO,
42155 "mballoc: %lu generated and it took %Lu",
42156 sbi->s_mb_buddies_generated,
42157 sbi->s_mb_generation_time);
42158 ext4_msg(sb, KERN_INFO,
42159 "mballoc: %u preallocated, %u discarded",
42160- atomic_read(&sbi->s_mb_preallocated),
42161- atomic_read(&sbi->s_mb_discarded));
42162+ atomic_read_unchecked(&sbi->s_mb_preallocated),
42163+ atomic_read_unchecked(&sbi->s_mb_discarded));
42164 }
42165
42166 free_percpu(sbi->s_locality_groups);
42167@@ -3070,16 +3072,16 @@ static void ext4_mb_collect_stats(struct
42168 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
42169
42170 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
42171- atomic_inc(&sbi->s_bal_reqs);
42172- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42173+ atomic_inc_unchecked(&sbi->s_bal_reqs);
42174+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42175 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
42176- atomic_inc(&sbi->s_bal_success);
42177- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
42178+ atomic_inc_unchecked(&sbi->s_bal_success);
42179+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
42180 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
42181 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
42182- atomic_inc(&sbi->s_bal_goals);
42183+ atomic_inc_unchecked(&sbi->s_bal_goals);
42184 if (ac->ac_found > sbi->s_mb_max_to_scan)
42185- atomic_inc(&sbi->s_bal_breaks);
42186+ atomic_inc_unchecked(&sbi->s_bal_breaks);
42187 }
42188
42189 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
42190@@ -3477,7 +3479,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
42191 trace_ext4_mb_new_inode_pa(ac, pa);
42192
42193 ext4_mb_use_inode_pa(ac, pa);
42194- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42195+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42196
42197 ei = EXT4_I(ac->ac_inode);
42198 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42199@@ -3537,7 +3539,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
42200 trace_ext4_mb_new_group_pa(ac, pa);
42201
42202 ext4_mb_use_group_pa(ac, pa);
42203- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42204+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42205
42206 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42207 lg = ac->ac_lg;
42208@@ -3625,7 +3627,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
42209 * from the bitmap and continue.
42210 */
42211 }
42212- atomic_add(free, &sbi->s_mb_discarded);
42213+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
42214
42215 return err;
42216 }
42217@@ -3643,7 +3645,7 @@ ext4_mb_release_group_pa(struct ext4_bud
42218 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
42219 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
42220 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
42221- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42222+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42223 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
42224
42225 return 0;
42226diff -urNp linux-3.1.1/fs/fcntl.c linux-3.1.1/fs/fcntl.c
42227--- linux-3.1.1/fs/fcntl.c 2011-11-11 15:19:27.000000000 -0500
42228+++ linux-3.1.1/fs/fcntl.c 2011-11-16 23:40:25.000000000 -0500
42229@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
42230 if (err)
42231 return err;
42232
42233+ if (gr_handle_chroot_fowner(pid, type))
42234+ return -ENOENT;
42235+ if (gr_check_protected_task_fowner(pid, type))
42236+ return -EACCES;
42237+
42238 f_modown(filp, pid, type, force);
42239 return 0;
42240 }
42241@@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
42242
42243 static int f_setown_ex(struct file *filp, unsigned long arg)
42244 {
42245- struct f_owner_ex * __user owner_p = (void * __user)arg;
42246+ struct f_owner_ex __user *owner_p = (void __user *)arg;
42247 struct f_owner_ex owner;
42248 struct pid *pid;
42249 int type;
42250@@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp
42251
42252 static int f_getown_ex(struct file *filp, unsigned long arg)
42253 {
42254- struct f_owner_ex * __user owner_p = (void * __user)arg;
42255+ struct f_owner_ex __user *owner_p = (void __user *)arg;
42256 struct f_owner_ex owner;
42257 int ret = 0;
42258
42259@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
42260 switch (cmd) {
42261 case F_DUPFD:
42262 case F_DUPFD_CLOEXEC:
42263+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
42264 if (arg >= rlimit(RLIMIT_NOFILE))
42265 break;
42266 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
42267diff -urNp linux-3.1.1/fs/fifo.c linux-3.1.1/fs/fifo.c
42268--- linux-3.1.1/fs/fifo.c 2011-11-11 15:19:27.000000000 -0500
42269+++ linux-3.1.1/fs/fifo.c 2011-11-16 18:39:08.000000000 -0500
42270@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
42271 */
42272 filp->f_op = &read_pipefifo_fops;
42273 pipe->r_counter++;
42274- if (pipe->readers++ == 0)
42275+ if (atomic_inc_return(&pipe->readers) == 1)
42276 wake_up_partner(inode);
42277
42278- if (!pipe->writers) {
42279+ if (!atomic_read(&pipe->writers)) {
42280 if ((filp->f_flags & O_NONBLOCK)) {
42281 /* suppress POLLHUP until we have
42282 * seen a writer */
42283@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
42284 * errno=ENXIO when there is no process reading the FIFO.
42285 */
42286 ret = -ENXIO;
42287- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
42288+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
42289 goto err;
42290
42291 filp->f_op = &write_pipefifo_fops;
42292 pipe->w_counter++;
42293- if (!pipe->writers++)
42294+ if (atomic_inc_return(&pipe->writers) == 1)
42295 wake_up_partner(inode);
42296
42297- if (!pipe->readers) {
42298+ if (!atomic_read(&pipe->readers)) {
42299 wait_for_partner(inode, &pipe->r_counter);
42300 if (signal_pending(current))
42301 goto err_wr;
42302@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
42303 */
42304 filp->f_op = &rdwr_pipefifo_fops;
42305
42306- pipe->readers++;
42307- pipe->writers++;
42308+ atomic_inc(&pipe->readers);
42309+ atomic_inc(&pipe->writers);
42310 pipe->r_counter++;
42311 pipe->w_counter++;
42312- if (pipe->readers == 1 || pipe->writers == 1)
42313+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
42314 wake_up_partner(inode);
42315 break;
42316
42317@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
42318 return 0;
42319
42320 err_rd:
42321- if (!--pipe->readers)
42322+ if (atomic_dec_and_test(&pipe->readers))
42323 wake_up_interruptible(&pipe->wait);
42324 ret = -ERESTARTSYS;
42325 goto err;
42326
42327 err_wr:
42328- if (!--pipe->writers)
42329+ if (atomic_dec_and_test(&pipe->writers))
42330 wake_up_interruptible(&pipe->wait);
42331 ret = -ERESTARTSYS;
42332 goto err;
42333
42334 err:
42335- if (!pipe->readers && !pipe->writers)
42336+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
42337 free_pipe_info(inode);
42338
42339 err_nocleanup:
42340diff -urNp linux-3.1.1/fs/file.c linux-3.1.1/fs/file.c
42341--- linux-3.1.1/fs/file.c 2011-11-11 15:19:27.000000000 -0500
42342+++ linux-3.1.1/fs/file.c 2011-11-16 18:40:29.000000000 -0500
42343@@ -15,6 +15,7 @@
42344 #include <linux/slab.h>
42345 #include <linux/vmalloc.h>
42346 #include <linux/file.h>
42347+#include <linux/security.h>
42348 #include <linux/fdtable.h>
42349 #include <linux/bitops.h>
42350 #include <linux/interrupt.h>
42351@@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
42352 * N.B. For clone tasks sharing a files structure, this test
42353 * will limit the total number of files that can be opened.
42354 */
42355+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
42356 if (nr >= rlimit(RLIMIT_NOFILE))
42357 return -EMFILE;
42358
42359diff -urNp linux-3.1.1/fs/filesystems.c linux-3.1.1/fs/filesystems.c
42360--- linux-3.1.1/fs/filesystems.c 2011-11-11 15:19:27.000000000 -0500
42361+++ linux-3.1.1/fs/filesystems.c 2011-11-16 18:40:29.000000000 -0500
42362@@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
42363 int len = dot ? dot - name : strlen(name);
42364
42365 fs = __get_fs_type(name, len);
42366+
42367+#ifdef CONFIG_GRKERNSEC_MODHARDEN
42368+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
42369+#else
42370 if (!fs && (request_module("%.*s", len, name) == 0))
42371+#endif
42372 fs = __get_fs_type(name, len);
42373
42374 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
42375diff -urNp linux-3.1.1/fs/fscache/cookie.c linux-3.1.1/fs/fscache/cookie.c
42376--- linux-3.1.1/fs/fscache/cookie.c 2011-11-11 15:19:27.000000000 -0500
42377+++ linux-3.1.1/fs/fscache/cookie.c 2011-11-16 18:39:08.000000000 -0500
42378@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
42379 parent ? (char *) parent->def->name : "<no-parent>",
42380 def->name, netfs_data);
42381
42382- fscache_stat(&fscache_n_acquires);
42383+ fscache_stat_unchecked(&fscache_n_acquires);
42384
42385 /* if there's no parent cookie, then we don't create one here either */
42386 if (!parent) {
42387- fscache_stat(&fscache_n_acquires_null);
42388+ fscache_stat_unchecked(&fscache_n_acquires_null);
42389 _leave(" [no parent]");
42390 return NULL;
42391 }
42392@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
42393 /* allocate and initialise a cookie */
42394 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
42395 if (!cookie) {
42396- fscache_stat(&fscache_n_acquires_oom);
42397+ fscache_stat_unchecked(&fscache_n_acquires_oom);
42398 _leave(" [ENOMEM]");
42399 return NULL;
42400 }
42401@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
42402
42403 switch (cookie->def->type) {
42404 case FSCACHE_COOKIE_TYPE_INDEX:
42405- fscache_stat(&fscache_n_cookie_index);
42406+ fscache_stat_unchecked(&fscache_n_cookie_index);
42407 break;
42408 case FSCACHE_COOKIE_TYPE_DATAFILE:
42409- fscache_stat(&fscache_n_cookie_data);
42410+ fscache_stat_unchecked(&fscache_n_cookie_data);
42411 break;
42412 default:
42413- fscache_stat(&fscache_n_cookie_special);
42414+ fscache_stat_unchecked(&fscache_n_cookie_special);
42415 break;
42416 }
42417
42418@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
42419 if (fscache_acquire_non_index_cookie(cookie) < 0) {
42420 atomic_dec(&parent->n_children);
42421 __fscache_cookie_put(cookie);
42422- fscache_stat(&fscache_n_acquires_nobufs);
42423+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
42424 _leave(" = NULL");
42425 return NULL;
42426 }
42427 }
42428
42429- fscache_stat(&fscache_n_acquires_ok);
42430+ fscache_stat_unchecked(&fscache_n_acquires_ok);
42431 _leave(" = %p", cookie);
42432 return cookie;
42433 }
42434@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
42435 cache = fscache_select_cache_for_object(cookie->parent);
42436 if (!cache) {
42437 up_read(&fscache_addremove_sem);
42438- fscache_stat(&fscache_n_acquires_no_cache);
42439+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
42440 _leave(" = -ENOMEDIUM [no cache]");
42441 return -ENOMEDIUM;
42442 }
42443@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
42444 object = cache->ops->alloc_object(cache, cookie);
42445 fscache_stat_d(&fscache_n_cop_alloc_object);
42446 if (IS_ERR(object)) {
42447- fscache_stat(&fscache_n_object_no_alloc);
42448+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
42449 ret = PTR_ERR(object);
42450 goto error;
42451 }
42452
42453- fscache_stat(&fscache_n_object_alloc);
42454+ fscache_stat_unchecked(&fscache_n_object_alloc);
42455
42456 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
42457
42458@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
42459 struct fscache_object *object;
42460 struct hlist_node *_p;
42461
42462- fscache_stat(&fscache_n_updates);
42463+ fscache_stat_unchecked(&fscache_n_updates);
42464
42465 if (!cookie) {
42466- fscache_stat(&fscache_n_updates_null);
42467+ fscache_stat_unchecked(&fscache_n_updates_null);
42468 _leave(" [no cookie]");
42469 return;
42470 }
42471@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
42472 struct fscache_object *object;
42473 unsigned long event;
42474
42475- fscache_stat(&fscache_n_relinquishes);
42476+ fscache_stat_unchecked(&fscache_n_relinquishes);
42477 if (retire)
42478- fscache_stat(&fscache_n_relinquishes_retire);
42479+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
42480
42481 if (!cookie) {
42482- fscache_stat(&fscache_n_relinquishes_null);
42483+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
42484 _leave(" [no cookie]");
42485 return;
42486 }
42487@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
42488
42489 /* wait for the cookie to finish being instantiated (or to fail) */
42490 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
42491- fscache_stat(&fscache_n_relinquishes_waitcrt);
42492+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
42493 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
42494 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
42495 }
42496diff -urNp linux-3.1.1/fs/fscache/internal.h linux-3.1.1/fs/fscache/internal.h
42497--- linux-3.1.1/fs/fscache/internal.h 2011-11-11 15:19:27.000000000 -0500
42498+++ linux-3.1.1/fs/fscache/internal.h 2011-11-16 18:39:08.000000000 -0500
42499@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
42500 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
42501 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
42502
42503-extern atomic_t fscache_n_op_pend;
42504-extern atomic_t fscache_n_op_run;
42505-extern atomic_t fscache_n_op_enqueue;
42506-extern atomic_t fscache_n_op_deferred_release;
42507-extern atomic_t fscache_n_op_release;
42508-extern atomic_t fscache_n_op_gc;
42509-extern atomic_t fscache_n_op_cancelled;
42510-extern atomic_t fscache_n_op_rejected;
42511-
42512-extern atomic_t fscache_n_attr_changed;
42513-extern atomic_t fscache_n_attr_changed_ok;
42514-extern atomic_t fscache_n_attr_changed_nobufs;
42515-extern atomic_t fscache_n_attr_changed_nomem;
42516-extern atomic_t fscache_n_attr_changed_calls;
42517-
42518-extern atomic_t fscache_n_allocs;
42519-extern atomic_t fscache_n_allocs_ok;
42520-extern atomic_t fscache_n_allocs_wait;
42521-extern atomic_t fscache_n_allocs_nobufs;
42522-extern atomic_t fscache_n_allocs_intr;
42523-extern atomic_t fscache_n_allocs_object_dead;
42524-extern atomic_t fscache_n_alloc_ops;
42525-extern atomic_t fscache_n_alloc_op_waits;
42526-
42527-extern atomic_t fscache_n_retrievals;
42528-extern atomic_t fscache_n_retrievals_ok;
42529-extern atomic_t fscache_n_retrievals_wait;
42530-extern atomic_t fscache_n_retrievals_nodata;
42531-extern atomic_t fscache_n_retrievals_nobufs;
42532-extern atomic_t fscache_n_retrievals_intr;
42533-extern atomic_t fscache_n_retrievals_nomem;
42534-extern atomic_t fscache_n_retrievals_object_dead;
42535-extern atomic_t fscache_n_retrieval_ops;
42536-extern atomic_t fscache_n_retrieval_op_waits;
42537-
42538-extern atomic_t fscache_n_stores;
42539-extern atomic_t fscache_n_stores_ok;
42540-extern atomic_t fscache_n_stores_again;
42541-extern atomic_t fscache_n_stores_nobufs;
42542-extern atomic_t fscache_n_stores_oom;
42543-extern atomic_t fscache_n_store_ops;
42544-extern atomic_t fscache_n_store_calls;
42545-extern atomic_t fscache_n_store_pages;
42546-extern atomic_t fscache_n_store_radix_deletes;
42547-extern atomic_t fscache_n_store_pages_over_limit;
42548-
42549-extern atomic_t fscache_n_store_vmscan_not_storing;
42550-extern atomic_t fscache_n_store_vmscan_gone;
42551-extern atomic_t fscache_n_store_vmscan_busy;
42552-extern atomic_t fscache_n_store_vmscan_cancelled;
42553-
42554-extern atomic_t fscache_n_marks;
42555-extern atomic_t fscache_n_uncaches;
42556-
42557-extern atomic_t fscache_n_acquires;
42558-extern atomic_t fscache_n_acquires_null;
42559-extern atomic_t fscache_n_acquires_no_cache;
42560-extern atomic_t fscache_n_acquires_ok;
42561-extern atomic_t fscache_n_acquires_nobufs;
42562-extern atomic_t fscache_n_acquires_oom;
42563-
42564-extern atomic_t fscache_n_updates;
42565-extern atomic_t fscache_n_updates_null;
42566-extern atomic_t fscache_n_updates_run;
42567-
42568-extern atomic_t fscache_n_relinquishes;
42569-extern atomic_t fscache_n_relinquishes_null;
42570-extern atomic_t fscache_n_relinquishes_waitcrt;
42571-extern atomic_t fscache_n_relinquishes_retire;
42572-
42573-extern atomic_t fscache_n_cookie_index;
42574-extern atomic_t fscache_n_cookie_data;
42575-extern atomic_t fscache_n_cookie_special;
42576-
42577-extern atomic_t fscache_n_object_alloc;
42578-extern atomic_t fscache_n_object_no_alloc;
42579-extern atomic_t fscache_n_object_lookups;
42580-extern atomic_t fscache_n_object_lookups_negative;
42581-extern atomic_t fscache_n_object_lookups_positive;
42582-extern atomic_t fscache_n_object_lookups_timed_out;
42583-extern atomic_t fscache_n_object_created;
42584-extern atomic_t fscache_n_object_avail;
42585-extern atomic_t fscache_n_object_dead;
42586-
42587-extern atomic_t fscache_n_checkaux_none;
42588-extern atomic_t fscache_n_checkaux_okay;
42589-extern atomic_t fscache_n_checkaux_update;
42590-extern atomic_t fscache_n_checkaux_obsolete;
42591+extern atomic_unchecked_t fscache_n_op_pend;
42592+extern atomic_unchecked_t fscache_n_op_run;
42593+extern atomic_unchecked_t fscache_n_op_enqueue;
42594+extern atomic_unchecked_t fscache_n_op_deferred_release;
42595+extern atomic_unchecked_t fscache_n_op_release;
42596+extern atomic_unchecked_t fscache_n_op_gc;
42597+extern atomic_unchecked_t fscache_n_op_cancelled;
42598+extern atomic_unchecked_t fscache_n_op_rejected;
42599+
42600+extern atomic_unchecked_t fscache_n_attr_changed;
42601+extern atomic_unchecked_t fscache_n_attr_changed_ok;
42602+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
42603+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
42604+extern atomic_unchecked_t fscache_n_attr_changed_calls;
42605+
42606+extern atomic_unchecked_t fscache_n_allocs;
42607+extern atomic_unchecked_t fscache_n_allocs_ok;
42608+extern atomic_unchecked_t fscache_n_allocs_wait;
42609+extern atomic_unchecked_t fscache_n_allocs_nobufs;
42610+extern atomic_unchecked_t fscache_n_allocs_intr;
42611+extern atomic_unchecked_t fscache_n_allocs_object_dead;
42612+extern atomic_unchecked_t fscache_n_alloc_ops;
42613+extern atomic_unchecked_t fscache_n_alloc_op_waits;
42614+
42615+extern atomic_unchecked_t fscache_n_retrievals;
42616+extern atomic_unchecked_t fscache_n_retrievals_ok;
42617+extern atomic_unchecked_t fscache_n_retrievals_wait;
42618+extern atomic_unchecked_t fscache_n_retrievals_nodata;
42619+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
42620+extern atomic_unchecked_t fscache_n_retrievals_intr;
42621+extern atomic_unchecked_t fscache_n_retrievals_nomem;
42622+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
42623+extern atomic_unchecked_t fscache_n_retrieval_ops;
42624+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
42625+
42626+extern atomic_unchecked_t fscache_n_stores;
42627+extern atomic_unchecked_t fscache_n_stores_ok;
42628+extern atomic_unchecked_t fscache_n_stores_again;
42629+extern atomic_unchecked_t fscache_n_stores_nobufs;
42630+extern atomic_unchecked_t fscache_n_stores_oom;
42631+extern atomic_unchecked_t fscache_n_store_ops;
42632+extern atomic_unchecked_t fscache_n_store_calls;
42633+extern atomic_unchecked_t fscache_n_store_pages;
42634+extern atomic_unchecked_t fscache_n_store_radix_deletes;
42635+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
42636+
42637+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42638+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
42639+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
42640+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42641+
42642+extern atomic_unchecked_t fscache_n_marks;
42643+extern atomic_unchecked_t fscache_n_uncaches;
42644+
42645+extern atomic_unchecked_t fscache_n_acquires;
42646+extern atomic_unchecked_t fscache_n_acquires_null;
42647+extern atomic_unchecked_t fscache_n_acquires_no_cache;
42648+extern atomic_unchecked_t fscache_n_acquires_ok;
42649+extern atomic_unchecked_t fscache_n_acquires_nobufs;
42650+extern atomic_unchecked_t fscache_n_acquires_oom;
42651+
42652+extern atomic_unchecked_t fscache_n_updates;
42653+extern atomic_unchecked_t fscache_n_updates_null;
42654+extern atomic_unchecked_t fscache_n_updates_run;
42655+
42656+extern atomic_unchecked_t fscache_n_relinquishes;
42657+extern atomic_unchecked_t fscache_n_relinquishes_null;
42658+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
42659+extern atomic_unchecked_t fscache_n_relinquishes_retire;
42660+
42661+extern atomic_unchecked_t fscache_n_cookie_index;
42662+extern atomic_unchecked_t fscache_n_cookie_data;
42663+extern atomic_unchecked_t fscache_n_cookie_special;
42664+
42665+extern atomic_unchecked_t fscache_n_object_alloc;
42666+extern atomic_unchecked_t fscache_n_object_no_alloc;
42667+extern atomic_unchecked_t fscache_n_object_lookups;
42668+extern atomic_unchecked_t fscache_n_object_lookups_negative;
42669+extern atomic_unchecked_t fscache_n_object_lookups_positive;
42670+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
42671+extern atomic_unchecked_t fscache_n_object_created;
42672+extern atomic_unchecked_t fscache_n_object_avail;
42673+extern atomic_unchecked_t fscache_n_object_dead;
42674+
42675+extern atomic_unchecked_t fscache_n_checkaux_none;
42676+extern atomic_unchecked_t fscache_n_checkaux_okay;
42677+extern atomic_unchecked_t fscache_n_checkaux_update;
42678+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
42679
42680 extern atomic_t fscache_n_cop_alloc_object;
42681 extern atomic_t fscache_n_cop_lookup_object;
42682@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
42683 atomic_inc(stat);
42684 }
42685
42686+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
42687+{
42688+ atomic_inc_unchecked(stat);
42689+}
42690+
42691 static inline void fscache_stat_d(atomic_t *stat)
42692 {
42693 atomic_dec(stat);
42694@@ -267,6 +272,7 @@ extern const struct file_operations fsca
42695
42696 #define __fscache_stat(stat) (NULL)
42697 #define fscache_stat(stat) do {} while (0)
42698+#define fscache_stat_unchecked(stat) do {} while (0)
42699 #define fscache_stat_d(stat) do {} while (0)
42700 #endif
42701
42702diff -urNp linux-3.1.1/fs/fscache/object.c linux-3.1.1/fs/fscache/object.c
42703--- linux-3.1.1/fs/fscache/object.c 2011-11-11 15:19:27.000000000 -0500
42704+++ linux-3.1.1/fs/fscache/object.c 2011-11-16 18:39:08.000000000 -0500
42705@@ -128,7 +128,7 @@ static void fscache_object_state_machine
42706 /* update the object metadata on disk */
42707 case FSCACHE_OBJECT_UPDATING:
42708 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
42709- fscache_stat(&fscache_n_updates_run);
42710+ fscache_stat_unchecked(&fscache_n_updates_run);
42711 fscache_stat(&fscache_n_cop_update_object);
42712 object->cache->ops->update_object(object);
42713 fscache_stat_d(&fscache_n_cop_update_object);
42714@@ -217,7 +217,7 @@ static void fscache_object_state_machine
42715 spin_lock(&object->lock);
42716 object->state = FSCACHE_OBJECT_DEAD;
42717 spin_unlock(&object->lock);
42718- fscache_stat(&fscache_n_object_dead);
42719+ fscache_stat_unchecked(&fscache_n_object_dead);
42720 goto terminal_transit;
42721
42722 /* handle the parent cache of this object being withdrawn from
42723@@ -232,7 +232,7 @@ static void fscache_object_state_machine
42724 spin_lock(&object->lock);
42725 object->state = FSCACHE_OBJECT_DEAD;
42726 spin_unlock(&object->lock);
42727- fscache_stat(&fscache_n_object_dead);
42728+ fscache_stat_unchecked(&fscache_n_object_dead);
42729 goto terminal_transit;
42730
42731 /* complain about the object being woken up once it is
42732@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
42733 parent->cookie->def->name, cookie->def->name,
42734 object->cache->tag->name);
42735
42736- fscache_stat(&fscache_n_object_lookups);
42737+ fscache_stat_unchecked(&fscache_n_object_lookups);
42738 fscache_stat(&fscache_n_cop_lookup_object);
42739 ret = object->cache->ops->lookup_object(object);
42740 fscache_stat_d(&fscache_n_cop_lookup_object);
42741@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
42742 if (ret == -ETIMEDOUT) {
42743 /* probably stuck behind another object, so move this one to
42744 * the back of the queue */
42745- fscache_stat(&fscache_n_object_lookups_timed_out);
42746+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
42747 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42748 }
42749
42750@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
42751
42752 spin_lock(&object->lock);
42753 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42754- fscache_stat(&fscache_n_object_lookups_negative);
42755+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
42756
42757 /* transit here to allow write requests to begin stacking up
42758 * and read requests to begin returning ENODATA */
42759@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
42760 * result, in which case there may be data available */
42761 spin_lock(&object->lock);
42762 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42763- fscache_stat(&fscache_n_object_lookups_positive);
42764+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
42765
42766 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
42767
42768@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
42769 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42770 } else {
42771 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
42772- fscache_stat(&fscache_n_object_created);
42773+ fscache_stat_unchecked(&fscache_n_object_created);
42774
42775 object->state = FSCACHE_OBJECT_AVAILABLE;
42776 spin_unlock(&object->lock);
42777@@ -602,7 +602,7 @@ static void fscache_object_available(str
42778 fscache_enqueue_dependents(object);
42779
42780 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
42781- fscache_stat(&fscache_n_object_avail);
42782+ fscache_stat_unchecked(&fscache_n_object_avail);
42783
42784 _leave("");
42785 }
42786@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
42787 enum fscache_checkaux result;
42788
42789 if (!object->cookie->def->check_aux) {
42790- fscache_stat(&fscache_n_checkaux_none);
42791+ fscache_stat_unchecked(&fscache_n_checkaux_none);
42792 return FSCACHE_CHECKAUX_OKAY;
42793 }
42794
42795@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
42796 switch (result) {
42797 /* entry okay as is */
42798 case FSCACHE_CHECKAUX_OKAY:
42799- fscache_stat(&fscache_n_checkaux_okay);
42800+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
42801 break;
42802
42803 /* entry requires update */
42804 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
42805- fscache_stat(&fscache_n_checkaux_update);
42806+ fscache_stat_unchecked(&fscache_n_checkaux_update);
42807 break;
42808
42809 /* entry requires deletion */
42810 case FSCACHE_CHECKAUX_OBSOLETE:
42811- fscache_stat(&fscache_n_checkaux_obsolete);
42812+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
42813 break;
42814
42815 default:
42816diff -urNp linux-3.1.1/fs/fscache/operation.c linux-3.1.1/fs/fscache/operation.c
42817--- linux-3.1.1/fs/fscache/operation.c 2011-11-11 15:19:27.000000000 -0500
42818+++ linux-3.1.1/fs/fscache/operation.c 2011-11-16 18:39:08.000000000 -0500
42819@@ -17,7 +17,7 @@
42820 #include <linux/slab.h>
42821 #include "internal.h"
42822
42823-atomic_t fscache_op_debug_id;
42824+atomic_unchecked_t fscache_op_debug_id;
42825 EXPORT_SYMBOL(fscache_op_debug_id);
42826
42827 /**
42828@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fs
42829 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
42830 ASSERTCMP(atomic_read(&op->usage), >, 0);
42831
42832- fscache_stat(&fscache_n_op_enqueue);
42833+ fscache_stat_unchecked(&fscache_n_op_enqueue);
42834 switch (op->flags & FSCACHE_OP_TYPE) {
42835 case FSCACHE_OP_ASYNC:
42836 _debug("queue async");
42837@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscach
42838 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
42839 if (op->processor)
42840 fscache_enqueue_operation(op);
42841- fscache_stat(&fscache_n_op_run);
42842+ fscache_stat_unchecked(&fscache_n_op_run);
42843 }
42844
42845 /*
42846@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct f
42847 if (object->n_ops > 1) {
42848 atomic_inc(&op->usage);
42849 list_add_tail(&op->pend_link, &object->pending_ops);
42850- fscache_stat(&fscache_n_op_pend);
42851+ fscache_stat_unchecked(&fscache_n_op_pend);
42852 } else if (!list_empty(&object->pending_ops)) {
42853 atomic_inc(&op->usage);
42854 list_add_tail(&op->pend_link, &object->pending_ops);
42855- fscache_stat(&fscache_n_op_pend);
42856+ fscache_stat_unchecked(&fscache_n_op_pend);
42857 fscache_start_operations(object);
42858 } else {
42859 ASSERTCMP(object->n_in_progress, ==, 0);
42860@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct f
42861 object->n_exclusive++; /* reads and writes must wait */
42862 atomic_inc(&op->usage);
42863 list_add_tail(&op->pend_link, &object->pending_ops);
42864- fscache_stat(&fscache_n_op_pend);
42865+ fscache_stat_unchecked(&fscache_n_op_pend);
42866 ret = 0;
42867 } else {
42868 /* not allowed to submit ops in any other state */
42869@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_obj
42870 if (object->n_exclusive > 0) {
42871 atomic_inc(&op->usage);
42872 list_add_tail(&op->pend_link, &object->pending_ops);
42873- fscache_stat(&fscache_n_op_pend);
42874+ fscache_stat_unchecked(&fscache_n_op_pend);
42875 } else if (!list_empty(&object->pending_ops)) {
42876 atomic_inc(&op->usage);
42877 list_add_tail(&op->pend_link, &object->pending_ops);
42878- fscache_stat(&fscache_n_op_pend);
42879+ fscache_stat_unchecked(&fscache_n_op_pend);
42880 fscache_start_operations(object);
42881 } else {
42882 ASSERTCMP(object->n_exclusive, ==, 0);
42883@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_obj
42884 object->n_ops++;
42885 atomic_inc(&op->usage);
42886 list_add_tail(&op->pend_link, &object->pending_ops);
42887- fscache_stat(&fscache_n_op_pend);
42888+ fscache_stat_unchecked(&fscache_n_op_pend);
42889 ret = 0;
42890 } else if (object->state == FSCACHE_OBJECT_DYING ||
42891 object->state == FSCACHE_OBJECT_LC_DYING ||
42892 object->state == FSCACHE_OBJECT_WITHDRAWING) {
42893- fscache_stat(&fscache_n_op_rejected);
42894+ fscache_stat_unchecked(&fscache_n_op_rejected);
42895 ret = -ENOBUFS;
42896 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
42897 fscache_report_unexpected_submission(object, op, ostate);
42898@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_ope
42899
42900 ret = -EBUSY;
42901 if (!list_empty(&op->pend_link)) {
42902- fscache_stat(&fscache_n_op_cancelled);
42903+ fscache_stat_unchecked(&fscache_n_op_cancelled);
42904 list_del_init(&op->pend_link);
42905 object->n_ops--;
42906 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
42907@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscach
42908 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
42909 BUG();
42910
42911- fscache_stat(&fscache_n_op_release);
42912+ fscache_stat_unchecked(&fscache_n_op_release);
42913
42914 if (op->release) {
42915 op->release(op);
42916@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscach
42917 * lock, and defer it otherwise */
42918 if (!spin_trylock(&object->lock)) {
42919 _debug("defer put");
42920- fscache_stat(&fscache_n_op_deferred_release);
42921+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
42922
42923 cache = object->cache;
42924 spin_lock(&cache->op_gc_list_lock);
42925@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_st
42926
42927 _debug("GC DEFERRED REL OBJ%x OP%x",
42928 object->debug_id, op->debug_id);
42929- fscache_stat(&fscache_n_op_gc);
42930+ fscache_stat_unchecked(&fscache_n_op_gc);
42931
42932 ASSERTCMP(atomic_read(&op->usage), ==, 0);
42933
42934diff -urNp linux-3.1.1/fs/fscache/page.c linux-3.1.1/fs/fscache/page.c
42935--- linux-3.1.1/fs/fscache/page.c 2011-11-11 15:19:27.000000000 -0500
42936+++ linux-3.1.1/fs/fscache/page.c 2011-11-16 18:39:08.000000000 -0500
42937@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
42938 val = radix_tree_lookup(&cookie->stores, page->index);
42939 if (!val) {
42940 rcu_read_unlock();
42941- fscache_stat(&fscache_n_store_vmscan_not_storing);
42942+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
42943 __fscache_uncache_page(cookie, page);
42944 return true;
42945 }
42946@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
42947 spin_unlock(&cookie->stores_lock);
42948
42949 if (xpage) {
42950- fscache_stat(&fscache_n_store_vmscan_cancelled);
42951- fscache_stat(&fscache_n_store_radix_deletes);
42952+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
42953+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42954 ASSERTCMP(xpage, ==, page);
42955 } else {
42956- fscache_stat(&fscache_n_store_vmscan_gone);
42957+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
42958 }
42959
42960 wake_up_bit(&cookie->flags, 0);
42961@@ -107,7 +107,7 @@ page_busy:
42962 /* we might want to wait here, but that could deadlock the allocator as
42963 * the work threads writing to the cache may all end up sleeping
42964 * on memory allocation */
42965- fscache_stat(&fscache_n_store_vmscan_busy);
42966+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
42967 return false;
42968 }
42969 EXPORT_SYMBOL(__fscache_maybe_release_page);
42970@@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
42971 FSCACHE_COOKIE_STORING_TAG);
42972 if (!radix_tree_tag_get(&cookie->stores, page->index,
42973 FSCACHE_COOKIE_PENDING_TAG)) {
42974- fscache_stat(&fscache_n_store_radix_deletes);
42975+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42976 xpage = radix_tree_delete(&cookie->stores, page->index);
42977 }
42978 spin_unlock(&cookie->stores_lock);
42979@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
42980
42981 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
42982
42983- fscache_stat(&fscache_n_attr_changed_calls);
42984+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
42985
42986 if (fscache_object_is_active(object)) {
42987 fscache_stat(&fscache_n_cop_attr_changed);
42988@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscach
42989
42990 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42991
42992- fscache_stat(&fscache_n_attr_changed);
42993+ fscache_stat_unchecked(&fscache_n_attr_changed);
42994
42995 op = kzalloc(sizeof(*op), GFP_KERNEL);
42996 if (!op) {
42997- fscache_stat(&fscache_n_attr_changed_nomem);
42998+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
42999 _leave(" = -ENOMEM");
43000 return -ENOMEM;
43001 }
43002@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscach
43003 if (fscache_submit_exclusive_op(object, op) < 0)
43004 goto nobufs;
43005 spin_unlock(&cookie->lock);
43006- fscache_stat(&fscache_n_attr_changed_ok);
43007+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
43008 fscache_put_operation(op);
43009 _leave(" = 0");
43010 return 0;
43011@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscach
43012 nobufs:
43013 spin_unlock(&cookie->lock);
43014 kfree(op);
43015- fscache_stat(&fscache_n_attr_changed_nobufs);
43016+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
43017 _leave(" = %d", -ENOBUFS);
43018 return -ENOBUFS;
43019 }
43020@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache
43021 /* allocate a retrieval operation and attempt to submit it */
43022 op = kzalloc(sizeof(*op), GFP_NOIO);
43023 if (!op) {
43024- fscache_stat(&fscache_n_retrievals_nomem);
43025+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43026 return NULL;
43027 }
43028
43029@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_loo
43030 return 0;
43031 }
43032
43033- fscache_stat(&fscache_n_retrievals_wait);
43034+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
43035
43036 jif = jiffies;
43037 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
43038 fscache_wait_bit_interruptible,
43039 TASK_INTERRUPTIBLE) != 0) {
43040- fscache_stat(&fscache_n_retrievals_intr);
43041+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
43042 _leave(" = -ERESTARTSYS");
43043 return -ERESTARTSYS;
43044 }
43045@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_loo
43046 */
43047 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43048 struct fscache_retrieval *op,
43049- atomic_t *stat_op_waits,
43050- atomic_t *stat_object_dead)
43051+ atomic_unchecked_t *stat_op_waits,
43052+ atomic_unchecked_t *stat_object_dead)
43053 {
43054 int ret;
43055
43056@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_ac
43057 goto check_if_dead;
43058
43059 _debug(">>> WT");
43060- fscache_stat(stat_op_waits);
43061+ fscache_stat_unchecked(stat_op_waits);
43062 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
43063 fscache_wait_bit_interruptible,
43064 TASK_INTERRUPTIBLE) < 0) {
43065@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_ac
43066
43067 check_if_dead:
43068 if (unlikely(fscache_object_is_dead(object))) {
43069- fscache_stat(stat_object_dead);
43070+ fscache_stat_unchecked(stat_object_dead);
43071 return -ENOBUFS;
43072 }
43073 return 0;
43074@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct
43075
43076 _enter("%p,%p,,,", cookie, page);
43077
43078- fscache_stat(&fscache_n_retrievals);
43079+ fscache_stat_unchecked(&fscache_n_retrievals);
43080
43081 if (hlist_empty(&cookie->backing_objects))
43082 goto nobufs;
43083@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct
43084 goto nobufs_unlock;
43085 spin_unlock(&cookie->lock);
43086
43087- fscache_stat(&fscache_n_retrieval_ops);
43088+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
43089
43090 /* pin the netfs read context in case we need to do the actual netfs
43091 * read because we've encountered a cache read failure */
43092@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct
43093
43094 error:
43095 if (ret == -ENOMEM)
43096- fscache_stat(&fscache_n_retrievals_nomem);
43097+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43098 else if (ret == -ERESTARTSYS)
43099- fscache_stat(&fscache_n_retrievals_intr);
43100+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
43101 else if (ret == -ENODATA)
43102- fscache_stat(&fscache_n_retrievals_nodata);
43103+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43104 else if (ret < 0)
43105- fscache_stat(&fscache_n_retrievals_nobufs);
43106+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43107 else
43108- fscache_stat(&fscache_n_retrievals_ok);
43109+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
43110
43111 fscache_put_retrieval(op);
43112 _leave(" = %d", ret);
43113@@ -429,7 +429,7 @@ nobufs_unlock:
43114 spin_unlock(&cookie->lock);
43115 kfree(op);
43116 nobufs:
43117- fscache_stat(&fscache_n_retrievals_nobufs);
43118+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43119 _leave(" = -ENOBUFS");
43120 return -ENOBUFS;
43121 }
43122@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct
43123
43124 _enter("%p,,%d,,,", cookie, *nr_pages);
43125
43126- fscache_stat(&fscache_n_retrievals);
43127+ fscache_stat_unchecked(&fscache_n_retrievals);
43128
43129 if (hlist_empty(&cookie->backing_objects))
43130 goto nobufs;
43131@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct
43132 goto nobufs_unlock;
43133 spin_unlock(&cookie->lock);
43134
43135- fscache_stat(&fscache_n_retrieval_ops);
43136+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
43137
43138 /* pin the netfs read context in case we need to do the actual netfs
43139 * read because we've encountered a cache read failure */
43140@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct
43141
43142 error:
43143 if (ret == -ENOMEM)
43144- fscache_stat(&fscache_n_retrievals_nomem);
43145+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43146 else if (ret == -ERESTARTSYS)
43147- fscache_stat(&fscache_n_retrievals_intr);
43148+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
43149 else if (ret == -ENODATA)
43150- fscache_stat(&fscache_n_retrievals_nodata);
43151+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43152 else if (ret < 0)
43153- fscache_stat(&fscache_n_retrievals_nobufs);
43154+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43155 else
43156- fscache_stat(&fscache_n_retrievals_ok);
43157+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
43158
43159 fscache_put_retrieval(op);
43160 _leave(" = %d", ret);
43161@@ -545,7 +545,7 @@ nobufs_unlock:
43162 spin_unlock(&cookie->lock);
43163 kfree(op);
43164 nobufs:
43165- fscache_stat(&fscache_n_retrievals_nobufs);
43166+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43167 _leave(" = -ENOBUFS");
43168 return -ENOBUFS;
43169 }
43170@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_
43171
43172 _enter("%p,%p,,,", cookie, page);
43173
43174- fscache_stat(&fscache_n_allocs);
43175+ fscache_stat_unchecked(&fscache_n_allocs);
43176
43177 if (hlist_empty(&cookie->backing_objects))
43178 goto nobufs;
43179@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_
43180 goto nobufs_unlock;
43181 spin_unlock(&cookie->lock);
43182
43183- fscache_stat(&fscache_n_alloc_ops);
43184+ fscache_stat_unchecked(&fscache_n_alloc_ops);
43185
43186 ret = fscache_wait_for_retrieval_activation(
43187 object, op,
43188@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_
43189
43190 error:
43191 if (ret == -ERESTARTSYS)
43192- fscache_stat(&fscache_n_allocs_intr);
43193+ fscache_stat_unchecked(&fscache_n_allocs_intr);
43194 else if (ret < 0)
43195- fscache_stat(&fscache_n_allocs_nobufs);
43196+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43197 else
43198- fscache_stat(&fscache_n_allocs_ok);
43199+ fscache_stat_unchecked(&fscache_n_allocs_ok);
43200
43201 fscache_put_retrieval(op);
43202 _leave(" = %d", ret);
43203@@ -625,7 +625,7 @@ nobufs_unlock:
43204 spin_unlock(&cookie->lock);
43205 kfree(op);
43206 nobufs:
43207- fscache_stat(&fscache_n_allocs_nobufs);
43208+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43209 _leave(" = -ENOBUFS");
43210 return -ENOBUFS;
43211 }
43212@@ -666,7 +666,7 @@ static void fscache_write_op(struct fsca
43213
43214 spin_lock(&cookie->stores_lock);
43215
43216- fscache_stat(&fscache_n_store_calls);
43217+ fscache_stat_unchecked(&fscache_n_store_calls);
43218
43219 /* find a page to store */
43220 page = NULL;
43221@@ -677,7 +677,7 @@ static void fscache_write_op(struct fsca
43222 page = results[0];
43223 _debug("gang %d [%lx]", n, page->index);
43224 if (page->index > op->store_limit) {
43225- fscache_stat(&fscache_n_store_pages_over_limit);
43226+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
43227 goto superseded;
43228 }
43229
43230@@ -689,7 +689,7 @@ static void fscache_write_op(struct fsca
43231 spin_unlock(&cookie->stores_lock);
43232 spin_unlock(&object->lock);
43233
43234- fscache_stat(&fscache_n_store_pages);
43235+ fscache_stat_unchecked(&fscache_n_store_pages);
43236 fscache_stat(&fscache_n_cop_write_page);
43237 ret = object->cache->ops->write_page(op, page);
43238 fscache_stat_d(&fscache_n_cop_write_page);
43239@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_
43240 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43241 ASSERT(PageFsCache(page));
43242
43243- fscache_stat(&fscache_n_stores);
43244+ fscache_stat_unchecked(&fscache_n_stores);
43245
43246 op = kzalloc(sizeof(*op), GFP_NOIO);
43247 if (!op)
43248@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_
43249 spin_unlock(&cookie->stores_lock);
43250 spin_unlock(&object->lock);
43251
43252- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
43253+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
43254 op->store_limit = object->store_limit;
43255
43256 if (fscache_submit_op(object, &op->op) < 0)
43257@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_
43258
43259 spin_unlock(&cookie->lock);
43260 radix_tree_preload_end();
43261- fscache_stat(&fscache_n_store_ops);
43262- fscache_stat(&fscache_n_stores_ok);
43263+ fscache_stat_unchecked(&fscache_n_store_ops);
43264+ fscache_stat_unchecked(&fscache_n_stores_ok);
43265
43266 /* the work queue now carries its own ref on the object */
43267 fscache_put_operation(&op->op);
43268@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_
43269 return 0;
43270
43271 already_queued:
43272- fscache_stat(&fscache_n_stores_again);
43273+ fscache_stat_unchecked(&fscache_n_stores_again);
43274 already_pending:
43275 spin_unlock(&cookie->stores_lock);
43276 spin_unlock(&object->lock);
43277 spin_unlock(&cookie->lock);
43278 radix_tree_preload_end();
43279 kfree(op);
43280- fscache_stat(&fscache_n_stores_ok);
43281+ fscache_stat_unchecked(&fscache_n_stores_ok);
43282 _leave(" = 0");
43283 return 0;
43284
43285@@ -851,14 +851,14 @@ nobufs:
43286 spin_unlock(&cookie->lock);
43287 radix_tree_preload_end();
43288 kfree(op);
43289- fscache_stat(&fscache_n_stores_nobufs);
43290+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
43291 _leave(" = -ENOBUFS");
43292 return -ENOBUFS;
43293
43294 nomem_free:
43295 kfree(op);
43296 nomem:
43297- fscache_stat(&fscache_n_stores_oom);
43298+ fscache_stat_unchecked(&fscache_n_stores_oom);
43299 _leave(" = -ENOMEM");
43300 return -ENOMEM;
43301 }
43302@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscac
43303 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43304 ASSERTCMP(page, !=, NULL);
43305
43306- fscache_stat(&fscache_n_uncaches);
43307+ fscache_stat_unchecked(&fscache_n_uncaches);
43308
43309 /* cache withdrawal may beat us to it */
43310 if (!PageFsCache(page))
43311@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fs
43312 unsigned long loop;
43313
43314 #ifdef CONFIG_FSCACHE_STATS
43315- atomic_add(pagevec->nr, &fscache_n_marks);
43316+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
43317 #endif
43318
43319 for (loop = 0; loop < pagevec->nr; loop++) {
43320diff -urNp linux-3.1.1/fs/fscache/stats.c linux-3.1.1/fs/fscache/stats.c
43321--- linux-3.1.1/fs/fscache/stats.c 2011-11-11 15:19:27.000000000 -0500
43322+++ linux-3.1.1/fs/fscache/stats.c 2011-11-16 18:39:08.000000000 -0500
43323@@ -18,95 +18,95 @@
43324 /*
43325 * operation counters
43326 */
43327-atomic_t fscache_n_op_pend;
43328-atomic_t fscache_n_op_run;
43329-atomic_t fscache_n_op_enqueue;
43330-atomic_t fscache_n_op_requeue;
43331-atomic_t fscache_n_op_deferred_release;
43332-atomic_t fscache_n_op_release;
43333-atomic_t fscache_n_op_gc;
43334-atomic_t fscache_n_op_cancelled;
43335-atomic_t fscache_n_op_rejected;
43336-
43337-atomic_t fscache_n_attr_changed;
43338-atomic_t fscache_n_attr_changed_ok;
43339-atomic_t fscache_n_attr_changed_nobufs;
43340-atomic_t fscache_n_attr_changed_nomem;
43341-atomic_t fscache_n_attr_changed_calls;
43342-
43343-atomic_t fscache_n_allocs;
43344-atomic_t fscache_n_allocs_ok;
43345-atomic_t fscache_n_allocs_wait;
43346-atomic_t fscache_n_allocs_nobufs;
43347-atomic_t fscache_n_allocs_intr;
43348-atomic_t fscache_n_allocs_object_dead;
43349-atomic_t fscache_n_alloc_ops;
43350-atomic_t fscache_n_alloc_op_waits;
43351-
43352-atomic_t fscache_n_retrievals;
43353-atomic_t fscache_n_retrievals_ok;
43354-atomic_t fscache_n_retrievals_wait;
43355-atomic_t fscache_n_retrievals_nodata;
43356-atomic_t fscache_n_retrievals_nobufs;
43357-atomic_t fscache_n_retrievals_intr;
43358-atomic_t fscache_n_retrievals_nomem;
43359-atomic_t fscache_n_retrievals_object_dead;
43360-atomic_t fscache_n_retrieval_ops;
43361-atomic_t fscache_n_retrieval_op_waits;
43362-
43363-atomic_t fscache_n_stores;
43364-atomic_t fscache_n_stores_ok;
43365-atomic_t fscache_n_stores_again;
43366-atomic_t fscache_n_stores_nobufs;
43367-atomic_t fscache_n_stores_oom;
43368-atomic_t fscache_n_store_ops;
43369-atomic_t fscache_n_store_calls;
43370-atomic_t fscache_n_store_pages;
43371-atomic_t fscache_n_store_radix_deletes;
43372-atomic_t fscache_n_store_pages_over_limit;
43373-
43374-atomic_t fscache_n_store_vmscan_not_storing;
43375-atomic_t fscache_n_store_vmscan_gone;
43376-atomic_t fscache_n_store_vmscan_busy;
43377-atomic_t fscache_n_store_vmscan_cancelled;
43378-
43379-atomic_t fscache_n_marks;
43380-atomic_t fscache_n_uncaches;
43381-
43382-atomic_t fscache_n_acquires;
43383-atomic_t fscache_n_acquires_null;
43384-atomic_t fscache_n_acquires_no_cache;
43385-atomic_t fscache_n_acquires_ok;
43386-atomic_t fscache_n_acquires_nobufs;
43387-atomic_t fscache_n_acquires_oom;
43388-
43389-atomic_t fscache_n_updates;
43390-atomic_t fscache_n_updates_null;
43391-atomic_t fscache_n_updates_run;
43392-
43393-atomic_t fscache_n_relinquishes;
43394-atomic_t fscache_n_relinquishes_null;
43395-atomic_t fscache_n_relinquishes_waitcrt;
43396-atomic_t fscache_n_relinquishes_retire;
43397-
43398-atomic_t fscache_n_cookie_index;
43399-atomic_t fscache_n_cookie_data;
43400-atomic_t fscache_n_cookie_special;
43401-
43402-atomic_t fscache_n_object_alloc;
43403-atomic_t fscache_n_object_no_alloc;
43404-atomic_t fscache_n_object_lookups;
43405-atomic_t fscache_n_object_lookups_negative;
43406-atomic_t fscache_n_object_lookups_positive;
43407-atomic_t fscache_n_object_lookups_timed_out;
43408-atomic_t fscache_n_object_created;
43409-atomic_t fscache_n_object_avail;
43410-atomic_t fscache_n_object_dead;
43411-
43412-atomic_t fscache_n_checkaux_none;
43413-atomic_t fscache_n_checkaux_okay;
43414-atomic_t fscache_n_checkaux_update;
43415-atomic_t fscache_n_checkaux_obsolete;
43416+atomic_unchecked_t fscache_n_op_pend;
43417+atomic_unchecked_t fscache_n_op_run;
43418+atomic_unchecked_t fscache_n_op_enqueue;
43419+atomic_unchecked_t fscache_n_op_requeue;
43420+atomic_unchecked_t fscache_n_op_deferred_release;
43421+atomic_unchecked_t fscache_n_op_release;
43422+atomic_unchecked_t fscache_n_op_gc;
43423+atomic_unchecked_t fscache_n_op_cancelled;
43424+atomic_unchecked_t fscache_n_op_rejected;
43425+
43426+atomic_unchecked_t fscache_n_attr_changed;
43427+atomic_unchecked_t fscache_n_attr_changed_ok;
43428+atomic_unchecked_t fscache_n_attr_changed_nobufs;
43429+atomic_unchecked_t fscache_n_attr_changed_nomem;
43430+atomic_unchecked_t fscache_n_attr_changed_calls;
43431+
43432+atomic_unchecked_t fscache_n_allocs;
43433+atomic_unchecked_t fscache_n_allocs_ok;
43434+atomic_unchecked_t fscache_n_allocs_wait;
43435+atomic_unchecked_t fscache_n_allocs_nobufs;
43436+atomic_unchecked_t fscache_n_allocs_intr;
43437+atomic_unchecked_t fscache_n_allocs_object_dead;
43438+atomic_unchecked_t fscache_n_alloc_ops;
43439+atomic_unchecked_t fscache_n_alloc_op_waits;
43440+
43441+atomic_unchecked_t fscache_n_retrievals;
43442+atomic_unchecked_t fscache_n_retrievals_ok;
43443+atomic_unchecked_t fscache_n_retrievals_wait;
43444+atomic_unchecked_t fscache_n_retrievals_nodata;
43445+atomic_unchecked_t fscache_n_retrievals_nobufs;
43446+atomic_unchecked_t fscache_n_retrievals_intr;
43447+atomic_unchecked_t fscache_n_retrievals_nomem;
43448+atomic_unchecked_t fscache_n_retrievals_object_dead;
43449+atomic_unchecked_t fscache_n_retrieval_ops;
43450+atomic_unchecked_t fscache_n_retrieval_op_waits;
43451+
43452+atomic_unchecked_t fscache_n_stores;
43453+atomic_unchecked_t fscache_n_stores_ok;
43454+atomic_unchecked_t fscache_n_stores_again;
43455+atomic_unchecked_t fscache_n_stores_nobufs;
43456+atomic_unchecked_t fscache_n_stores_oom;
43457+atomic_unchecked_t fscache_n_store_ops;
43458+atomic_unchecked_t fscache_n_store_calls;
43459+atomic_unchecked_t fscache_n_store_pages;
43460+atomic_unchecked_t fscache_n_store_radix_deletes;
43461+atomic_unchecked_t fscache_n_store_pages_over_limit;
43462+
43463+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
43464+atomic_unchecked_t fscache_n_store_vmscan_gone;
43465+atomic_unchecked_t fscache_n_store_vmscan_busy;
43466+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
43467+
43468+atomic_unchecked_t fscache_n_marks;
43469+atomic_unchecked_t fscache_n_uncaches;
43470+
43471+atomic_unchecked_t fscache_n_acquires;
43472+atomic_unchecked_t fscache_n_acquires_null;
43473+atomic_unchecked_t fscache_n_acquires_no_cache;
43474+atomic_unchecked_t fscache_n_acquires_ok;
43475+atomic_unchecked_t fscache_n_acquires_nobufs;
43476+atomic_unchecked_t fscache_n_acquires_oom;
43477+
43478+atomic_unchecked_t fscache_n_updates;
43479+atomic_unchecked_t fscache_n_updates_null;
43480+atomic_unchecked_t fscache_n_updates_run;
43481+
43482+atomic_unchecked_t fscache_n_relinquishes;
43483+atomic_unchecked_t fscache_n_relinquishes_null;
43484+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43485+atomic_unchecked_t fscache_n_relinquishes_retire;
43486+
43487+atomic_unchecked_t fscache_n_cookie_index;
43488+atomic_unchecked_t fscache_n_cookie_data;
43489+atomic_unchecked_t fscache_n_cookie_special;
43490+
43491+atomic_unchecked_t fscache_n_object_alloc;
43492+atomic_unchecked_t fscache_n_object_no_alloc;
43493+atomic_unchecked_t fscache_n_object_lookups;
43494+atomic_unchecked_t fscache_n_object_lookups_negative;
43495+atomic_unchecked_t fscache_n_object_lookups_positive;
43496+atomic_unchecked_t fscache_n_object_lookups_timed_out;
43497+atomic_unchecked_t fscache_n_object_created;
43498+atomic_unchecked_t fscache_n_object_avail;
43499+atomic_unchecked_t fscache_n_object_dead;
43500+
43501+atomic_unchecked_t fscache_n_checkaux_none;
43502+atomic_unchecked_t fscache_n_checkaux_okay;
43503+atomic_unchecked_t fscache_n_checkaux_update;
43504+atomic_unchecked_t fscache_n_checkaux_obsolete;
43505
43506 atomic_t fscache_n_cop_alloc_object;
43507 atomic_t fscache_n_cop_lookup_object;
43508@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
43509 seq_puts(m, "FS-Cache statistics\n");
43510
43511 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
43512- atomic_read(&fscache_n_cookie_index),
43513- atomic_read(&fscache_n_cookie_data),
43514- atomic_read(&fscache_n_cookie_special));
43515+ atomic_read_unchecked(&fscache_n_cookie_index),
43516+ atomic_read_unchecked(&fscache_n_cookie_data),
43517+ atomic_read_unchecked(&fscache_n_cookie_special));
43518
43519 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
43520- atomic_read(&fscache_n_object_alloc),
43521- atomic_read(&fscache_n_object_no_alloc),
43522- atomic_read(&fscache_n_object_avail),
43523- atomic_read(&fscache_n_object_dead));
43524+ atomic_read_unchecked(&fscache_n_object_alloc),
43525+ atomic_read_unchecked(&fscache_n_object_no_alloc),
43526+ atomic_read_unchecked(&fscache_n_object_avail),
43527+ atomic_read_unchecked(&fscache_n_object_dead));
43528 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
43529- atomic_read(&fscache_n_checkaux_none),
43530- atomic_read(&fscache_n_checkaux_okay),
43531- atomic_read(&fscache_n_checkaux_update),
43532- atomic_read(&fscache_n_checkaux_obsolete));
43533+ atomic_read_unchecked(&fscache_n_checkaux_none),
43534+ atomic_read_unchecked(&fscache_n_checkaux_okay),
43535+ atomic_read_unchecked(&fscache_n_checkaux_update),
43536+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
43537
43538 seq_printf(m, "Pages : mrk=%u unc=%u\n",
43539- atomic_read(&fscache_n_marks),
43540- atomic_read(&fscache_n_uncaches));
43541+ atomic_read_unchecked(&fscache_n_marks),
43542+ atomic_read_unchecked(&fscache_n_uncaches));
43543
43544 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
43545 " oom=%u\n",
43546- atomic_read(&fscache_n_acquires),
43547- atomic_read(&fscache_n_acquires_null),
43548- atomic_read(&fscache_n_acquires_no_cache),
43549- atomic_read(&fscache_n_acquires_ok),
43550- atomic_read(&fscache_n_acquires_nobufs),
43551- atomic_read(&fscache_n_acquires_oom));
43552+ atomic_read_unchecked(&fscache_n_acquires),
43553+ atomic_read_unchecked(&fscache_n_acquires_null),
43554+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
43555+ atomic_read_unchecked(&fscache_n_acquires_ok),
43556+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
43557+ atomic_read_unchecked(&fscache_n_acquires_oom));
43558
43559 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
43560- atomic_read(&fscache_n_object_lookups),
43561- atomic_read(&fscache_n_object_lookups_negative),
43562- atomic_read(&fscache_n_object_lookups_positive),
43563- atomic_read(&fscache_n_object_created),
43564- atomic_read(&fscache_n_object_lookups_timed_out));
43565+ atomic_read_unchecked(&fscache_n_object_lookups),
43566+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
43567+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
43568+ atomic_read_unchecked(&fscache_n_object_created),
43569+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
43570
43571 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
43572- atomic_read(&fscache_n_updates),
43573- atomic_read(&fscache_n_updates_null),
43574- atomic_read(&fscache_n_updates_run));
43575+ atomic_read_unchecked(&fscache_n_updates),
43576+ atomic_read_unchecked(&fscache_n_updates_null),
43577+ atomic_read_unchecked(&fscache_n_updates_run));
43578
43579 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
43580- atomic_read(&fscache_n_relinquishes),
43581- atomic_read(&fscache_n_relinquishes_null),
43582- atomic_read(&fscache_n_relinquishes_waitcrt),
43583- atomic_read(&fscache_n_relinquishes_retire));
43584+ atomic_read_unchecked(&fscache_n_relinquishes),
43585+ atomic_read_unchecked(&fscache_n_relinquishes_null),
43586+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
43587+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
43588
43589 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
43590- atomic_read(&fscache_n_attr_changed),
43591- atomic_read(&fscache_n_attr_changed_ok),
43592- atomic_read(&fscache_n_attr_changed_nobufs),
43593- atomic_read(&fscache_n_attr_changed_nomem),
43594- atomic_read(&fscache_n_attr_changed_calls));
43595+ atomic_read_unchecked(&fscache_n_attr_changed),
43596+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
43597+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
43598+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
43599+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
43600
43601 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
43602- atomic_read(&fscache_n_allocs),
43603- atomic_read(&fscache_n_allocs_ok),
43604- atomic_read(&fscache_n_allocs_wait),
43605- atomic_read(&fscache_n_allocs_nobufs),
43606- atomic_read(&fscache_n_allocs_intr));
43607+ atomic_read_unchecked(&fscache_n_allocs),
43608+ atomic_read_unchecked(&fscache_n_allocs_ok),
43609+ atomic_read_unchecked(&fscache_n_allocs_wait),
43610+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
43611+ atomic_read_unchecked(&fscache_n_allocs_intr));
43612 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
43613- atomic_read(&fscache_n_alloc_ops),
43614- atomic_read(&fscache_n_alloc_op_waits),
43615- atomic_read(&fscache_n_allocs_object_dead));
43616+ atomic_read_unchecked(&fscache_n_alloc_ops),
43617+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
43618+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
43619
43620 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
43621 " int=%u oom=%u\n",
43622- atomic_read(&fscache_n_retrievals),
43623- atomic_read(&fscache_n_retrievals_ok),
43624- atomic_read(&fscache_n_retrievals_wait),
43625- atomic_read(&fscache_n_retrievals_nodata),
43626- atomic_read(&fscache_n_retrievals_nobufs),
43627- atomic_read(&fscache_n_retrievals_intr),
43628- atomic_read(&fscache_n_retrievals_nomem));
43629+ atomic_read_unchecked(&fscache_n_retrievals),
43630+ atomic_read_unchecked(&fscache_n_retrievals_ok),
43631+ atomic_read_unchecked(&fscache_n_retrievals_wait),
43632+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
43633+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
43634+ atomic_read_unchecked(&fscache_n_retrievals_intr),
43635+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
43636 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
43637- atomic_read(&fscache_n_retrieval_ops),
43638- atomic_read(&fscache_n_retrieval_op_waits),
43639- atomic_read(&fscache_n_retrievals_object_dead));
43640+ atomic_read_unchecked(&fscache_n_retrieval_ops),
43641+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
43642+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
43643
43644 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
43645- atomic_read(&fscache_n_stores),
43646- atomic_read(&fscache_n_stores_ok),
43647- atomic_read(&fscache_n_stores_again),
43648- atomic_read(&fscache_n_stores_nobufs),
43649- atomic_read(&fscache_n_stores_oom));
43650+ atomic_read_unchecked(&fscache_n_stores),
43651+ atomic_read_unchecked(&fscache_n_stores_ok),
43652+ atomic_read_unchecked(&fscache_n_stores_again),
43653+ atomic_read_unchecked(&fscache_n_stores_nobufs),
43654+ atomic_read_unchecked(&fscache_n_stores_oom));
43655 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
43656- atomic_read(&fscache_n_store_ops),
43657- atomic_read(&fscache_n_store_calls),
43658- atomic_read(&fscache_n_store_pages),
43659- atomic_read(&fscache_n_store_radix_deletes),
43660- atomic_read(&fscache_n_store_pages_over_limit));
43661+ atomic_read_unchecked(&fscache_n_store_ops),
43662+ atomic_read_unchecked(&fscache_n_store_calls),
43663+ atomic_read_unchecked(&fscache_n_store_pages),
43664+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
43665+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
43666
43667 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
43668- atomic_read(&fscache_n_store_vmscan_not_storing),
43669- atomic_read(&fscache_n_store_vmscan_gone),
43670- atomic_read(&fscache_n_store_vmscan_busy),
43671- atomic_read(&fscache_n_store_vmscan_cancelled));
43672+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
43673+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
43674+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
43675+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
43676
43677 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
43678- atomic_read(&fscache_n_op_pend),
43679- atomic_read(&fscache_n_op_run),
43680- atomic_read(&fscache_n_op_enqueue),
43681- atomic_read(&fscache_n_op_cancelled),
43682- atomic_read(&fscache_n_op_rejected));
43683+ atomic_read_unchecked(&fscache_n_op_pend),
43684+ atomic_read_unchecked(&fscache_n_op_run),
43685+ atomic_read_unchecked(&fscache_n_op_enqueue),
43686+ atomic_read_unchecked(&fscache_n_op_cancelled),
43687+ atomic_read_unchecked(&fscache_n_op_rejected));
43688 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
43689- atomic_read(&fscache_n_op_deferred_release),
43690- atomic_read(&fscache_n_op_release),
43691- atomic_read(&fscache_n_op_gc));
43692+ atomic_read_unchecked(&fscache_n_op_deferred_release),
43693+ atomic_read_unchecked(&fscache_n_op_release),
43694+ atomic_read_unchecked(&fscache_n_op_gc));
43695
43696 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
43697 atomic_read(&fscache_n_cop_alloc_object),
43698diff -urNp linux-3.1.1/fs/fs_struct.c linux-3.1.1/fs/fs_struct.c
43699--- linux-3.1.1/fs/fs_struct.c 2011-11-11 15:19:27.000000000 -0500
43700+++ linux-3.1.1/fs/fs_struct.c 2011-11-16 18:40:29.000000000 -0500
43701@@ -4,6 +4,7 @@
43702 #include <linux/path.h>
43703 #include <linux/slab.h>
43704 #include <linux/fs_struct.h>
43705+#include <linux/grsecurity.h>
43706 #include "internal.h"
43707
43708 static inline void path_get_longterm(struct path *path)
43709@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
43710 old_root = fs->root;
43711 fs->root = *path;
43712 path_get_longterm(path);
43713+ gr_set_chroot_entries(current, path);
43714 write_seqcount_end(&fs->seq);
43715 spin_unlock(&fs->lock);
43716 if (old_root.dentry)
43717@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
43718 && fs->root.mnt == old_root->mnt) {
43719 path_get_longterm(new_root);
43720 fs->root = *new_root;
43721+ gr_set_chroot_entries(p, new_root);
43722 count++;
43723 }
43724 if (fs->pwd.dentry == old_root->dentry
43725@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
43726 spin_lock(&fs->lock);
43727 write_seqcount_begin(&fs->seq);
43728 tsk->fs = NULL;
43729- kill = !--fs->users;
43730+ gr_clear_chroot_entries(tsk);
43731+ kill = !atomic_dec_return(&fs->users);
43732 write_seqcount_end(&fs->seq);
43733 spin_unlock(&fs->lock);
43734 task_unlock(tsk);
43735@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct
43736 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
43737 /* We don't need to lock fs - think why ;-) */
43738 if (fs) {
43739- fs->users = 1;
43740+ atomic_set(&fs->users, 1);
43741 fs->in_exec = 0;
43742 spin_lock_init(&fs->lock);
43743 seqcount_init(&fs->seq);
43744@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct
43745 spin_lock(&old->lock);
43746 fs->root = old->root;
43747 path_get_longterm(&fs->root);
43748+ /* instead of calling gr_set_chroot_entries here,
43749+ we call it from every caller of this function
43750+ */
43751 fs->pwd = old->pwd;
43752 path_get_longterm(&fs->pwd);
43753 spin_unlock(&old->lock);
43754@@ -150,8 +157,9 @@ int unshare_fs_struct(void)
43755
43756 task_lock(current);
43757 spin_lock(&fs->lock);
43758- kill = !--fs->users;
43759+ kill = !atomic_dec_return(&fs->users);
43760 current->fs = new_fs;
43761+ gr_set_chroot_entries(current, &new_fs->root);
43762 spin_unlock(&fs->lock);
43763 task_unlock(current);
43764
43765@@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
43766
43767 /* to be mentioned only in INIT_TASK */
43768 struct fs_struct init_fs = {
43769- .users = 1,
43770+ .users = ATOMIC_INIT(1),
43771 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
43772 .seq = SEQCNT_ZERO,
43773 .umask = 0022,
43774@@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
43775 task_lock(current);
43776
43777 spin_lock(&init_fs.lock);
43778- init_fs.users++;
43779+ atomic_inc(&init_fs.users);
43780 spin_unlock(&init_fs.lock);
43781
43782 spin_lock(&fs->lock);
43783 current->fs = &init_fs;
43784- kill = !--fs->users;
43785+ gr_set_chroot_entries(current, &current->fs->root);
43786+ kill = !atomic_dec_return(&fs->users);
43787 spin_unlock(&fs->lock);
43788
43789 task_unlock(current);
43790diff -urNp linux-3.1.1/fs/fuse/cuse.c linux-3.1.1/fs/fuse/cuse.c
43791--- linux-3.1.1/fs/fuse/cuse.c 2011-11-11 15:19:27.000000000 -0500
43792+++ linux-3.1.1/fs/fuse/cuse.c 2011-11-16 18:39:08.000000000 -0500
43793@@ -586,10 +586,12 @@ static int __init cuse_init(void)
43794 INIT_LIST_HEAD(&cuse_conntbl[i]);
43795
43796 /* inherit and extend fuse_dev_operations */
43797- cuse_channel_fops = fuse_dev_operations;
43798- cuse_channel_fops.owner = THIS_MODULE;
43799- cuse_channel_fops.open = cuse_channel_open;
43800- cuse_channel_fops.release = cuse_channel_release;
43801+ pax_open_kernel();
43802+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
43803+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
43804+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
43805+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
43806+ pax_close_kernel();
43807
43808 cuse_class = class_create(THIS_MODULE, "cuse");
43809 if (IS_ERR(cuse_class))
43810diff -urNp linux-3.1.1/fs/fuse/dev.c linux-3.1.1/fs/fuse/dev.c
43811--- linux-3.1.1/fs/fuse/dev.c 2011-11-11 15:19:27.000000000 -0500
43812+++ linux-3.1.1/fs/fuse/dev.c 2011-11-16 18:39:08.000000000 -0500
43813@@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(stru
43814 ret = 0;
43815 pipe_lock(pipe);
43816
43817- if (!pipe->readers) {
43818+ if (!atomic_read(&pipe->readers)) {
43819 send_sig(SIGPIPE, current, 0);
43820 if (!ret)
43821 ret = -EPIPE;
43822diff -urNp linux-3.1.1/fs/fuse/dir.c linux-3.1.1/fs/fuse/dir.c
43823--- linux-3.1.1/fs/fuse/dir.c 2011-11-11 15:19:27.000000000 -0500
43824+++ linux-3.1.1/fs/fuse/dir.c 2011-11-16 18:39:08.000000000 -0500
43825@@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *de
43826 return link;
43827 }
43828
43829-static void free_link(char *link)
43830+static void free_link(const char *link)
43831 {
43832 if (!IS_ERR(link))
43833 free_page((unsigned long) link);
43834diff -urNp linux-3.1.1/fs/gfs2/inode.c linux-3.1.1/fs/gfs2/inode.c
43835--- linux-3.1.1/fs/gfs2/inode.c 2011-11-11 15:19:27.000000000 -0500
43836+++ linux-3.1.1/fs/gfs2/inode.c 2011-11-16 18:39:08.000000000 -0500
43837@@ -1517,7 +1517,7 @@ out:
43838
43839 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
43840 {
43841- char *s = nd_get_link(nd);
43842+ const char *s = nd_get_link(nd);
43843 if (!IS_ERR(s))
43844 kfree(s);
43845 }
43846diff -urNp linux-3.1.1/fs/hfsplus/catalog.c linux-3.1.1/fs/hfsplus/catalog.c
43847--- linux-3.1.1/fs/hfsplus/catalog.c 2011-11-11 15:19:27.000000000 -0500
43848+++ linux-3.1.1/fs/hfsplus/catalog.c 2011-11-16 19:23:09.000000000 -0500
43849@@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block
43850 int err;
43851 u16 type;
43852
43853+ pax_track_stack();
43854+
43855 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
43856 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
43857 if (err)
43858@@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct
43859 int entry_size;
43860 int err;
43861
43862+ pax_track_stack();
43863+
43864 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
43865 str->name, cnid, inode->i_nlink);
43866 err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
43867@@ -353,6 +357,8 @@ int hfsplus_rename_cat(u32 cnid,
43868 int entry_size, type;
43869 int err;
43870
43871+ pax_track_stack();
43872+
43873 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
43874 cnid, src_dir->i_ino, src_name->name,
43875 dst_dir->i_ino, dst_name->name);
43876diff -urNp linux-3.1.1/fs/hfsplus/dir.c linux-3.1.1/fs/hfsplus/dir.c
43877--- linux-3.1.1/fs/hfsplus/dir.c 2011-11-11 15:19:27.000000000 -0500
43878+++ linux-3.1.1/fs/hfsplus/dir.c 2011-11-16 18:40:29.000000000 -0500
43879@@ -131,6 +131,8 @@ static int hfsplus_readdir(struct file *
43880 struct hfsplus_readdir_data *rd;
43881 u16 type;
43882
43883+ pax_track_stack();
43884+
43885 if (filp->f_pos >= inode->i_size)
43886 return 0;
43887
43888diff -urNp linux-3.1.1/fs/hfsplus/inode.c linux-3.1.1/fs/hfsplus/inode.c
43889--- linux-3.1.1/fs/hfsplus/inode.c 2011-11-11 15:19:27.000000000 -0500
43890+++ linux-3.1.1/fs/hfsplus/inode.c 2011-11-16 18:40:29.000000000 -0500
43891@@ -501,6 +501,8 @@ int hfsplus_cat_read_inode(struct inode
43892 int res = 0;
43893 u16 type;
43894
43895+ pax_track_stack();
43896+
43897 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
43898
43899 HFSPLUS_I(inode)->linkid = 0;
43900@@ -564,6 +566,8 @@ int hfsplus_cat_write_inode(struct inode
43901 struct hfs_find_data fd;
43902 hfsplus_cat_entry entry;
43903
43904+ pax_track_stack();
43905+
43906 if (HFSPLUS_IS_RSRC(inode))
43907 main_inode = HFSPLUS_I(inode)->rsrc_inode;
43908
43909diff -urNp linux-3.1.1/fs/hfsplus/ioctl.c linux-3.1.1/fs/hfsplus/ioctl.c
43910--- linux-3.1.1/fs/hfsplus/ioctl.c 2011-11-11 15:19:27.000000000 -0500
43911+++ linux-3.1.1/fs/hfsplus/ioctl.c 2011-11-16 18:40:29.000000000 -0500
43912@@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
43913 struct hfsplus_cat_file *file;
43914 int res;
43915
43916+ pax_track_stack();
43917+
43918 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43919 return -EOPNOTSUPP;
43920
43921@@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
43922 struct hfsplus_cat_file *file;
43923 ssize_t res = 0;
43924
43925+ pax_track_stack();
43926+
43927 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43928 return -EOPNOTSUPP;
43929
43930diff -urNp linux-3.1.1/fs/hfsplus/super.c linux-3.1.1/fs/hfsplus/super.c
43931--- linux-3.1.1/fs/hfsplus/super.c 2011-11-11 15:19:27.000000000 -0500
43932+++ linux-3.1.1/fs/hfsplus/super.c 2011-11-16 19:23:30.000000000 -0500
43933@@ -347,6 +347,8 @@ static int hfsplus_fill_super(struct sup
43934 u64 last_fs_block, last_fs_page;
43935 int err;
43936
43937+ pax_track_stack();
43938+
43939 err = -EINVAL;
43940 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
43941 if (!sbi)
43942diff -urNp linux-3.1.1/fs/hugetlbfs/inode.c linux-3.1.1/fs/hugetlbfs/inode.c
43943--- linux-3.1.1/fs/hugetlbfs/inode.c 2011-11-11 15:19:27.000000000 -0500
43944+++ linux-3.1.1/fs/hugetlbfs/inode.c 2011-11-16 18:40:29.000000000 -0500
43945@@ -915,7 +915,7 @@ static struct file_system_type hugetlbfs
43946 .kill_sb = kill_litter_super,
43947 };
43948
43949-static struct vfsmount *hugetlbfs_vfsmount;
43950+struct vfsmount *hugetlbfs_vfsmount;
43951
43952 static int can_do_hugetlb_shm(void)
43953 {
43954diff -urNp linux-3.1.1/fs/inode.c linux-3.1.1/fs/inode.c
43955--- linux-3.1.1/fs/inode.c 2011-11-11 15:19:27.000000000 -0500
43956+++ linux-3.1.1/fs/inode.c 2011-11-16 18:39:08.000000000 -0500
43957@@ -787,8 +787,8 @@ unsigned int get_next_ino(void)
43958
43959 #ifdef CONFIG_SMP
43960 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
43961- static atomic_t shared_last_ino;
43962- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
43963+ static atomic_unchecked_t shared_last_ino;
43964+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
43965
43966 res = next - LAST_INO_BATCH;
43967 }
43968diff -urNp linux-3.1.1/fs/jbd/checkpoint.c linux-3.1.1/fs/jbd/checkpoint.c
43969--- linux-3.1.1/fs/jbd/checkpoint.c 2011-11-11 15:19:27.000000000 -0500
43970+++ linux-3.1.1/fs/jbd/checkpoint.c 2011-11-16 18:40:29.000000000 -0500
43971@@ -358,6 +358,8 @@ int log_do_checkpoint(journal_t *journal
43972 tid_t this_tid;
43973 int result;
43974
43975+ pax_track_stack();
43976+
43977 jbd_debug(1, "Start checkpoint\n");
43978
43979 /*
43980diff -urNp linux-3.1.1/fs/jffs2/compr_rtime.c linux-3.1.1/fs/jffs2/compr_rtime.c
43981--- linux-3.1.1/fs/jffs2/compr_rtime.c 2011-11-11 15:19:27.000000000 -0500
43982+++ linux-3.1.1/fs/jffs2/compr_rtime.c 2011-11-16 18:40:29.000000000 -0500
43983@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
43984 int outpos = 0;
43985 int pos=0;
43986
43987+ pax_track_stack();
43988+
43989 memset(positions,0,sizeof(positions));
43990
43991 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
43992@@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
43993 int outpos = 0;
43994 int pos=0;
43995
43996+ pax_track_stack();
43997+
43998 memset(positions,0,sizeof(positions));
43999
44000 while (outpos<destlen) {
44001diff -urNp linux-3.1.1/fs/jffs2/compr_rubin.c linux-3.1.1/fs/jffs2/compr_rubin.c
44002--- linux-3.1.1/fs/jffs2/compr_rubin.c 2011-11-11 15:19:27.000000000 -0500
44003+++ linux-3.1.1/fs/jffs2/compr_rubin.c 2011-11-16 18:40:29.000000000 -0500
44004@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
44005 int ret;
44006 uint32_t mysrclen, mydstlen;
44007
44008+ pax_track_stack();
44009+
44010 mysrclen = *sourcelen;
44011 mydstlen = *dstlen - 8;
44012
44013diff -urNp linux-3.1.1/fs/jffs2/erase.c linux-3.1.1/fs/jffs2/erase.c
44014--- linux-3.1.1/fs/jffs2/erase.c 2011-11-11 15:19:27.000000000 -0500
44015+++ linux-3.1.1/fs/jffs2/erase.c 2011-11-16 18:39:08.000000000 -0500
44016@@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
44017 struct jffs2_unknown_node marker = {
44018 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
44019 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44020- .totlen = cpu_to_je32(c->cleanmarker_size)
44021+ .totlen = cpu_to_je32(c->cleanmarker_size),
44022+ .hdr_crc = cpu_to_je32(0)
44023 };
44024
44025 jffs2_prealloc_raw_node_refs(c, jeb, 1);
44026diff -urNp linux-3.1.1/fs/jffs2/wbuf.c linux-3.1.1/fs/jffs2/wbuf.c
44027--- linux-3.1.1/fs/jffs2/wbuf.c 2011-11-11 15:19:27.000000000 -0500
44028+++ linux-3.1.1/fs/jffs2/wbuf.c 2011-11-16 18:39:08.000000000 -0500
44029@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
44030 {
44031 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
44032 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44033- .totlen = constant_cpu_to_je32(8)
44034+ .totlen = constant_cpu_to_je32(8),
44035+ .hdr_crc = constant_cpu_to_je32(0)
44036 };
44037
44038 /*
44039diff -urNp linux-3.1.1/fs/jffs2/xattr.c linux-3.1.1/fs/jffs2/xattr.c
44040--- linux-3.1.1/fs/jffs2/xattr.c 2011-11-11 15:19:27.000000000 -0500
44041+++ linux-3.1.1/fs/jffs2/xattr.c 2011-11-16 18:40:29.000000000 -0500
44042@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
44043
44044 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
44045
44046+ pax_track_stack();
44047+
44048 /* Phase.1 : Merge same xref */
44049 for (i=0; i < XREF_TMPHASH_SIZE; i++)
44050 xref_tmphash[i] = NULL;
44051diff -urNp linux-3.1.1/fs/jfs/super.c linux-3.1.1/fs/jfs/super.c
44052--- linux-3.1.1/fs/jfs/super.c 2011-11-11 15:19:27.000000000 -0500
44053+++ linux-3.1.1/fs/jfs/super.c 2011-11-16 18:39:08.000000000 -0500
44054@@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
44055
44056 jfs_inode_cachep =
44057 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
44058- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
44059+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
44060 init_once);
44061 if (jfs_inode_cachep == NULL)
44062 return -ENOMEM;
44063diff -urNp linux-3.1.1/fs/Kconfig.binfmt linux-3.1.1/fs/Kconfig.binfmt
44064--- linux-3.1.1/fs/Kconfig.binfmt 2011-11-11 15:19:27.000000000 -0500
44065+++ linux-3.1.1/fs/Kconfig.binfmt 2011-11-16 18:39:08.000000000 -0500
44066@@ -86,7 +86,7 @@ config HAVE_AOUT
44067
44068 config BINFMT_AOUT
44069 tristate "Kernel support for a.out and ECOFF binaries"
44070- depends on HAVE_AOUT
44071+ depends on HAVE_AOUT && BROKEN
44072 ---help---
44073 A.out (Assembler.OUTput) is a set of formats for libraries and
44074 executables used in the earliest versions of UNIX. Linux used
44075diff -urNp linux-3.1.1/fs/libfs.c linux-3.1.1/fs/libfs.c
44076--- linux-3.1.1/fs/libfs.c 2011-11-11 15:19:27.000000000 -0500
44077+++ linux-3.1.1/fs/libfs.c 2011-11-16 18:39:08.000000000 -0500
44078@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, v
44079
44080 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
44081 struct dentry *next;
44082+ char d_name[sizeof(next->d_iname)];
44083+ const unsigned char *name;
44084+
44085 next = list_entry(p, struct dentry, d_u.d_child);
44086 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
44087 if (!simple_positive(next)) {
44088@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, v
44089
44090 spin_unlock(&next->d_lock);
44091 spin_unlock(&dentry->d_lock);
44092- if (filldir(dirent, next->d_name.name,
44093+ name = next->d_name.name;
44094+ if (name == next->d_iname) {
44095+ memcpy(d_name, name, next->d_name.len);
44096+ name = d_name;
44097+ }
44098+ if (filldir(dirent, name,
44099 next->d_name.len, filp->f_pos,
44100 next->d_inode->i_ino,
44101 dt_type(next->d_inode)) < 0)
44102diff -urNp linux-3.1.1/fs/lockd/clntproc.c linux-3.1.1/fs/lockd/clntproc.c
44103--- linux-3.1.1/fs/lockd/clntproc.c 2011-11-11 15:19:27.000000000 -0500
44104+++ linux-3.1.1/fs/lockd/clntproc.c 2011-11-16 18:40:29.000000000 -0500
44105@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
44106 /*
44107 * Cookie counter for NLM requests
44108 */
44109-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
44110+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
44111
44112 void nlmclnt_next_cookie(struct nlm_cookie *c)
44113 {
44114- u32 cookie = atomic_inc_return(&nlm_cookie);
44115+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
44116
44117 memcpy(c->data, &cookie, 4);
44118 c->len=4;
44119@@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
44120 struct nlm_rqst reqst, *req;
44121 int status;
44122
44123+ pax_track_stack();
44124+
44125 req = &reqst;
44126 memset(req, 0, sizeof(*req));
44127 locks_init_lock(&req->a_args.lock.fl);
44128diff -urNp linux-3.1.1/fs/locks.c linux-3.1.1/fs/locks.c
44129--- linux-3.1.1/fs/locks.c 2011-11-11 15:19:27.000000000 -0500
44130+++ linux-3.1.1/fs/locks.c 2011-11-16 18:39:08.000000000 -0500
44131@@ -2022,16 +2022,16 @@ void locks_remove_flock(struct file *fil
44132 return;
44133
44134 if (filp->f_op && filp->f_op->flock) {
44135- struct file_lock fl = {
44136+ struct file_lock flock = {
44137 .fl_pid = current->tgid,
44138 .fl_file = filp,
44139 .fl_flags = FL_FLOCK,
44140 .fl_type = F_UNLCK,
44141 .fl_end = OFFSET_MAX,
44142 };
44143- filp->f_op->flock(filp, F_SETLKW, &fl);
44144- if (fl.fl_ops && fl.fl_ops->fl_release_private)
44145- fl.fl_ops->fl_release_private(&fl);
44146+ filp->f_op->flock(filp, F_SETLKW, &flock);
44147+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
44148+ flock.fl_ops->fl_release_private(&flock);
44149 }
44150
44151 lock_flocks();
44152diff -urNp linux-3.1.1/fs/logfs/super.c linux-3.1.1/fs/logfs/super.c
44153--- linux-3.1.1/fs/logfs/super.c 2011-11-11 15:19:27.000000000 -0500
44154+++ linux-3.1.1/fs/logfs/super.c 2011-11-16 18:40:29.000000000 -0500
44155@@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
44156 struct logfs_disk_super _ds1, *ds1 = &_ds1;
44157 int err, valid0, valid1;
44158
44159+ pax_track_stack();
44160+
44161 /* read first superblock */
44162 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
44163 if (err)
44164diff -urNp linux-3.1.1/fs/namei.c linux-3.1.1/fs/namei.c
44165--- linux-3.1.1/fs/namei.c 2011-11-11 15:19:27.000000000 -0500
44166+++ linux-3.1.1/fs/namei.c 2011-11-17 00:36:54.000000000 -0500
44167@@ -283,14 +283,22 @@ int generic_permission(struct inode *ino
44168
44169 if (S_ISDIR(inode->i_mode)) {
44170 /* DACs are overridable for directories */
44171- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44172- return 0;
44173 if (!(mask & MAY_WRITE))
44174 if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44175 return 0;
44176+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44177+ return 0;
44178 return -EACCES;
44179 }
44180 /*
44181+ * Searching includes executable on directories, else just read.
44182+ */
44183+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44184+ if (mask == MAY_READ)
44185+ if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44186+ return 0;
44187+
44188+ /*
44189 * Read/write DACs are always overridable.
44190 * Executable DACs are overridable when there is
44191 * at least one exec bit set.
44192@@ -299,14 +307,6 @@ int generic_permission(struct inode *ino
44193 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44194 return 0;
44195
44196- /*
44197- * Searching includes executable on directories, else just read.
44198- */
44199- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44200- if (mask == MAY_READ)
44201- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44202- return 0;
44203-
44204 return -EACCES;
44205 }
44206
44207@@ -653,11 +653,19 @@ follow_link(struct path *link, struct na
44208 return error;
44209 }
44210
44211+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
44212+ dentry->d_inode, dentry, nd->path.mnt)) {
44213+ error = -EACCES;
44214+ *p = ERR_PTR(error); /* no ->put_link(), please */
44215+ path_put(&nd->path);
44216+ return error;
44217+ }
44218+
44219 nd->last_type = LAST_BIND;
44220 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
44221 error = PTR_ERR(*p);
44222 if (!IS_ERR(*p)) {
44223- char *s = nd_get_link(nd);
44224+ const char *s = nd_get_link(nd);
44225 error = 0;
44226 if (s)
44227 error = __vfs_follow_link(nd, s);
44228@@ -1622,6 +1630,12 @@ static int path_lookupat(int dfd, const
44229 if (!err)
44230 err = complete_walk(nd);
44231
44232+ if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44233+ if (!err)
44234+ path_put(&nd->path);
44235+ err = -ENOENT;
44236+ }
44237+
44238 if (!err && nd->flags & LOOKUP_DIRECTORY) {
44239 if (!nd->inode->i_op->lookup) {
44240 path_put(&nd->path);
44241@@ -1649,6 +1663,9 @@ static int do_path_lookup(int dfd, const
44242 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
44243
44244 if (likely(!retval)) {
44245+ if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
44246+ return -ENOENT;
44247+
44248 if (unlikely(!audit_dummy_context())) {
44249 if (nd->path.dentry && nd->inode)
44250 audit_inode(name, nd->path.dentry);
44251@@ -2049,7 +2066,27 @@ static int may_open(struct path *path, i
44252 /*
44253 * Ensure there are no outstanding leases on the file.
44254 */
44255- return break_lease(inode, flag);
44256+ error = break_lease(inode, flag);
44257+
44258+ if (error)
44259+ return error;
44260+
44261+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
44262+ error = -EPERM;
44263+ goto exit;
44264+ }
44265+
44266+ if (gr_handle_rawio(inode)) {
44267+ error = -EPERM;
44268+ goto exit;
44269+ }
44270+
44271+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
44272+ error = -EACCES;
44273+ goto exit;
44274+ }
44275+exit:
44276+ return error;
44277 }
44278
44279 static int handle_truncate(struct file *filp)
44280@@ -2110,6 +2147,10 @@ static struct file *do_last(struct namei
44281 error = complete_walk(nd);
44282 if (error)
44283 return ERR_PTR(error);
44284+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44285+ error = -ENOENT;
44286+ goto exit;
44287+ }
44288 audit_inode(pathname, nd->path.dentry);
44289 if (open_flag & O_CREAT) {
44290 error = -EISDIR;
44291@@ -2120,6 +2161,10 @@ static struct file *do_last(struct namei
44292 error = complete_walk(nd);
44293 if (error)
44294 return ERR_PTR(error);
44295+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
44296+ error = -ENOENT;
44297+ goto exit;
44298+ }
44299 audit_inode(pathname, dir);
44300 goto ok;
44301 }
44302@@ -2142,6 +2187,11 @@ static struct file *do_last(struct namei
44303 if (error)
44304 return ERR_PTR(-ECHILD);
44305
44306+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44307+ error = -ENOENT;
44308+ goto exit;
44309+ }
44310+
44311 error = -ENOTDIR;
44312 if (nd->flags & LOOKUP_DIRECTORY) {
44313 if (!nd->inode->i_op->lookup)
44314@@ -2181,6 +2231,12 @@ static struct file *do_last(struct namei
44315 /* Negative dentry, just create the file */
44316 if (!dentry->d_inode) {
44317 int mode = op->mode;
44318+
44319+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
44320+ error = -EACCES;
44321+ goto exit_mutex_unlock;
44322+ }
44323+
44324 if (!IS_POSIXACL(dir->d_inode))
44325 mode &= ~current_umask();
44326 /*
44327@@ -2204,6 +2260,8 @@ static struct file *do_last(struct namei
44328 error = vfs_create(dir->d_inode, dentry, mode, nd);
44329 if (error)
44330 goto exit_mutex_unlock;
44331+ else
44332+ gr_handle_create(path->dentry, path->mnt);
44333 mutex_unlock(&dir->d_inode->i_mutex);
44334 dput(nd->path.dentry);
44335 nd->path.dentry = dentry;
44336@@ -2213,6 +2271,19 @@ static struct file *do_last(struct namei
44337 /*
44338 * It already exists.
44339 */
44340+
44341+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
44342+ error = -ENOENT;
44343+ goto exit_mutex_unlock;
44344+ }
44345+
44346+ /* only check if O_CREAT is specified, all other checks need to go
44347+ into may_open */
44348+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
44349+ error = -EACCES;
44350+ goto exit_mutex_unlock;
44351+ }
44352+
44353 mutex_unlock(&dir->d_inode->i_mutex);
44354 audit_inode(pathname, path->dentry);
44355
44356@@ -2425,6 +2496,11 @@ struct dentry *kern_path_create(int dfd,
44357 *path = nd.path;
44358 return dentry;
44359 eexist:
44360+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
44361+ dput(dentry);
44362+ dentry = ERR_PTR(-ENOENT);
44363+ goto fail;
44364+ }
44365 dput(dentry);
44366 dentry = ERR_PTR(-EEXIST);
44367 fail:
44368@@ -2447,6 +2523,20 @@ struct dentry *user_path_create(int dfd,
44369 }
44370 EXPORT_SYMBOL(user_path_create);
44371
44372+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
44373+{
44374+ char *tmp = getname(pathname);
44375+ struct dentry *res;
44376+ if (IS_ERR(tmp))
44377+ return ERR_CAST(tmp);
44378+ res = kern_path_create(dfd, tmp, path, is_dir);
44379+ if (IS_ERR(res))
44380+ putname(tmp);
44381+ else
44382+ *to = tmp;
44383+ return res;
44384+}
44385+
44386 int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
44387 {
44388 int error = may_create(dir, dentry);
44389@@ -2514,6 +2604,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
44390 error = mnt_want_write(path.mnt);
44391 if (error)
44392 goto out_dput;
44393+
44394+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
44395+ error = -EPERM;
44396+ goto out_drop_write;
44397+ }
44398+
44399+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
44400+ error = -EACCES;
44401+ goto out_drop_write;
44402+ }
44403+
44404 error = security_path_mknod(&path, dentry, mode, dev);
44405 if (error)
44406 goto out_drop_write;
44407@@ -2531,6 +2632,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
44408 }
44409 out_drop_write:
44410 mnt_drop_write(path.mnt);
44411+
44412+ if (!error)
44413+ gr_handle_create(dentry, path.mnt);
44414 out_dput:
44415 dput(dentry);
44416 mutex_unlock(&path.dentry->d_inode->i_mutex);
44417@@ -2580,12 +2684,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
44418 error = mnt_want_write(path.mnt);
44419 if (error)
44420 goto out_dput;
44421+
44422+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
44423+ error = -EACCES;
44424+ goto out_drop_write;
44425+ }
44426+
44427 error = security_path_mkdir(&path, dentry, mode);
44428 if (error)
44429 goto out_drop_write;
44430 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
44431 out_drop_write:
44432 mnt_drop_write(path.mnt);
44433+
44434+ if (!error)
44435+ gr_handle_create(dentry, path.mnt);
44436 out_dput:
44437 dput(dentry);
44438 mutex_unlock(&path.dentry->d_inode->i_mutex);
44439@@ -2665,6 +2778,8 @@ static long do_rmdir(int dfd, const char
44440 char * name;
44441 struct dentry *dentry;
44442 struct nameidata nd;
44443+ ino_t saved_ino = 0;
44444+ dev_t saved_dev = 0;
44445
44446 error = user_path_parent(dfd, pathname, &nd, &name);
44447 if (error)
44448@@ -2693,6 +2808,15 @@ static long do_rmdir(int dfd, const char
44449 error = -ENOENT;
44450 goto exit3;
44451 }
44452+
44453+ saved_ino = dentry->d_inode->i_ino;
44454+ saved_dev = gr_get_dev_from_dentry(dentry);
44455+
44456+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
44457+ error = -EACCES;
44458+ goto exit3;
44459+ }
44460+
44461 error = mnt_want_write(nd.path.mnt);
44462 if (error)
44463 goto exit3;
44464@@ -2700,6 +2824,8 @@ static long do_rmdir(int dfd, const char
44465 if (error)
44466 goto exit4;
44467 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
44468+ if (!error && (saved_dev || saved_ino))
44469+ gr_handle_delete(saved_ino, saved_dev);
44470 exit4:
44471 mnt_drop_write(nd.path.mnt);
44472 exit3:
44473@@ -2762,6 +2888,8 @@ static long do_unlinkat(int dfd, const c
44474 struct dentry *dentry;
44475 struct nameidata nd;
44476 struct inode *inode = NULL;
44477+ ino_t saved_ino = 0;
44478+ dev_t saved_dev = 0;
44479
44480 error = user_path_parent(dfd, pathname, &nd, &name);
44481 if (error)
44482@@ -2784,6 +2912,16 @@ static long do_unlinkat(int dfd, const c
44483 if (!inode)
44484 goto slashes;
44485 ihold(inode);
44486+
44487+ if (inode->i_nlink <= 1) {
44488+ saved_ino = inode->i_ino;
44489+ saved_dev = gr_get_dev_from_dentry(dentry);
44490+ }
44491+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
44492+ error = -EACCES;
44493+ goto exit2;
44494+ }
44495+
44496 error = mnt_want_write(nd.path.mnt);
44497 if (error)
44498 goto exit2;
44499@@ -2791,6 +2929,8 @@ static long do_unlinkat(int dfd, const c
44500 if (error)
44501 goto exit3;
44502 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
44503+ if (!error && (saved_ino || saved_dev))
44504+ gr_handle_delete(saved_ino, saved_dev);
44505 exit3:
44506 mnt_drop_write(nd.path.mnt);
44507 exit2:
44508@@ -2866,10 +3006,18 @@ SYSCALL_DEFINE3(symlinkat, const char __
44509 error = mnt_want_write(path.mnt);
44510 if (error)
44511 goto out_dput;
44512+
44513+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
44514+ error = -EACCES;
44515+ goto out_drop_write;
44516+ }
44517+
44518 error = security_path_symlink(&path, dentry, from);
44519 if (error)
44520 goto out_drop_write;
44521 error = vfs_symlink(path.dentry->d_inode, dentry, from);
44522+ if (!error)
44523+ gr_handle_create(dentry, path.mnt);
44524 out_drop_write:
44525 mnt_drop_write(path.mnt);
44526 out_dput:
44527@@ -2941,6 +3089,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44528 {
44529 struct dentry *new_dentry;
44530 struct path old_path, new_path;
44531+ char *to;
44532 int how = 0;
44533 int error;
44534
44535@@ -2964,7 +3113,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44536 if (error)
44537 return error;
44538
44539- new_dentry = user_path_create(newdfd, newname, &new_path, 0);
44540+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
44541 error = PTR_ERR(new_dentry);
44542 if (IS_ERR(new_dentry))
44543 goto out;
44544@@ -2975,13 +3124,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44545 error = mnt_want_write(new_path.mnt);
44546 if (error)
44547 goto out_dput;
44548+
44549+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
44550+ old_path.dentry->d_inode,
44551+ old_path.dentry->d_inode->i_mode, to)) {
44552+ error = -EACCES;
44553+ goto out_drop_write;
44554+ }
44555+
44556+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
44557+ old_path.dentry, old_path.mnt, to)) {
44558+ error = -EACCES;
44559+ goto out_drop_write;
44560+ }
44561+
44562 error = security_path_link(old_path.dentry, &new_path, new_dentry);
44563 if (error)
44564 goto out_drop_write;
44565 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
44566+ if (!error)
44567+ gr_handle_create(new_dentry, new_path.mnt);
44568 out_drop_write:
44569 mnt_drop_write(new_path.mnt);
44570 out_dput:
44571+ putname(to);
44572 dput(new_dentry);
44573 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
44574 path_put(&new_path);
44575@@ -3153,6 +3319,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44576 char *to;
44577 int error;
44578
44579+ pax_track_stack();
44580+
44581 error = user_path_parent(olddfd, oldname, &oldnd, &from);
44582 if (error)
44583 goto exit;
44584@@ -3209,6 +3377,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44585 if (new_dentry == trap)
44586 goto exit5;
44587
44588+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
44589+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
44590+ to);
44591+ if (error)
44592+ goto exit5;
44593+
44594 error = mnt_want_write(oldnd.path.mnt);
44595 if (error)
44596 goto exit5;
44597@@ -3218,6 +3392,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44598 goto exit6;
44599 error = vfs_rename(old_dir->d_inode, old_dentry,
44600 new_dir->d_inode, new_dentry);
44601+ if (!error)
44602+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
44603+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
44604 exit6:
44605 mnt_drop_write(oldnd.path.mnt);
44606 exit5:
44607@@ -3243,6 +3420,8 @@ SYSCALL_DEFINE2(rename, const char __use
44608
44609 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
44610 {
44611+ char tmpbuf[64];
44612+ const char *newlink;
44613 int len;
44614
44615 len = PTR_ERR(link);
44616@@ -3252,7 +3431,14 @@ int vfs_readlink(struct dentry *dentry,
44617 len = strlen(link);
44618 if (len > (unsigned) buflen)
44619 len = buflen;
44620- if (copy_to_user(buffer, link, len))
44621+
44622+ if (len < sizeof(tmpbuf)) {
44623+ memcpy(tmpbuf, link, len);
44624+ newlink = tmpbuf;
44625+ } else
44626+ newlink = link;
44627+
44628+ if (copy_to_user(buffer, newlink, len))
44629 len = -EFAULT;
44630 out:
44631 return len;
44632diff -urNp linux-3.1.1/fs/namespace.c linux-3.1.1/fs/namespace.c
44633--- linux-3.1.1/fs/namespace.c 2011-11-11 15:19:27.000000000 -0500
44634+++ linux-3.1.1/fs/namespace.c 2011-11-16 18:40:29.000000000 -0500
44635@@ -1329,6 +1329,9 @@ static int do_umount(struct vfsmount *mn
44636 if (!(sb->s_flags & MS_RDONLY))
44637 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
44638 up_write(&sb->s_umount);
44639+
44640+ gr_log_remount(mnt->mnt_devname, retval);
44641+
44642 return retval;
44643 }
44644
44645@@ -1348,6 +1351,9 @@ static int do_umount(struct vfsmount *mn
44646 br_write_unlock(vfsmount_lock);
44647 up_write(&namespace_sem);
44648 release_mounts(&umount_list);
44649+
44650+ gr_log_unmount(mnt->mnt_devname, retval);
44651+
44652 return retval;
44653 }
44654
44655@@ -2339,6 +2345,16 @@ long do_mount(char *dev_name, char *dir_
44656 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
44657 MS_STRICTATIME);
44658
44659+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
44660+ retval = -EPERM;
44661+ goto dput_out;
44662+ }
44663+
44664+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
44665+ retval = -EPERM;
44666+ goto dput_out;
44667+ }
44668+
44669 if (flags & MS_REMOUNT)
44670 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
44671 data_page);
44672@@ -2353,6 +2369,9 @@ long do_mount(char *dev_name, char *dir_
44673 dev_name, data_page);
44674 dput_out:
44675 path_put(&path);
44676+
44677+ gr_log_mount(dev_name, dir_name, retval);
44678+
44679 return retval;
44680 }
44681
44682@@ -2576,6 +2595,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
44683 if (error)
44684 goto out2;
44685
44686+ if (gr_handle_chroot_pivot()) {
44687+ error = -EPERM;
44688+ goto out2;
44689+ }
44690+
44691 get_fs_root(current->fs, &root);
44692 error = lock_mount(&old);
44693 if (error)
44694diff -urNp linux-3.1.1/fs/ncpfs/dir.c linux-3.1.1/fs/ncpfs/dir.c
44695--- linux-3.1.1/fs/ncpfs/dir.c 2011-11-11 15:19:27.000000000 -0500
44696+++ linux-3.1.1/fs/ncpfs/dir.c 2011-11-16 18:40:29.000000000 -0500
44697@@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
44698 int res, val = 0, len;
44699 __u8 __name[NCP_MAXPATHLEN + 1];
44700
44701+ pax_track_stack();
44702+
44703 if (dentry == dentry->d_sb->s_root)
44704 return 1;
44705
44706@@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct
44707 int error, res, len;
44708 __u8 __name[NCP_MAXPATHLEN + 1];
44709
44710+ pax_track_stack();
44711+
44712 error = -EIO;
44713 if (!ncp_conn_valid(server))
44714 goto finished;
44715@@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
44716 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
44717 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
44718
44719+ pax_track_stack();
44720+
44721 ncp_age_dentry(server, dentry);
44722 len = sizeof(__name);
44723 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
44724@@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir,
44725 int error, len;
44726 __u8 __name[NCP_MAXPATHLEN + 1];
44727
44728+ pax_track_stack();
44729+
44730 DPRINTK("ncp_mkdir: making %s/%s\n",
44731 dentry->d_parent->d_name.name, dentry->d_name.name);
44732
44733@@ -1140,6 +1148,8 @@ static int ncp_rename(struct inode *old_
44734 int old_len, new_len;
44735 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
44736
44737+ pax_track_stack();
44738+
44739 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
44740 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
44741 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
44742diff -urNp linux-3.1.1/fs/ncpfs/inode.c linux-3.1.1/fs/ncpfs/inode.c
44743--- linux-3.1.1/fs/ncpfs/inode.c 2011-11-11 15:19:27.000000000 -0500
44744+++ linux-3.1.1/fs/ncpfs/inode.c 2011-11-16 18:40:29.000000000 -0500
44745@@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
44746 #endif
44747 struct ncp_entry_info finfo;
44748
44749+ pax_track_stack();
44750+
44751 memset(&data, 0, sizeof(data));
44752 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
44753 if (!server)
44754diff -urNp linux-3.1.1/fs/nfs/blocklayout/blocklayout.c linux-3.1.1/fs/nfs/blocklayout/blocklayout.c
44755--- linux-3.1.1/fs/nfs/blocklayout/blocklayout.c 2011-11-11 15:19:27.000000000 -0500
44756+++ linux-3.1.1/fs/nfs/blocklayout/blocklayout.c 2011-11-16 18:39:08.000000000 -0500
44757@@ -90,7 +90,7 @@ static int is_writable(struct pnfs_block
44758 */
44759 struct parallel_io {
44760 struct kref refcnt;
44761- struct rpc_call_ops call_ops;
44762+ rpc_call_ops_no_const call_ops;
44763 void (*pnfs_callback) (void *data);
44764 void *data;
44765 };
44766diff -urNp linux-3.1.1/fs/nfs/inode.c linux-3.1.1/fs/nfs/inode.c
44767--- linux-3.1.1/fs/nfs/inode.c 2011-11-11 15:19:27.000000000 -0500
44768+++ linux-3.1.1/fs/nfs/inode.c 2011-11-16 18:39:08.000000000 -0500
44769@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
44770 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
44771 nfsi->attrtimeo_timestamp = jiffies;
44772
44773- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
44774+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
44775 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
44776 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
44777 else
44778@@ -1002,16 +1002,16 @@ static int nfs_size_need_update(const st
44779 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
44780 }
44781
44782-static atomic_long_t nfs_attr_generation_counter;
44783+static atomic_long_unchecked_t nfs_attr_generation_counter;
44784
44785 static unsigned long nfs_read_attr_generation_counter(void)
44786 {
44787- return atomic_long_read(&nfs_attr_generation_counter);
44788+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
44789 }
44790
44791 unsigned long nfs_inc_attr_generation_counter(void)
44792 {
44793- return atomic_long_inc_return(&nfs_attr_generation_counter);
44794+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
44795 }
44796
44797 void nfs_fattr_init(struct nfs_fattr *fattr)
44798diff -urNp linux-3.1.1/fs/nfsd/nfs4state.c linux-3.1.1/fs/nfsd/nfs4state.c
44799--- linux-3.1.1/fs/nfsd/nfs4state.c 2011-11-11 15:19:27.000000000 -0500
44800+++ linux-3.1.1/fs/nfsd/nfs4state.c 2011-11-16 18:40:29.000000000 -0500
44801@@ -3999,6 +3999,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
44802 unsigned int strhashval;
44803 int err;
44804
44805+ pax_track_stack();
44806+
44807 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
44808 (long long) lock->lk_offset,
44809 (long long) lock->lk_length);
44810diff -urNp linux-3.1.1/fs/nfsd/nfs4xdr.c linux-3.1.1/fs/nfsd/nfs4xdr.c
44811--- linux-3.1.1/fs/nfsd/nfs4xdr.c 2011-11-11 15:19:27.000000000 -0500
44812+++ linux-3.1.1/fs/nfsd/nfs4xdr.c 2011-11-16 18:40:29.000000000 -0500
44813@@ -1875,6 +1875,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
44814 .dentry = dentry,
44815 };
44816
44817+ pax_track_stack();
44818+
44819 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
44820 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
44821 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
44822diff -urNp linux-3.1.1/fs/nfsd/vfs.c linux-3.1.1/fs/nfsd/vfs.c
44823--- linux-3.1.1/fs/nfsd/vfs.c 2011-11-11 15:19:27.000000000 -0500
44824+++ linux-3.1.1/fs/nfsd/vfs.c 2011-11-16 18:39:08.000000000 -0500
44825@@ -896,7 +896,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
44826 } else {
44827 oldfs = get_fs();
44828 set_fs(KERNEL_DS);
44829- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
44830+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
44831 set_fs(oldfs);
44832 }
44833
44834@@ -1000,7 +1000,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
44835
44836 /* Write the data. */
44837 oldfs = get_fs(); set_fs(KERNEL_DS);
44838- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
44839+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
44840 set_fs(oldfs);
44841 if (host_err < 0)
44842 goto out_nfserr;
44843@@ -1535,7 +1535,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
44844 */
44845
44846 oldfs = get_fs(); set_fs(KERNEL_DS);
44847- host_err = inode->i_op->readlink(dentry, buf, *lenp);
44848+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
44849 set_fs(oldfs);
44850
44851 if (host_err < 0)
44852diff -urNp linux-3.1.1/fs/notify/fanotify/fanotify_user.c linux-3.1.1/fs/notify/fanotify/fanotify_user.c
44853--- linux-3.1.1/fs/notify/fanotify/fanotify_user.c 2011-11-11 15:19:27.000000000 -0500
44854+++ linux-3.1.1/fs/notify/fanotify/fanotify_user.c 2011-11-16 18:39:08.000000000 -0500
44855@@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct
44856 goto out_close_fd;
44857
44858 ret = -EFAULT;
44859- if (copy_to_user(buf, &fanotify_event_metadata,
44860+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
44861+ copy_to_user(buf, &fanotify_event_metadata,
44862 fanotify_event_metadata.event_len))
44863 goto out_kill_access_response;
44864
44865diff -urNp linux-3.1.1/fs/notify/notification.c linux-3.1.1/fs/notify/notification.c
44866--- linux-3.1.1/fs/notify/notification.c 2011-11-11 15:19:27.000000000 -0500
44867+++ linux-3.1.1/fs/notify/notification.c 2011-11-16 18:39:08.000000000 -0500
44868@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
44869 * get set to 0 so it will never get 'freed'
44870 */
44871 static struct fsnotify_event *q_overflow_event;
44872-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44873+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44874
44875 /**
44876 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
44877@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
44878 */
44879 u32 fsnotify_get_cookie(void)
44880 {
44881- return atomic_inc_return(&fsnotify_sync_cookie);
44882+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
44883 }
44884 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
44885
44886diff -urNp linux-3.1.1/fs/ntfs/dir.c linux-3.1.1/fs/ntfs/dir.c
44887--- linux-3.1.1/fs/ntfs/dir.c 2011-11-11 15:19:27.000000000 -0500
44888+++ linux-3.1.1/fs/ntfs/dir.c 2011-11-16 18:39:08.000000000 -0500
44889@@ -1329,7 +1329,7 @@ find_next_index_buffer:
44890 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
44891 ~(s64)(ndir->itype.index.block_size - 1)));
44892 /* Bounds checks. */
44893- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44894+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44895 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
44896 "inode 0x%lx or driver bug.", vdir->i_ino);
44897 goto err_out;
44898diff -urNp linux-3.1.1/fs/ntfs/file.c linux-3.1.1/fs/ntfs/file.c
44899--- linux-3.1.1/fs/ntfs/file.c 2011-11-11 15:19:27.000000000 -0500
44900+++ linux-3.1.1/fs/ntfs/file.c 2011-11-16 18:39:08.000000000 -0500
44901@@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_
44902 #endif /* NTFS_RW */
44903 };
44904
44905-const struct file_operations ntfs_empty_file_ops = {};
44906+const struct file_operations ntfs_empty_file_ops __read_only;
44907
44908-const struct inode_operations ntfs_empty_inode_ops = {};
44909+const struct inode_operations ntfs_empty_inode_ops __read_only;
44910diff -urNp linux-3.1.1/fs/ocfs2/localalloc.c linux-3.1.1/fs/ocfs2/localalloc.c
44911--- linux-3.1.1/fs/ocfs2/localalloc.c 2011-11-11 15:19:27.000000000 -0500
44912+++ linux-3.1.1/fs/ocfs2/localalloc.c 2011-11-16 18:39:08.000000000 -0500
44913@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
44914 goto bail;
44915 }
44916
44917- atomic_inc(&osb->alloc_stats.moves);
44918+ atomic_inc_unchecked(&osb->alloc_stats.moves);
44919
44920 bail:
44921 if (handle)
44922diff -urNp linux-3.1.1/fs/ocfs2/namei.c linux-3.1.1/fs/ocfs2/namei.c
44923--- linux-3.1.1/fs/ocfs2/namei.c 2011-11-11 15:19:27.000000000 -0500
44924+++ linux-3.1.1/fs/ocfs2/namei.c 2011-11-16 18:40:29.000000000 -0500
44925@@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
44926 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
44927 struct ocfs2_dir_lookup_result target_insert = { NULL, };
44928
44929+ pax_track_stack();
44930+
44931 /* At some point it might be nice to break this function up a
44932 * bit. */
44933
44934diff -urNp linux-3.1.1/fs/ocfs2/ocfs2.h linux-3.1.1/fs/ocfs2/ocfs2.h
44935--- linux-3.1.1/fs/ocfs2/ocfs2.h 2011-11-11 15:19:27.000000000 -0500
44936+++ linux-3.1.1/fs/ocfs2/ocfs2.h 2011-11-16 18:39:08.000000000 -0500
44937@@ -235,11 +235,11 @@ enum ocfs2_vol_state
44938
44939 struct ocfs2_alloc_stats
44940 {
44941- atomic_t moves;
44942- atomic_t local_data;
44943- atomic_t bitmap_data;
44944- atomic_t bg_allocs;
44945- atomic_t bg_extends;
44946+ atomic_unchecked_t moves;
44947+ atomic_unchecked_t local_data;
44948+ atomic_unchecked_t bitmap_data;
44949+ atomic_unchecked_t bg_allocs;
44950+ atomic_unchecked_t bg_extends;
44951 };
44952
44953 enum ocfs2_local_alloc_state
44954diff -urNp linux-3.1.1/fs/ocfs2/suballoc.c linux-3.1.1/fs/ocfs2/suballoc.c
44955--- linux-3.1.1/fs/ocfs2/suballoc.c 2011-11-11 15:19:27.000000000 -0500
44956+++ linux-3.1.1/fs/ocfs2/suballoc.c 2011-11-16 18:39:08.000000000 -0500
44957@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
44958 mlog_errno(status);
44959 goto bail;
44960 }
44961- atomic_inc(&osb->alloc_stats.bg_extends);
44962+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
44963
44964 /* You should never ask for this much metadata */
44965 BUG_ON(bits_wanted >
44966@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
44967 mlog_errno(status);
44968 goto bail;
44969 }
44970- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44971+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44972
44973 *suballoc_loc = res.sr_bg_blkno;
44974 *suballoc_bit_start = res.sr_bit_offset;
44975@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
44976 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
44977 res->sr_bits);
44978
44979- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44980+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44981
44982 BUG_ON(res->sr_bits != 1);
44983
44984@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
44985 mlog_errno(status);
44986 goto bail;
44987 }
44988- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44989+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44990
44991 BUG_ON(res.sr_bits != 1);
44992
44993@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
44994 cluster_start,
44995 num_clusters);
44996 if (!status)
44997- atomic_inc(&osb->alloc_stats.local_data);
44998+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
44999 } else {
45000 if (min_clusters > (osb->bitmap_cpg - 1)) {
45001 /* The only paths asking for contiguousness
45002@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
45003 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
45004 res.sr_bg_blkno,
45005 res.sr_bit_offset);
45006- atomic_inc(&osb->alloc_stats.bitmap_data);
45007+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
45008 *num_clusters = res.sr_bits;
45009 }
45010 }
45011diff -urNp linux-3.1.1/fs/ocfs2/super.c linux-3.1.1/fs/ocfs2/super.c
45012--- linux-3.1.1/fs/ocfs2/super.c 2011-11-11 15:19:27.000000000 -0500
45013+++ linux-3.1.1/fs/ocfs2/super.c 2011-11-16 18:39:08.000000000 -0500
45014@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
45015 "%10s => GlobalAllocs: %d LocalAllocs: %d "
45016 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
45017 "Stats",
45018- atomic_read(&osb->alloc_stats.bitmap_data),
45019- atomic_read(&osb->alloc_stats.local_data),
45020- atomic_read(&osb->alloc_stats.bg_allocs),
45021- atomic_read(&osb->alloc_stats.moves),
45022- atomic_read(&osb->alloc_stats.bg_extends));
45023+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
45024+ atomic_read_unchecked(&osb->alloc_stats.local_data),
45025+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
45026+ atomic_read_unchecked(&osb->alloc_stats.moves),
45027+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
45028
45029 out += snprintf(buf + out, len - out,
45030 "%10s => State: %u Descriptor: %llu Size: %u bits "
45031@@ -2112,11 +2112,11 @@ static int ocfs2_initialize_super(struct
45032 spin_lock_init(&osb->osb_xattr_lock);
45033 ocfs2_init_steal_slots(osb);
45034
45035- atomic_set(&osb->alloc_stats.moves, 0);
45036- atomic_set(&osb->alloc_stats.local_data, 0);
45037- atomic_set(&osb->alloc_stats.bitmap_data, 0);
45038- atomic_set(&osb->alloc_stats.bg_allocs, 0);
45039- atomic_set(&osb->alloc_stats.bg_extends, 0);
45040+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
45041+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
45042+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
45043+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
45044+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
45045
45046 /* Copy the blockcheck stats from the superblock probe */
45047 osb->osb_ecc_stats = *stats;
45048diff -urNp linux-3.1.1/fs/ocfs2/symlink.c linux-3.1.1/fs/ocfs2/symlink.c
45049--- linux-3.1.1/fs/ocfs2/symlink.c 2011-11-11 15:19:27.000000000 -0500
45050+++ linux-3.1.1/fs/ocfs2/symlink.c 2011-11-16 18:39:08.000000000 -0500
45051@@ -142,7 +142,7 @@ bail:
45052
45053 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
45054 {
45055- char *link = nd_get_link(nd);
45056+ const char *link = nd_get_link(nd);
45057 if (!IS_ERR(link))
45058 kfree(link);
45059 }
45060diff -urNp linux-3.1.1/fs/open.c linux-3.1.1/fs/open.c
45061--- linux-3.1.1/fs/open.c 2011-11-11 15:19:27.000000000 -0500
45062+++ linux-3.1.1/fs/open.c 2011-11-16 23:40:57.000000000 -0500
45063@@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
45064 error = locks_verify_truncate(inode, NULL, length);
45065 if (!error)
45066 error = security_path_truncate(&path);
45067+
45068+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
45069+ error = -EACCES;
45070+
45071 if (!error)
45072 error = do_truncate(path.dentry, length, 0, NULL);
45073
45074@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
45075 if (__mnt_is_readonly(path.mnt))
45076 res = -EROFS;
45077
45078+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
45079+ res = -EACCES;
45080+
45081 out_path_release:
45082 path_put(&path);
45083 out:
45084@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
45085 if (error)
45086 goto dput_and_out;
45087
45088+ gr_log_chdir(path.dentry, path.mnt);
45089+
45090 set_fs_pwd(current->fs, &path);
45091
45092 dput_and_out:
45093@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
45094 goto out_putf;
45095
45096 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
45097+
45098+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
45099+ error = -EPERM;
45100+
45101+ if (!error)
45102+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
45103+
45104 if (!error)
45105 set_fs_pwd(current->fs, &file->f_path);
45106 out_putf:
45107@@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __use
45108 if (error)
45109 goto dput_and_out;
45110
45111+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
45112+ goto dput_and_out;
45113+
45114 set_fs_root(current->fs, &path);
45115+
45116+ gr_handle_chroot_chdir(&path);
45117+
45118 error = 0;
45119 dput_and_out:
45120 path_put(&path);
45121@@ -456,6 +478,16 @@ static int chmod_common(struct path *pat
45122 if (error)
45123 return error;
45124 mutex_lock(&inode->i_mutex);
45125+
45126+ if (!gr_acl_handle_fchmod(path->dentry, path->mnt, mode)) {
45127+ error = -EACCES;
45128+ goto out_unlock;
45129+ }
45130+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
45131+ error = -EACCES;
45132+ goto out_unlock;
45133+ }
45134+
45135 error = security_path_chmod(path->dentry, path->mnt, mode);
45136 if (error)
45137 goto out_unlock;
45138@@ -506,6 +538,9 @@ static int chown_common(struct path *pat
45139 int error;
45140 struct iattr newattrs;
45141
45142+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
45143+ return -EACCES;
45144+
45145 newattrs.ia_valid = ATTR_CTIME;
45146 if (user != (uid_t) -1) {
45147 newattrs.ia_valid |= ATTR_UID;
45148@@ -976,7 +1011,8 @@ long do_sys_open(int dfd, const char __u
45149 if (!IS_ERR(tmp)) {
45150 fd = get_unused_fd_flags(flags);
45151 if (fd >= 0) {
45152- struct file *f = do_filp_open(dfd, tmp, &op, lookup);
45153+ struct file *f;
45154+ f = do_filp_open(dfd, tmp, &op, lookup);
45155 if (IS_ERR(f)) {
45156 put_unused_fd(fd);
45157 fd = PTR_ERR(f);
45158diff -urNp linux-3.1.1/fs/partitions/ldm.c linux-3.1.1/fs/partitions/ldm.c
45159--- linux-3.1.1/fs/partitions/ldm.c 2011-11-11 15:19:27.000000000 -0500
45160+++ linux-3.1.1/fs/partitions/ldm.c 2011-11-16 18:40:29.000000000 -0500
45161@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
45162 ldm_error ("A VBLK claims to have %d parts.", num);
45163 return false;
45164 }
45165+
45166 if (rec >= num) {
45167 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
45168 return false;
45169@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
45170 goto found;
45171 }
45172
45173- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
45174+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
45175 if (!f) {
45176 ldm_crit ("Out of memory.");
45177 return false;
45178diff -urNp linux-3.1.1/fs/pipe.c linux-3.1.1/fs/pipe.c
45179--- linux-3.1.1/fs/pipe.c 2011-11-11 15:19:27.000000000 -0500
45180+++ linux-3.1.1/fs/pipe.c 2011-11-16 18:40:29.000000000 -0500
45181@@ -420,9 +420,9 @@ redo:
45182 }
45183 if (bufs) /* More to do? */
45184 continue;
45185- if (!pipe->writers)
45186+ if (!atomic_read(&pipe->writers))
45187 break;
45188- if (!pipe->waiting_writers) {
45189+ if (!atomic_read(&pipe->waiting_writers)) {
45190 /* syscall merging: Usually we must not sleep
45191 * if O_NONBLOCK is set, or if we got some data.
45192 * But if a writer sleeps in kernel space, then
45193@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
45194 mutex_lock(&inode->i_mutex);
45195 pipe = inode->i_pipe;
45196
45197- if (!pipe->readers) {
45198+ if (!atomic_read(&pipe->readers)) {
45199 send_sig(SIGPIPE, current, 0);
45200 ret = -EPIPE;
45201 goto out;
45202@@ -530,7 +530,7 @@ redo1:
45203 for (;;) {
45204 int bufs;
45205
45206- if (!pipe->readers) {
45207+ if (!atomic_read(&pipe->readers)) {
45208 send_sig(SIGPIPE, current, 0);
45209 if (!ret)
45210 ret = -EPIPE;
45211@@ -616,9 +616,9 @@ redo2:
45212 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
45213 do_wakeup = 0;
45214 }
45215- pipe->waiting_writers++;
45216+ atomic_inc(&pipe->waiting_writers);
45217 pipe_wait(pipe);
45218- pipe->waiting_writers--;
45219+ atomic_dec(&pipe->waiting_writers);
45220 }
45221 out:
45222 mutex_unlock(&inode->i_mutex);
45223@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
45224 mask = 0;
45225 if (filp->f_mode & FMODE_READ) {
45226 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
45227- if (!pipe->writers && filp->f_version != pipe->w_counter)
45228+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
45229 mask |= POLLHUP;
45230 }
45231
45232@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
45233 * Most Unices do not set POLLERR for FIFOs but on Linux they
45234 * behave exactly like pipes for poll().
45235 */
45236- if (!pipe->readers)
45237+ if (!atomic_read(&pipe->readers))
45238 mask |= POLLERR;
45239 }
45240
45241@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
45242
45243 mutex_lock(&inode->i_mutex);
45244 pipe = inode->i_pipe;
45245- pipe->readers -= decr;
45246- pipe->writers -= decw;
45247+ atomic_sub(decr, &pipe->readers);
45248+ atomic_sub(decw, &pipe->writers);
45249
45250- if (!pipe->readers && !pipe->writers) {
45251+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
45252 free_pipe_info(inode);
45253 } else {
45254 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
45255@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
45256
45257 if (inode->i_pipe) {
45258 ret = 0;
45259- inode->i_pipe->readers++;
45260+ atomic_inc(&inode->i_pipe->readers);
45261 }
45262
45263 mutex_unlock(&inode->i_mutex);
45264@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
45265
45266 if (inode->i_pipe) {
45267 ret = 0;
45268- inode->i_pipe->writers++;
45269+ atomic_inc(&inode->i_pipe->writers);
45270 }
45271
45272 mutex_unlock(&inode->i_mutex);
45273@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
45274 if (inode->i_pipe) {
45275 ret = 0;
45276 if (filp->f_mode & FMODE_READ)
45277- inode->i_pipe->readers++;
45278+ atomic_inc(&inode->i_pipe->readers);
45279 if (filp->f_mode & FMODE_WRITE)
45280- inode->i_pipe->writers++;
45281+ atomic_inc(&inode->i_pipe->writers);
45282 }
45283
45284 mutex_unlock(&inode->i_mutex);
45285@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
45286 inode->i_pipe = NULL;
45287 }
45288
45289-static struct vfsmount *pipe_mnt __read_mostly;
45290+struct vfsmount *pipe_mnt __read_mostly;
45291
45292 /*
45293 * pipefs_dname() is called from d_path().
45294@@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
45295 goto fail_iput;
45296 inode->i_pipe = pipe;
45297
45298- pipe->readers = pipe->writers = 1;
45299+ atomic_set(&pipe->readers, 1);
45300+ atomic_set(&pipe->writers, 1);
45301 inode->i_fop = &rdwr_pipefifo_fops;
45302
45303 /*
45304diff -urNp linux-3.1.1/fs/proc/array.c linux-3.1.1/fs/proc/array.c
45305--- linux-3.1.1/fs/proc/array.c 2011-11-11 15:19:27.000000000 -0500
45306+++ linux-3.1.1/fs/proc/array.c 2011-11-16 18:40:29.000000000 -0500
45307@@ -60,6 +60,7 @@
45308 #include <linux/tty.h>
45309 #include <linux/string.h>
45310 #include <linux/mman.h>
45311+#include <linux/grsecurity.h>
45312 #include <linux/proc_fs.h>
45313 #include <linux/ioport.h>
45314 #include <linux/uaccess.h>
45315@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
45316 seq_putc(m, '\n');
45317 }
45318
45319+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45320+static inline void task_pax(struct seq_file *m, struct task_struct *p)
45321+{
45322+ if (p->mm)
45323+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
45324+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
45325+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
45326+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
45327+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
45328+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
45329+ else
45330+ seq_printf(m, "PaX:\t-----\n");
45331+}
45332+#endif
45333+
45334 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45335 struct pid *pid, struct task_struct *task)
45336 {
45337@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m,
45338 task_cpus_allowed(m, task);
45339 cpuset_task_status_allowed(m, task);
45340 task_context_switch_counts(m, task);
45341+
45342+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45343+ task_pax(m, task);
45344+#endif
45345+
45346+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
45347+ task_grsec_rbac(m, task);
45348+#endif
45349+
45350 return 0;
45351 }
45352
45353+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45354+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45355+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45356+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45357+#endif
45358+
45359 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45360 struct pid *pid, struct task_struct *task, int whole)
45361 {
45362@@ -375,9 +406,11 @@ static int do_task_stat(struct seq_file
45363 cputime_t cutime, cstime, utime, stime;
45364 cputime_t cgtime, gtime;
45365 unsigned long rsslim = 0;
45366- char tcomm[sizeof(task->comm)];
45367+ char tcomm[sizeof(task->comm)] = { 0 };
45368 unsigned long flags;
45369
45370+ pax_track_stack();
45371+
45372 state = *get_task_state(task);
45373 vsize = eip = esp = 0;
45374 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
45375@@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file
45376 gtime = task->gtime;
45377 }
45378
45379+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45380+ if (PAX_RAND_FLAGS(mm)) {
45381+ eip = 0;
45382+ esp = 0;
45383+ wchan = 0;
45384+ }
45385+#endif
45386+#ifdef CONFIG_GRKERNSEC_HIDESYM
45387+ wchan = 0;
45388+ eip =0;
45389+ esp =0;
45390+#endif
45391+
45392 /* scale priority and nice values from timeslices to -20..20 */
45393 /* to make it look like a "normal" Unix priority/nice value */
45394 priority = task_prio(task);
45395@@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file
45396 vsize,
45397 mm ? get_mm_rss(mm) : 0,
45398 rsslim,
45399+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45400+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
45401+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
45402+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
45403+#else
45404 mm ? (permitted ? mm->start_code : 1) : 0,
45405 mm ? (permitted ? mm->end_code : 1) : 0,
45406 (permitted && mm) ? mm->start_stack : 0,
45407+#endif
45408 esp,
45409 eip,
45410 /* The signal information here is obsolete.
45411@@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
45412
45413 return 0;
45414 }
45415+
45416+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45417+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
45418+{
45419+ u32 curr_ip = 0;
45420+ unsigned long flags;
45421+
45422+ if (lock_task_sighand(task, &flags)) {
45423+ curr_ip = task->signal->curr_ip;
45424+ unlock_task_sighand(task, &flags);
45425+ }
45426+
45427+ return sprintf(buffer, "%pI4\n", &curr_ip);
45428+}
45429+#endif
45430diff -urNp linux-3.1.1/fs/proc/base.c linux-3.1.1/fs/proc/base.c
45431--- linux-3.1.1/fs/proc/base.c 2011-11-11 15:19:27.000000000 -0500
45432+++ linux-3.1.1/fs/proc/base.c 2011-11-16 19:25:48.000000000 -0500
45433@@ -107,6 +107,22 @@ struct pid_entry {
45434 union proc_op op;
45435 };
45436
45437+struct getdents_callback {
45438+ struct linux_dirent __user * current_dir;
45439+ struct linux_dirent __user * previous;
45440+ struct file * file;
45441+ int count;
45442+ int error;
45443+};
45444+
45445+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
45446+ loff_t offset, u64 ino, unsigned int d_type)
45447+{
45448+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
45449+ buf->error = -EINVAL;
45450+ return 0;
45451+}
45452+
45453 #define NOD(NAME, MODE, IOP, FOP, OP) { \
45454 .name = (NAME), \
45455 .len = sizeof(NAME) - 1, \
45456@@ -209,6 +225,9 @@ static struct mm_struct *__check_mem_per
45457 if (task == current)
45458 return mm;
45459
45460+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
45461+ return ERR_PTR(-EPERM);
45462+
45463 /*
45464 * If current is actively ptrace'ing, and would also be
45465 * permitted to freshly attach with ptrace now, permit it.
45466@@ -282,6 +301,9 @@ static int proc_pid_cmdline(struct task_
45467 if (!mm->arg_end)
45468 goto out_mm; /* Shh! No looking before we're done */
45469
45470+ if (gr_acl_handle_procpidmem(task))
45471+ goto out_mm;
45472+
45473 len = mm->arg_end - mm->arg_start;
45474
45475 if (len > PAGE_SIZE)
45476@@ -309,12 +331,28 @@ out:
45477 return res;
45478 }
45479
45480+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45481+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45482+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45483+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45484+#endif
45485+
45486 static int proc_pid_auxv(struct task_struct *task, char *buffer)
45487 {
45488 struct mm_struct *mm = mm_for_maps(task);
45489 int res = PTR_ERR(mm);
45490 if (mm && !IS_ERR(mm)) {
45491 unsigned int nwords = 0;
45492+
45493+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45494+ /* allow if we're currently ptracing this task */
45495+ if (PAX_RAND_FLAGS(mm) &&
45496+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
45497+ mmput(mm);
45498+ return 0;
45499+ }
45500+#endif
45501+
45502 do {
45503 nwords += 2;
45504 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
45505@@ -328,7 +366,7 @@ static int proc_pid_auxv(struct task_str
45506 }
45507
45508
45509-#ifdef CONFIG_KALLSYMS
45510+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45511 /*
45512 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
45513 * Returns the resolved symbol. If that fails, simply return the address.
45514@@ -367,7 +405,7 @@ static void unlock_trace(struct task_str
45515 mutex_unlock(&task->signal->cred_guard_mutex);
45516 }
45517
45518-#ifdef CONFIG_STACKTRACE
45519+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45520
45521 #define MAX_STACK_TRACE_DEPTH 64
45522
45523@@ -558,7 +596,7 @@ static int proc_pid_limits(struct task_s
45524 return count;
45525 }
45526
45527-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45528+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45529 static int proc_pid_syscall(struct task_struct *task, char *buffer)
45530 {
45531 long nr;
45532@@ -587,7 +625,7 @@ static int proc_pid_syscall(struct task_
45533 /************************************************************************/
45534
45535 /* permission checks */
45536-static int proc_fd_access_allowed(struct inode *inode)
45537+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
45538 {
45539 struct task_struct *task;
45540 int allowed = 0;
45541@@ -597,7 +635,10 @@ static int proc_fd_access_allowed(struct
45542 */
45543 task = get_proc_task(inode);
45544 if (task) {
45545- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45546+ if (log)
45547+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
45548+ else
45549+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45550 put_task_struct(task);
45551 }
45552 return allowed;
45553@@ -978,6 +1019,9 @@ static ssize_t environ_read(struct file
45554 if (!task)
45555 goto out_no_task;
45556
45557+ if (gr_acl_handle_procpidmem(task))
45558+ goto out;
45559+
45560 ret = -ENOMEM;
45561 page = (char *)__get_free_page(GFP_TEMPORARY);
45562 if (!page)
45563@@ -1613,7 +1657,7 @@ static void *proc_pid_follow_link(struct
45564 path_put(&nd->path);
45565
45566 /* Are we allowed to snoop on the tasks file descriptors? */
45567- if (!proc_fd_access_allowed(inode))
45568+ if (!proc_fd_access_allowed(inode,0))
45569 goto out;
45570
45571 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
45572@@ -1652,8 +1696,18 @@ static int proc_pid_readlink(struct dent
45573 struct path path;
45574
45575 /* Are we allowed to snoop on the tasks file descriptors? */
45576- if (!proc_fd_access_allowed(inode))
45577- goto out;
45578+ /* logging this is needed for learning on chromium to work properly,
45579+ but we don't want to flood the logs from 'ps' which does a readlink
45580+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
45581+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
45582+ */
45583+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
45584+ if (!proc_fd_access_allowed(inode,0))
45585+ goto out;
45586+ } else {
45587+ if (!proc_fd_access_allowed(inode,1))
45588+ goto out;
45589+ }
45590
45591 error = PROC_I(inode)->op.proc_get_link(inode, &path);
45592 if (error)
45593@@ -1718,7 +1772,11 @@ struct inode *proc_pid_make_inode(struct
45594 rcu_read_lock();
45595 cred = __task_cred(task);
45596 inode->i_uid = cred->euid;
45597+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45598+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45599+#else
45600 inode->i_gid = cred->egid;
45601+#endif
45602 rcu_read_unlock();
45603 }
45604 security_task_to_inode(task, inode);
45605@@ -1736,6 +1794,9 @@ int pid_getattr(struct vfsmount *mnt, st
45606 struct inode *inode = dentry->d_inode;
45607 struct task_struct *task;
45608 const struct cred *cred;
45609+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45610+ const struct cred *tmpcred = current_cred();
45611+#endif
45612
45613 generic_fillattr(inode, stat);
45614
45615@@ -1743,13 +1804,41 @@ int pid_getattr(struct vfsmount *mnt, st
45616 stat->uid = 0;
45617 stat->gid = 0;
45618 task = pid_task(proc_pid(inode), PIDTYPE_PID);
45619+
45620+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
45621+ rcu_read_unlock();
45622+ return -ENOENT;
45623+ }
45624+
45625 if (task) {
45626+ cred = __task_cred(task);
45627+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45628+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
45629+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45630+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45631+#endif
45632+ ) {
45633+#endif
45634 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45635+#ifdef CONFIG_GRKERNSEC_PROC_USER
45636+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45637+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45638+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45639+#endif
45640 task_dumpable(task)) {
45641- cred = __task_cred(task);
45642 stat->uid = cred->euid;
45643+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45644+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
45645+#else
45646 stat->gid = cred->egid;
45647+#endif
45648+ }
45649+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45650+ } else {
45651+ rcu_read_unlock();
45652+ return -ENOENT;
45653 }
45654+#endif
45655 }
45656 rcu_read_unlock();
45657 return 0;
45658@@ -1786,11 +1875,20 @@ int pid_revalidate(struct dentry *dentry
45659
45660 if (task) {
45661 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45662+#ifdef CONFIG_GRKERNSEC_PROC_USER
45663+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45664+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45665+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45666+#endif
45667 task_dumpable(task)) {
45668 rcu_read_lock();
45669 cred = __task_cred(task);
45670 inode->i_uid = cred->euid;
45671+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45672+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45673+#else
45674 inode->i_gid = cred->egid;
45675+#endif
45676 rcu_read_unlock();
45677 } else {
45678 inode->i_uid = 0;
45679@@ -1908,7 +2006,8 @@ static int proc_fd_info(struct inode *in
45680 int fd = proc_fd(inode);
45681
45682 if (task) {
45683- files = get_files_struct(task);
45684+ if (!gr_acl_handle_procpidmem(task))
45685+ files = get_files_struct(task);
45686 put_task_struct(task);
45687 }
45688 if (files) {
45689@@ -2176,11 +2275,21 @@ static const struct file_operations proc
45690 */
45691 static int proc_fd_permission(struct inode *inode, int mask)
45692 {
45693+ struct task_struct *task;
45694 int rv = generic_permission(inode, mask);
45695- if (rv == 0)
45696- return 0;
45697+
45698 if (task_pid(current) == proc_pid(inode))
45699 rv = 0;
45700+
45701+ task = get_proc_task(inode);
45702+ if (task == NULL)
45703+ return rv;
45704+
45705+ if (gr_acl_handle_procpidmem(task))
45706+ rv = -EACCES;
45707+
45708+ put_task_struct(task);
45709+
45710 return rv;
45711 }
45712
45713@@ -2290,6 +2399,9 @@ static struct dentry *proc_pident_lookup
45714 if (!task)
45715 goto out_no_task;
45716
45717+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45718+ goto out;
45719+
45720 /*
45721 * Yes, it does not scale. And it should not. Don't add
45722 * new entries into /proc/<tgid>/ without very good reasons.
45723@@ -2334,6 +2446,9 @@ static int proc_pident_readdir(struct fi
45724 if (!task)
45725 goto out_no_task;
45726
45727+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45728+ goto out;
45729+
45730 ret = 0;
45731 i = filp->f_pos;
45732 switch (i) {
45733@@ -2604,7 +2719,7 @@ static void *proc_self_follow_link(struc
45734 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
45735 void *cookie)
45736 {
45737- char *s = nd_get_link(nd);
45738+ const char *s = nd_get_link(nd);
45739 if (!IS_ERR(s))
45740 __putname(s);
45741 }
45742@@ -2663,6 +2778,7 @@ static struct dentry *proc_base_instanti
45743 if (p->fop)
45744 inode->i_fop = p->fop;
45745 ei->op = p->op;
45746+
45747 d_add(dentry, inode);
45748 error = NULL;
45749 out:
45750@@ -2802,7 +2918,7 @@ static const struct pid_entry tgid_base_
45751 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
45752 #endif
45753 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
45754-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45755+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45756 INF("syscall", S_IRUGO, proc_pid_syscall),
45757 #endif
45758 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45759@@ -2827,10 +2943,10 @@ static const struct pid_entry tgid_base_
45760 #ifdef CONFIG_SECURITY
45761 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45762 #endif
45763-#ifdef CONFIG_KALLSYMS
45764+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45765 INF("wchan", S_IRUGO, proc_pid_wchan),
45766 #endif
45767-#ifdef CONFIG_STACKTRACE
45768+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45769 ONE("stack", S_IRUGO, proc_pid_stack),
45770 #endif
45771 #ifdef CONFIG_SCHEDSTATS
45772@@ -2864,6 +2980,9 @@ static const struct pid_entry tgid_base_
45773 #ifdef CONFIG_HARDWALL
45774 INF("hardwall", S_IRUGO, proc_pid_hardwall),
45775 #endif
45776+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45777+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
45778+#endif
45779 };
45780
45781 static int proc_tgid_base_readdir(struct file * filp,
45782@@ -2989,7 +3108,14 @@ static struct dentry *proc_pid_instantia
45783 if (!inode)
45784 goto out;
45785
45786+#ifdef CONFIG_GRKERNSEC_PROC_USER
45787+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
45788+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45789+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45790+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
45791+#else
45792 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
45793+#endif
45794 inode->i_op = &proc_tgid_base_inode_operations;
45795 inode->i_fop = &proc_tgid_base_operations;
45796 inode->i_flags|=S_IMMUTABLE;
45797@@ -3031,7 +3157,14 @@ struct dentry *proc_pid_lookup(struct in
45798 if (!task)
45799 goto out;
45800
45801+ if (!has_group_leader_pid(task))
45802+ goto out_put_task;
45803+
45804+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45805+ goto out_put_task;
45806+
45807 result = proc_pid_instantiate(dir, dentry, task, NULL);
45808+out_put_task:
45809 put_task_struct(task);
45810 out:
45811 return result;
45812@@ -3096,6 +3229,11 @@ int proc_pid_readdir(struct file * filp,
45813 {
45814 unsigned int nr;
45815 struct task_struct *reaper;
45816+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45817+ const struct cred *tmpcred = current_cred();
45818+ const struct cred *itercred;
45819+#endif
45820+ filldir_t __filldir = filldir;
45821 struct tgid_iter iter;
45822 struct pid_namespace *ns;
45823
45824@@ -3119,8 +3257,27 @@ int proc_pid_readdir(struct file * filp,
45825 for (iter = next_tgid(ns, iter);
45826 iter.task;
45827 iter.tgid += 1, iter = next_tgid(ns, iter)) {
45828+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45829+ rcu_read_lock();
45830+ itercred = __task_cred(iter.task);
45831+#endif
45832+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
45833+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45834+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
45835+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45836+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45837+#endif
45838+ )
45839+#endif
45840+ )
45841+ __filldir = &gr_fake_filldir;
45842+ else
45843+ __filldir = filldir;
45844+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45845+ rcu_read_unlock();
45846+#endif
45847 filp->f_pos = iter.tgid + TGID_OFFSET;
45848- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
45849+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
45850 put_task_struct(iter.task);
45851 goto out;
45852 }
45853@@ -3148,7 +3305,7 @@ static const struct pid_entry tid_base_s
45854 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45855 #endif
45856 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
45857-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45858+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45859 INF("syscall", S_IRUGO, proc_pid_syscall),
45860 #endif
45861 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45862@@ -3172,10 +3329,10 @@ static const struct pid_entry tid_base_s
45863 #ifdef CONFIG_SECURITY
45864 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45865 #endif
45866-#ifdef CONFIG_KALLSYMS
45867+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45868 INF("wchan", S_IRUGO, proc_pid_wchan),
45869 #endif
45870-#ifdef CONFIG_STACKTRACE
45871+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45872 ONE("stack", S_IRUGO, proc_pid_stack),
45873 #endif
45874 #ifdef CONFIG_SCHEDSTATS
45875diff -urNp linux-3.1.1/fs/proc/cmdline.c linux-3.1.1/fs/proc/cmdline.c
45876--- linux-3.1.1/fs/proc/cmdline.c 2011-11-11 15:19:27.000000000 -0500
45877+++ linux-3.1.1/fs/proc/cmdline.c 2011-11-16 18:40:29.000000000 -0500
45878@@ -23,7 +23,11 @@ static const struct file_operations cmdl
45879
45880 static int __init proc_cmdline_init(void)
45881 {
45882+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45883+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
45884+#else
45885 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
45886+#endif
45887 return 0;
45888 }
45889 module_init(proc_cmdline_init);
45890diff -urNp linux-3.1.1/fs/proc/devices.c linux-3.1.1/fs/proc/devices.c
45891--- linux-3.1.1/fs/proc/devices.c 2011-11-11 15:19:27.000000000 -0500
45892+++ linux-3.1.1/fs/proc/devices.c 2011-11-16 18:40:29.000000000 -0500
45893@@ -64,7 +64,11 @@ static const struct file_operations proc
45894
45895 static int __init proc_devices_init(void)
45896 {
45897+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45898+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
45899+#else
45900 proc_create("devices", 0, NULL, &proc_devinfo_operations);
45901+#endif
45902 return 0;
45903 }
45904 module_init(proc_devices_init);
45905diff -urNp linux-3.1.1/fs/proc/inode.c linux-3.1.1/fs/proc/inode.c
45906--- linux-3.1.1/fs/proc/inode.c 2011-11-11 15:19:27.000000000 -0500
45907+++ linux-3.1.1/fs/proc/inode.c 2011-11-16 18:40:29.000000000 -0500
45908@@ -18,12 +18,18 @@
45909 #include <linux/module.h>
45910 #include <linux/sysctl.h>
45911 #include <linux/slab.h>
45912+#include <linux/grsecurity.h>
45913
45914 #include <asm/system.h>
45915 #include <asm/uaccess.h>
45916
45917 #include "internal.h"
45918
45919+#ifdef CONFIG_PROC_SYSCTL
45920+extern const struct inode_operations proc_sys_inode_operations;
45921+extern const struct inode_operations proc_sys_dir_operations;
45922+#endif
45923+
45924 static void proc_evict_inode(struct inode *inode)
45925 {
45926 struct proc_dir_entry *de;
45927@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inod
45928 ns_ops = PROC_I(inode)->ns_ops;
45929 if (ns_ops && ns_ops->put)
45930 ns_ops->put(PROC_I(inode)->ns);
45931+
45932+#ifdef CONFIG_PROC_SYSCTL
45933+ if (inode->i_op == &proc_sys_inode_operations ||
45934+ inode->i_op == &proc_sys_dir_operations)
45935+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
45936+#endif
45937+
45938 }
45939
45940 static struct kmem_cache * proc_inode_cachep;
45941@@ -440,7 +453,11 @@ struct inode *proc_get_inode(struct supe
45942 if (de->mode) {
45943 inode->i_mode = de->mode;
45944 inode->i_uid = de->uid;
45945+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45946+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45947+#else
45948 inode->i_gid = de->gid;
45949+#endif
45950 }
45951 if (de->size)
45952 inode->i_size = de->size;
45953diff -urNp linux-3.1.1/fs/proc/internal.h linux-3.1.1/fs/proc/internal.h
45954--- linux-3.1.1/fs/proc/internal.h 2011-11-11 15:19:27.000000000 -0500
45955+++ linux-3.1.1/fs/proc/internal.h 2011-11-16 18:40:29.000000000 -0500
45956@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
45957 struct pid *pid, struct task_struct *task);
45958 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45959 struct pid *pid, struct task_struct *task);
45960+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45961+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
45962+#endif
45963 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
45964
45965 extern const struct file_operations proc_maps_operations;
45966diff -urNp linux-3.1.1/fs/proc/Kconfig linux-3.1.1/fs/proc/Kconfig
45967--- linux-3.1.1/fs/proc/Kconfig 2011-11-11 15:19:27.000000000 -0500
45968+++ linux-3.1.1/fs/proc/Kconfig 2011-11-16 18:40:29.000000000 -0500
45969@@ -30,12 +30,12 @@ config PROC_FS
45970
45971 config PROC_KCORE
45972 bool "/proc/kcore support" if !ARM
45973- depends on PROC_FS && MMU
45974+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
45975
45976 config PROC_VMCORE
45977 bool "/proc/vmcore support"
45978- depends on PROC_FS && CRASH_DUMP
45979- default y
45980+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
45981+ default n
45982 help
45983 Exports the dump image of crashed kernel in ELF format.
45984
45985@@ -59,8 +59,8 @@ config PROC_SYSCTL
45986 limited in memory.
45987
45988 config PROC_PAGE_MONITOR
45989- default y
45990- depends on PROC_FS && MMU
45991+ default n
45992+ depends on PROC_FS && MMU && !GRKERNSEC
45993 bool "Enable /proc page monitoring" if EXPERT
45994 help
45995 Various /proc files exist to monitor process memory utilization:
45996diff -urNp linux-3.1.1/fs/proc/kcore.c linux-3.1.1/fs/proc/kcore.c
45997--- linux-3.1.1/fs/proc/kcore.c 2011-11-11 15:19:27.000000000 -0500
45998+++ linux-3.1.1/fs/proc/kcore.c 2011-11-16 18:40:29.000000000 -0500
45999@@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
46000 off_t offset = 0;
46001 struct kcore_list *m;
46002
46003+ pax_track_stack();
46004+
46005 /* setup ELF header */
46006 elf = (struct elfhdr *) bufp;
46007 bufp += sizeof(struct elfhdr);
46008@@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
46009 * the addresses in the elf_phdr on our list.
46010 */
46011 start = kc_offset_to_vaddr(*fpos - elf_buflen);
46012- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
46013+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
46014+ if (tsz > buflen)
46015 tsz = buflen;
46016-
46017+
46018 while (buflen) {
46019 struct kcore_list *m;
46020
46021@@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
46022 kfree(elf_buf);
46023 } else {
46024 if (kern_addr_valid(start)) {
46025- unsigned long n;
46026+ char *elf_buf;
46027+ mm_segment_t oldfs;
46028
46029- n = copy_to_user(buffer, (char *)start, tsz);
46030- /*
46031- * We cannot distingush between fault on source
46032- * and fault on destination. When this happens
46033- * we clear too and hope it will trigger the
46034- * EFAULT again.
46035- */
46036- if (n) {
46037- if (clear_user(buffer + tsz - n,
46038- n))
46039+ elf_buf = kmalloc(tsz, GFP_KERNEL);
46040+ if (!elf_buf)
46041+ return -ENOMEM;
46042+ oldfs = get_fs();
46043+ set_fs(KERNEL_DS);
46044+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
46045+ set_fs(oldfs);
46046+ if (copy_to_user(buffer, elf_buf, tsz)) {
46047+ kfree(elf_buf);
46048 return -EFAULT;
46049+ }
46050 }
46051+ set_fs(oldfs);
46052+ kfree(elf_buf);
46053 } else {
46054 if (clear_user(buffer, tsz))
46055 return -EFAULT;
46056@@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
46057
46058 static int open_kcore(struct inode *inode, struct file *filp)
46059 {
46060+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
46061+ return -EPERM;
46062+#endif
46063 if (!capable(CAP_SYS_RAWIO))
46064 return -EPERM;
46065 if (kcore_need_update)
46066diff -urNp linux-3.1.1/fs/proc/meminfo.c linux-3.1.1/fs/proc/meminfo.c
46067--- linux-3.1.1/fs/proc/meminfo.c 2011-11-11 15:19:27.000000000 -0500
46068+++ linux-3.1.1/fs/proc/meminfo.c 2011-11-16 18:40:29.000000000 -0500
46069@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
46070 unsigned long pages[NR_LRU_LISTS];
46071 int lru;
46072
46073+ pax_track_stack();
46074+
46075 /*
46076 * display in kilobytes.
46077 */
46078@@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
46079 vmi.used >> 10,
46080 vmi.largest_chunk >> 10
46081 #ifdef CONFIG_MEMORY_FAILURE
46082- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
46083+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
46084 #endif
46085 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
46086 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
46087diff -urNp linux-3.1.1/fs/proc/nommu.c linux-3.1.1/fs/proc/nommu.c
46088--- linux-3.1.1/fs/proc/nommu.c 2011-11-11 15:19:27.000000000 -0500
46089+++ linux-3.1.1/fs/proc/nommu.c 2011-11-16 18:39:08.000000000 -0500
46090@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
46091 if (len < 1)
46092 len = 1;
46093 seq_printf(m, "%*c", len, ' ');
46094- seq_path(m, &file->f_path, "");
46095+ seq_path(m, &file->f_path, "\n\\");
46096 }
46097
46098 seq_putc(m, '\n');
46099diff -urNp linux-3.1.1/fs/proc/proc_net.c linux-3.1.1/fs/proc/proc_net.c
46100--- linux-3.1.1/fs/proc/proc_net.c 2011-11-11 15:19:27.000000000 -0500
46101+++ linux-3.1.1/fs/proc/proc_net.c 2011-11-16 18:40:29.000000000 -0500
46102@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
46103 struct task_struct *task;
46104 struct nsproxy *ns;
46105 struct net *net = NULL;
46106+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46107+ const struct cred *cred = current_cred();
46108+#endif
46109+
46110+#ifdef CONFIG_GRKERNSEC_PROC_USER
46111+ if (cred->fsuid)
46112+ return net;
46113+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46114+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
46115+ return net;
46116+#endif
46117
46118 rcu_read_lock();
46119 task = pid_task(proc_pid(dir), PIDTYPE_PID);
46120diff -urNp linux-3.1.1/fs/proc/proc_sysctl.c linux-3.1.1/fs/proc/proc_sysctl.c
46121--- linux-3.1.1/fs/proc/proc_sysctl.c 2011-11-11 15:19:27.000000000 -0500
46122+++ linux-3.1.1/fs/proc/proc_sysctl.c 2011-11-16 18:40:29.000000000 -0500
46123@@ -8,11 +8,13 @@
46124 #include <linux/namei.h>
46125 #include "internal.h"
46126
46127+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
46128+
46129 static const struct dentry_operations proc_sys_dentry_operations;
46130 static const struct file_operations proc_sys_file_operations;
46131-static const struct inode_operations proc_sys_inode_operations;
46132+const struct inode_operations proc_sys_inode_operations;
46133 static const struct file_operations proc_sys_dir_file_operations;
46134-static const struct inode_operations proc_sys_dir_operations;
46135+const struct inode_operations proc_sys_dir_operations;
46136
46137 static struct inode *proc_sys_make_inode(struct super_block *sb,
46138 struct ctl_table_header *head, struct ctl_table *table)
46139@@ -121,8 +123,14 @@ static struct dentry *proc_sys_lookup(st
46140
46141 err = NULL;
46142 d_set_d_op(dentry, &proc_sys_dentry_operations);
46143+
46144+ gr_handle_proc_create(dentry, inode);
46145+
46146 d_add(dentry, inode);
46147
46148+ if (gr_handle_sysctl(p, MAY_EXEC))
46149+ err = ERR_PTR(-ENOENT);
46150+
46151 out:
46152 sysctl_head_finish(head);
46153 return err;
46154@@ -202,6 +210,9 @@ static int proc_sys_fill_cache(struct fi
46155 return -ENOMEM;
46156 } else {
46157 d_set_d_op(child, &proc_sys_dentry_operations);
46158+
46159+ gr_handle_proc_create(child, inode);
46160+
46161 d_add(child, inode);
46162 }
46163 } else {
46164@@ -230,6 +241,9 @@ static int scan(struct ctl_table_header
46165 if (*pos < file->f_pos)
46166 continue;
46167
46168+ if (gr_handle_sysctl(table, 0))
46169+ continue;
46170+
46171 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
46172 if (res)
46173 return res;
46174@@ -355,6 +369,9 @@ static int proc_sys_getattr(struct vfsmo
46175 if (IS_ERR(head))
46176 return PTR_ERR(head);
46177
46178+ if (table && gr_handle_sysctl(table, MAY_EXEC))
46179+ return -ENOENT;
46180+
46181 generic_fillattr(inode, stat);
46182 if (table)
46183 stat->mode = (stat->mode & S_IFMT) | table->mode;
46184@@ -374,13 +391,13 @@ static const struct file_operations proc
46185 .llseek = generic_file_llseek,
46186 };
46187
46188-static const struct inode_operations proc_sys_inode_operations = {
46189+const struct inode_operations proc_sys_inode_operations = {
46190 .permission = proc_sys_permission,
46191 .setattr = proc_sys_setattr,
46192 .getattr = proc_sys_getattr,
46193 };
46194
46195-static const struct inode_operations proc_sys_dir_operations = {
46196+const struct inode_operations proc_sys_dir_operations = {
46197 .lookup = proc_sys_lookup,
46198 .permission = proc_sys_permission,
46199 .setattr = proc_sys_setattr,
46200diff -urNp linux-3.1.1/fs/proc/root.c linux-3.1.1/fs/proc/root.c
46201--- linux-3.1.1/fs/proc/root.c 2011-11-11 15:19:27.000000000 -0500
46202+++ linux-3.1.1/fs/proc/root.c 2011-11-16 18:40:29.000000000 -0500
46203@@ -123,7 +123,15 @@ void __init proc_root_init(void)
46204 #ifdef CONFIG_PROC_DEVICETREE
46205 proc_device_tree_init();
46206 #endif
46207+#ifdef CONFIG_GRKERNSEC_PROC_ADD
46208+#ifdef CONFIG_GRKERNSEC_PROC_USER
46209+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
46210+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46211+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
46212+#endif
46213+#else
46214 proc_mkdir("bus", NULL);
46215+#endif
46216 proc_sys_init();
46217 }
46218
46219diff -urNp linux-3.1.1/fs/proc/task_mmu.c linux-3.1.1/fs/proc/task_mmu.c
46220--- linux-3.1.1/fs/proc/task_mmu.c 2011-11-11 15:19:27.000000000 -0500
46221+++ linux-3.1.1/fs/proc/task_mmu.c 2011-11-16 18:40:29.000000000 -0500
46222@@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
46223 "VmExe:\t%8lu kB\n"
46224 "VmLib:\t%8lu kB\n"
46225 "VmPTE:\t%8lu kB\n"
46226- "VmSwap:\t%8lu kB\n",
46227- hiwater_vm << (PAGE_SHIFT-10),
46228+ "VmSwap:\t%8lu kB\n"
46229+
46230+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46231+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
46232+#endif
46233+
46234+ ,hiwater_vm << (PAGE_SHIFT-10),
46235 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
46236 mm->locked_vm << (PAGE_SHIFT-10),
46237 hiwater_rss << (PAGE_SHIFT-10),
46238@@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
46239 data << (PAGE_SHIFT-10),
46240 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
46241 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
46242- swap << (PAGE_SHIFT-10));
46243+ swap << (PAGE_SHIFT-10)
46244+
46245+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46246+ , mm->context.user_cs_base, mm->context.user_cs_limit
46247+#endif
46248+
46249+ );
46250 }
46251
46252 unsigned long task_vsize(struct mm_struct *mm)
46253@@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
46254 return ret;
46255 }
46256
46257+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46258+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
46259+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
46260+ _mm->pax_flags & MF_PAX_SEGMEXEC))
46261+#endif
46262+
46263 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46264 {
46265 struct mm_struct *mm = vma->vm_mm;
46266@@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
46267 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
46268 }
46269
46270- /* We don't show the stack guard page in /proc/maps */
46271+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46272+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
46273+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
46274+#else
46275 start = vma->vm_start;
46276- if (stack_guard_page_start(vma, start))
46277- start += PAGE_SIZE;
46278 end = vma->vm_end;
46279- if (stack_guard_page_end(vma, end))
46280- end -= PAGE_SIZE;
46281+#endif
46282
46283 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
46284 start,
46285@@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
46286 flags & VM_WRITE ? 'w' : '-',
46287 flags & VM_EXEC ? 'x' : '-',
46288 flags & VM_MAYSHARE ? 's' : 'p',
46289+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46290+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
46291+#else
46292 pgoff,
46293+#endif
46294 MAJOR(dev), MINOR(dev), ino, &len);
46295
46296 /*
46297@@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
46298 */
46299 if (file) {
46300 pad_len_spaces(m, len);
46301- seq_path(m, &file->f_path, "\n");
46302+ seq_path(m, &file->f_path, "\n\\");
46303 } else {
46304 const char *name = arch_vma_name(vma);
46305 if (!name) {
46306@@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
46307 if (vma->vm_start <= mm->brk &&
46308 vma->vm_end >= mm->start_brk) {
46309 name = "[heap]";
46310- } else if (vma->vm_start <= mm->start_stack &&
46311- vma->vm_end >= mm->start_stack) {
46312+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
46313+ (vma->vm_start <= mm->start_stack &&
46314+ vma->vm_end >= mm->start_stack)) {
46315 name = "[stack]";
46316 }
46317 } else {
46318@@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
46319 };
46320
46321 memset(&mss, 0, sizeof mss);
46322- mss.vma = vma;
46323- /* mmap_sem is held in m_start */
46324- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46325- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46326-
46327+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46328+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
46329+#endif
46330+ mss.vma = vma;
46331+ /* mmap_sem is held in m_start */
46332+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46333+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46334+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46335+ }
46336+#endif
46337 show_map_vma(m, vma);
46338
46339 seq_printf(m,
46340@@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
46341 "KernelPageSize: %8lu kB\n"
46342 "MMUPageSize: %8lu kB\n"
46343 "Locked: %8lu kB\n",
46344+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46345+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
46346+#else
46347 (vma->vm_end - vma->vm_start) >> 10,
46348+#endif
46349 mss.resident >> 10,
46350 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
46351 mss.shared_clean >> 10,
46352@@ -1031,7 +1062,7 @@ static int show_numa_map(struct seq_file
46353
46354 if (file) {
46355 seq_printf(m, " file=");
46356- seq_path(m, &file->f_path, "\n\t= ");
46357+ seq_path(m, &file->f_path, "\n\t\\= ");
46358 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
46359 seq_printf(m, " heap");
46360 } else if (vma->vm_start <= mm->start_stack &&
46361diff -urNp linux-3.1.1/fs/proc/task_nommu.c linux-3.1.1/fs/proc/task_nommu.c
46362--- linux-3.1.1/fs/proc/task_nommu.c 2011-11-11 15:19:27.000000000 -0500
46363+++ linux-3.1.1/fs/proc/task_nommu.c 2011-11-16 18:39:08.000000000 -0500
46364@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
46365 else
46366 bytes += kobjsize(mm);
46367
46368- if (current->fs && current->fs->users > 1)
46369+ if (current->fs && atomic_read(&current->fs->users) > 1)
46370 sbytes += kobjsize(current->fs);
46371 else
46372 bytes += kobjsize(current->fs);
46373@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
46374
46375 if (file) {
46376 pad_len_spaces(m, len);
46377- seq_path(m, &file->f_path, "");
46378+ seq_path(m, &file->f_path, "\n\\");
46379 } else if (mm) {
46380 if (vma->vm_start <= mm->start_stack &&
46381 vma->vm_end >= mm->start_stack) {
46382diff -urNp linux-3.1.1/fs/quota/netlink.c linux-3.1.1/fs/quota/netlink.c
46383--- linux-3.1.1/fs/quota/netlink.c 2011-11-11 15:19:27.000000000 -0500
46384+++ linux-3.1.1/fs/quota/netlink.c 2011-11-16 18:39:08.000000000 -0500
46385@@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
46386 void quota_send_warning(short type, unsigned int id, dev_t dev,
46387 const char warntype)
46388 {
46389- static atomic_t seq;
46390+ static atomic_unchecked_t seq;
46391 struct sk_buff *skb;
46392 void *msg_head;
46393 int ret;
46394@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
46395 "VFS: Not enough memory to send quota warning.\n");
46396 return;
46397 }
46398- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
46399+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
46400 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
46401 if (!msg_head) {
46402 printk(KERN_ERR
46403diff -urNp linux-3.1.1/fs/readdir.c linux-3.1.1/fs/readdir.c
46404--- linux-3.1.1/fs/readdir.c 2011-11-11 15:19:27.000000000 -0500
46405+++ linux-3.1.1/fs/readdir.c 2011-11-16 18:40:29.000000000 -0500
46406@@ -17,6 +17,7 @@
46407 #include <linux/security.h>
46408 #include <linux/syscalls.h>
46409 #include <linux/unistd.h>
46410+#include <linux/namei.h>
46411
46412 #include <asm/uaccess.h>
46413
46414@@ -67,6 +68,7 @@ struct old_linux_dirent {
46415
46416 struct readdir_callback {
46417 struct old_linux_dirent __user * dirent;
46418+ struct file * file;
46419 int result;
46420 };
46421
46422@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
46423 buf->result = -EOVERFLOW;
46424 return -EOVERFLOW;
46425 }
46426+
46427+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46428+ return 0;
46429+
46430 buf->result++;
46431 dirent = buf->dirent;
46432 if (!access_ok(VERIFY_WRITE, dirent,
46433@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
46434
46435 buf.result = 0;
46436 buf.dirent = dirent;
46437+ buf.file = file;
46438
46439 error = vfs_readdir(file, fillonedir, &buf);
46440 if (buf.result)
46441@@ -142,6 +149,7 @@ struct linux_dirent {
46442 struct getdents_callback {
46443 struct linux_dirent __user * current_dir;
46444 struct linux_dirent __user * previous;
46445+ struct file * file;
46446 int count;
46447 int error;
46448 };
46449@@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
46450 buf->error = -EOVERFLOW;
46451 return -EOVERFLOW;
46452 }
46453+
46454+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46455+ return 0;
46456+
46457 dirent = buf->previous;
46458 if (dirent) {
46459 if (__put_user(offset, &dirent->d_off))
46460@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
46461 buf.previous = NULL;
46462 buf.count = count;
46463 buf.error = 0;
46464+ buf.file = file;
46465
46466 error = vfs_readdir(file, filldir, &buf);
46467 if (error >= 0)
46468@@ -229,6 +242,7 @@ out:
46469 struct getdents_callback64 {
46470 struct linux_dirent64 __user * current_dir;
46471 struct linux_dirent64 __user * previous;
46472+ struct file *file;
46473 int count;
46474 int error;
46475 };
46476@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
46477 buf->error = -EINVAL; /* only used if we fail.. */
46478 if (reclen > buf->count)
46479 return -EINVAL;
46480+
46481+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46482+ return 0;
46483+
46484 dirent = buf->previous;
46485 if (dirent) {
46486 if (__put_user(offset, &dirent->d_off))
46487@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
46488
46489 buf.current_dir = dirent;
46490 buf.previous = NULL;
46491+ buf.file = file;
46492 buf.count = count;
46493 buf.error = 0;
46494
46495@@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
46496 error = buf.error;
46497 lastdirent = buf.previous;
46498 if (lastdirent) {
46499- typeof(lastdirent->d_off) d_off = file->f_pos;
46500+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
46501 if (__put_user(d_off, &lastdirent->d_off))
46502 error = -EFAULT;
46503 else
46504diff -urNp linux-3.1.1/fs/reiserfs/dir.c linux-3.1.1/fs/reiserfs/dir.c
46505--- linux-3.1.1/fs/reiserfs/dir.c 2011-11-11 15:19:27.000000000 -0500
46506+++ linux-3.1.1/fs/reiserfs/dir.c 2011-11-16 18:40:29.000000000 -0500
46507@@ -75,6 +75,8 @@ int reiserfs_readdir_dentry(struct dentr
46508 struct reiserfs_dir_entry de;
46509 int ret = 0;
46510
46511+ pax_track_stack();
46512+
46513 reiserfs_write_lock(inode->i_sb);
46514
46515 reiserfs_check_lock_depth(inode->i_sb, "readdir");
46516diff -urNp linux-3.1.1/fs/reiserfs/do_balan.c linux-3.1.1/fs/reiserfs/do_balan.c
46517--- linux-3.1.1/fs/reiserfs/do_balan.c 2011-11-11 15:19:27.000000000 -0500
46518+++ linux-3.1.1/fs/reiserfs/do_balan.c 2011-11-16 18:39:08.000000000 -0500
46519@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
46520 return;
46521 }
46522
46523- atomic_inc(&(fs_generation(tb->tb_sb)));
46524+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
46525 do_balance_starts(tb);
46526
46527 /* balance leaf returns 0 except if combining L R and S into
46528diff -urNp linux-3.1.1/fs/reiserfs/journal.c linux-3.1.1/fs/reiserfs/journal.c
46529--- linux-3.1.1/fs/reiserfs/journal.c 2011-11-11 15:19:27.000000000 -0500
46530+++ linux-3.1.1/fs/reiserfs/journal.c 2011-11-16 18:40:29.000000000 -0500
46531@@ -2289,6 +2289,8 @@ static struct buffer_head *reiserfs_brea
46532 struct buffer_head *bh;
46533 int i, j;
46534
46535+ pax_track_stack();
46536+
46537 bh = __getblk(dev, block, bufsize);
46538 if (buffer_uptodate(bh))
46539 return (bh);
46540diff -urNp linux-3.1.1/fs/reiserfs/namei.c linux-3.1.1/fs/reiserfs/namei.c
46541--- linux-3.1.1/fs/reiserfs/namei.c 2011-11-11 15:19:27.000000000 -0500
46542+++ linux-3.1.1/fs/reiserfs/namei.c 2011-11-16 18:40:29.000000000 -0500
46543@@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode
46544 unsigned long savelink = 1;
46545 struct timespec ctime;
46546
46547+ pax_track_stack();
46548+
46549 /* three balancings: (1) old name removal, (2) new name insertion
46550 and (3) maybe "save" link insertion
46551 stat data updates: (1) old directory,
46552diff -urNp linux-3.1.1/fs/reiserfs/procfs.c linux-3.1.1/fs/reiserfs/procfs.c
46553--- linux-3.1.1/fs/reiserfs/procfs.c 2011-11-11 15:19:27.000000000 -0500
46554+++ linux-3.1.1/fs/reiserfs/procfs.c 2011-11-16 18:40:29.000000000 -0500
46555@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
46556 "SMALL_TAILS " : "NO_TAILS ",
46557 replay_only(sb) ? "REPLAY_ONLY " : "",
46558 convert_reiserfs(sb) ? "CONV " : "",
46559- atomic_read(&r->s_generation_counter),
46560+ atomic_read_unchecked(&r->s_generation_counter),
46561 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
46562 SF(s_do_balance), SF(s_unneeded_left_neighbor),
46563 SF(s_good_search_by_key_reada), SF(s_bmaps),
46564@@ -299,6 +299,8 @@ static int show_journal(struct seq_file
46565 struct journal_params *jp = &rs->s_v1.s_journal;
46566 char b[BDEVNAME_SIZE];
46567
46568+ pax_track_stack();
46569+
46570 seq_printf(m, /* on-disk fields */
46571 "jp_journal_1st_block: \t%i\n"
46572 "jp_journal_dev: \t%s[%x]\n"
46573diff -urNp linux-3.1.1/fs/reiserfs/stree.c linux-3.1.1/fs/reiserfs/stree.c
46574--- linux-3.1.1/fs/reiserfs/stree.c 2011-11-11 15:19:27.000000000 -0500
46575+++ linux-3.1.1/fs/reiserfs/stree.c 2011-11-16 18:40:29.000000000 -0500
46576@@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
46577 int iter = 0;
46578 #endif
46579
46580+ pax_track_stack();
46581+
46582 BUG_ON(!th->t_trans_id);
46583
46584 init_tb_struct(th, &s_del_balance, sb, path,
46585@@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
46586 int retval;
46587 int quota_cut_bytes = 0;
46588
46589+ pax_track_stack();
46590+
46591 BUG_ON(!th->t_trans_id);
46592
46593 le_key2cpu_key(&cpu_key, key);
46594@@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
46595 int quota_cut_bytes;
46596 loff_t tail_pos = 0;
46597
46598+ pax_track_stack();
46599+
46600 BUG_ON(!th->t_trans_id);
46601
46602 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
46603@@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
46604 int retval;
46605 int fs_gen;
46606
46607+ pax_track_stack();
46608+
46609 BUG_ON(!th->t_trans_id);
46610
46611 fs_gen = get_generation(inode->i_sb);
46612@@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
46613 int fs_gen = 0;
46614 int quota_bytes = 0;
46615
46616+ pax_track_stack();
46617+
46618 BUG_ON(!th->t_trans_id);
46619
46620 if (inode) { /* Do we count quotas for item? */
46621diff -urNp linux-3.1.1/fs/reiserfs/super.c linux-3.1.1/fs/reiserfs/super.c
46622--- linux-3.1.1/fs/reiserfs/super.c 2011-11-11 15:19:27.000000000 -0500
46623+++ linux-3.1.1/fs/reiserfs/super.c 2011-11-16 18:40:29.000000000 -0500
46624@@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
46625 {.option_name = NULL}
46626 };
46627
46628+ pax_track_stack();
46629+
46630 *blocks = 0;
46631 if (!options || !*options)
46632 /* use default configuration: create tails, journaling on, no
46633diff -urNp linux-3.1.1/fs/select.c linux-3.1.1/fs/select.c
46634--- linux-3.1.1/fs/select.c 2011-11-11 15:19:27.000000000 -0500
46635+++ linux-3.1.1/fs/select.c 2011-11-16 18:40:29.000000000 -0500
46636@@ -20,6 +20,7 @@
46637 #include <linux/module.h>
46638 #include <linux/slab.h>
46639 #include <linux/poll.h>
46640+#include <linux/security.h>
46641 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
46642 #include <linux/file.h>
46643 #include <linux/fdtable.h>
46644@@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
46645 int retval, i, timed_out = 0;
46646 unsigned long slack = 0;
46647
46648+ pax_track_stack();
46649+
46650 rcu_read_lock();
46651 retval = max_select_fd(n, fds);
46652 rcu_read_unlock();
46653@@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
46654 /* Allocate small arguments on the stack to save memory and be faster */
46655 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
46656
46657+ pax_track_stack();
46658+
46659 ret = -EINVAL;
46660 if (n < 0)
46661 goto out_nofds;
46662@@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
46663 struct poll_list *walk = head;
46664 unsigned long todo = nfds;
46665
46666+ pax_track_stack();
46667+
46668+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
46669 if (nfds > rlimit(RLIMIT_NOFILE))
46670 return -EINVAL;
46671
46672diff -urNp linux-3.1.1/fs/seq_file.c linux-3.1.1/fs/seq_file.c
46673--- linux-3.1.1/fs/seq_file.c 2011-11-11 15:19:27.000000000 -0500
46674+++ linux-3.1.1/fs/seq_file.c 2011-11-16 18:39:08.000000000 -0500
46675@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
46676 return 0;
46677 }
46678 if (!m->buf) {
46679- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46680+ m->size = PAGE_SIZE;
46681+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46682 if (!m->buf)
46683 return -ENOMEM;
46684 }
46685@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
46686 Eoverflow:
46687 m->op->stop(m, p);
46688 kfree(m->buf);
46689- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46690+ m->size <<= 1;
46691+ m->buf = kmalloc(m->size, GFP_KERNEL);
46692 return !m->buf ? -ENOMEM : -EAGAIN;
46693 }
46694
46695@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
46696 m->version = file->f_version;
46697 /* grab buffer if we didn't have one */
46698 if (!m->buf) {
46699- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46700+ m->size = PAGE_SIZE;
46701+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46702 if (!m->buf)
46703 goto Enomem;
46704 }
46705@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
46706 goto Fill;
46707 m->op->stop(m, p);
46708 kfree(m->buf);
46709- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46710+ m->size <<= 1;
46711+ m->buf = kmalloc(m->size, GFP_KERNEL);
46712 if (!m->buf)
46713 goto Enomem;
46714 m->count = 0;
46715@@ -549,7 +553,7 @@ static void single_stop(struct seq_file
46716 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
46717 void *data)
46718 {
46719- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
46720+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
46721 int res = -ENOMEM;
46722
46723 if (op) {
46724diff -urNp linux-3.1.1/fs/splice.c linux-3.1.1/fs/splice.c
46725--- linux-3.1.1/fs/splice.c 2011-11-11 15:19:27.000000000 -0500
46726+++ linux-3.1.1/fs/splice.c 2011-11-16 18:40:29.000000000 -0500
46727@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode
46728 pipe_lock(pipe);
46729
46730 for (;;) {
46731- if (!pipe->readers) {
46732+ if (!atomic_read(&pipe->readers)) {
46733 send_sig(SIGPIPE, current, 0);
46734 if (!ret)
46735 ret = -EPIPE;
46736@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode
46737 do_wakeup = 0;
46738 }
46739
46740- pipe->waiting_writers++;
46741+ atomic_inc(&pipe->waiting_writers);
46742 pipe_wait(pipe);
46743- pipe->waiting_writers--;
46744+ atomic_dec(&pipe->waiting_writers);
46745 }
46746
46747 pipe_unlock(pipe);
46748@@ -320,6 +320,8 @@ __generic_file_splice_read(struct file *
46749 .spd_release = spd_release_page,
46750 };
46751
46752+ pax_track_stack();
46753+
46754 if (splice_grow_spd(pipe, &spd))
46755 return -ENOMEM;
46756
46757@@ -560,7 +562,7 @@ static ssize_t kernel_readv(struct file
46758 old_fs = get_fs();
46759 set_fs(get_ds());
46760 /* The cast to a user pointer is valid due to the set_fs() */
46761- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46762+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
46763 set_fs(old_fs);
46764
46765 return res;
46766@@ -575,7 +577,7 @@ static ssize_t kernel_write(struct file
46767 old_fs = get_fs();
46768 set_fs(get_ds());
46769 /* The cast to a user pointer is valid due to the set_fs() */
46770- res = vfs_write(file, (const char __user *)buf, count, &pos);
46771+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
46772 set_fs(old_fs);
46773
46774 return res;
46775@@ -603,6 +605,8 @@ ssize_t default_file_splice_read(struct
46776 .spd_release = spd_release_page,
46777 };
46778
46779+ pax_track_stack();
46780+
46781 if (splice_grow_spd(pipe, &spd))
46782 return -ENOMEM;
46783
46784@@ -626,7 +630,7 @@ ssize_t default_file_splice_read(struct
46785 goto err;
46786
46787 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46788- vec[i].iov_base = (void __user *) page_address(page);
46789+ vec[i].iov_base = (void __force_user *) page_address(page);
46790 vec[i].iov_len = this_len;
46791 spd.pages[i] = page;
46792 spd.nr_pages++;
46793@@ -846,10 +850,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46794 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46795 {
46796 while (!pipe->nrbufs) {
46797- if (!pipe->writers)
46798+ if (!atomic_read(&pipe->writers))
46799 return 0;
46800
46801- if (!pipe->waiting_writers && sd->num_spliced)
46802+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46803 return 0;
46804
46805 if (sd->flags & SPLICE_F_NONBLOCK)
46806@@ -1182,7 +1186,7 @@ ssize_t splice_direct_to_actor(struct fi
46807 * out of the pipe right after the splice_to_pipe(). So set
46808 * PIPE_READERS appropriately.
46809 */
46810- pipe->readers = 1;
46811+ atomic_set(&pipe->readers, 1);
46812
46813 current->splice_pipe = pipe;
46814 }
46815@@ -1619,6 +1623,8 @@ static long vmsplice_to_pipe(struct file
46816 };
46817 long ret;
46818
46819+ pax_track_stack();
46820+
46821 pipe = get_pipe_info(file);
46822 if (!pipe)
46823 return -EBADF;
46824@@ -1734,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_
46825 ret = -ERESTARTSYS;
46826 break;
46827 }
46828- if (!pipe->writers)
46829+ if (!atomic_read(&pipe->writers))
46830 break;
46831- if (!pipe->waiting_writers) {
46832+ if (!atomic_read(&pipe->waiting_writers)) {
46833 if (flags & SPLICE_F_NONBLOCK) {
46834 ret = -EAGAIN;
46835 break;
46836@@ -1768,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_
46837 pipe_lock(pipe);
46838
46839 while (pipe->nrbufs >= pipe->buffers) {
46840- if (!pipe->readers) {
46841+ if (!atomic_read(&pipe->readers)) {
46842 send_sig(SIGPIPE, current, 0);
46843 ret = -EPIPE;
46844 break;
46845@@ -1781,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_
46846 ret = -ERESTARTSYS;
46847 break;
46848 }
46849- pipe->waiting_writers++;
46850+ atomic_inc(&pipe->waiting_writers);
46851 pipe_wait(pipe);
46852- pipe->waiting_writers--;
46853+ atomic_dec(&pipe->waiting_writers);
46854 }
46855
46856 pipe_unlock(pipe);
46857@@ -1819,14 +1825,14 @@ retry:
46858 pipe_double_lock(ipipe, opipe);
46859
46860 do {
46861- if (!opipe->readers) {
46862+ if (!atomic_read(&opipe->readers)) {
46863 send_sig(SIGPIPE, current, 0);
46864 if (!ret)
46865 ret = -EPIPE;
46866 break;
46867 }
46868
46869- if (!ipipe->nrbufs && !ipipe->writers)
46870+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
46871 break;
46872
46873 /*
46874@@ -1923,7 +1929,7 @@ static int link_pipe(struct pipe_inode_i
46875 pipe_double_lock(ipipe, opipe);
46876
46877 do {
46878- if (!opipe->readers) {
46879+ if (!atomic_read(&opipe->readers)) {
46880 send_sig(SIGPIPE, current, 0);
46881 if (!ret)
46882 ret = -EPIPE;
46883@@ -1968,7 +1974,7 @@ static int link_pipe(struct pipe_inode_i
46884 * return EAGAIN if we have the potential of some data in the
46885 * future, otherwise just return 0
46886 */
46887- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
46888+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
46889 ret = -EAGAIN;
46890
46891 pipe_unlock(ipipe);
46892diff -urNp linux-3.1.1/fs/sysfs/file.c linux-3.1.1/fs/sysfs/file.c
46893--- linux-3.1.1/fs/sysfs/file.c 2011-11-11 15:19:27.000000000 -0500
46894+++ linux-3.1.1/fs/sysfs/file.c 2011-11-16 18:39:08.000000000 -0500
46895@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
46896
46897 struct sysfs_open_dirent {
46898 atomic_t refcnt;
46899- atomic_t event;
46900+ atomic_unchecked_t event;
46901 wait_queue_head_t poll;
46902 struct list_head buffers; /* goes through sysfs_buffer.list */
46903 };
46904@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentr
46905 if (!sysfs_get_active(attr_sd))
46906 return -ENODEV;
46907
46908- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
46909+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
46910 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
46911
46912 sysfs_put_active(attr_sd);
46913@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct
46914 return -ENOMEM;
46915
46916 atomic_set(&new_od->refcnt, 0);
46917- atomic_set(&new_od->event, 1);
46918+ atomic_set_unchecked(&new_od->event, 1);
46919 init_waitqueue_head(&new_od->poll);
46920 INIT_LIST_HEAD(&new_od->buffers);
46921 goto retry;
46922@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct fi
46923
46924 sysfs_put_active(attr_sd);
46925
46926- if (buffer->event != atomic_read(&od->event))
46927+ if (buffer->event != atomic_read_unchecked(&od->event))
46928 goto trigger;
46929
46930 return DEFAULT_POLLMASK;
46931@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_di
46932
46933 od = sd->s_attr.open;
46934 if (od) {
46935- atomic_inc(&od->event);
46936+ atomic_inc_unchecked(&od->event);
46937 wake_up_interruptible(&od->poll);
46938 }
46939
46940diff -urNp linux-3.1.1/fs/sysfs/mount.c linux-3.1.1/fs/sysfs/mount.c
46941--- linux-3.1.1/fs/sysfs/mount.c 2011-11-11 15:19:27.000000000 -0500
46942+++ linux-3.1.1/fs/sysfs/mount.c 2011-11-16 18:40:29.000000000 -0500
46943@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
46944 .s_name = "",
46945 .s_count = ATOMIC_INIT(1),
46946 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
46947+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
46948+ .s_mode = S_IFDIR | S_IRWXU,
46949+#else
46950 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
46951+#endif
46952 .s_ino = 1,
46953 };
46954
46955diff -urNp linux-3.1.1/fs/sysfs/symlink.c linux-3.1.1/fs/sysfs/symlink.c
46956--- linux-3.1.1/fs/sysfs/symlink.c 2011-11-11 15:19:27.000000000 -0500
46957+++ linux-3.1.1/fs/sysfs/symlink.c 2011-11-16 18:39:08.000000000 -0500
46958@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
46959
46960 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46961 {
46962- char *page = nd_get_link(nd);
46963+ const char *page = nd_get_link(nd);
46964 if (!IS_ERR(page))
46965 free_page((unsigned long)page);
46966 }
46967diff -urNp linux-3.1.1/fs/udf/inode.c linux-3.1.1/fs/udf/inode.c
46968--- linux-3.1.1/fs/udf/inode.c 2011-11-11 15:19:27.000000000 -0500
46969+++ linux-3.1.1/fs/udf/inode.c 2011-11-16 18:40:29.000000000 -0500
46970@@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
46971 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
46972 int lastblock = 0;
46973
46974+ pax_track_stack();
46975+
46976 prev_epos.offset = udf_file_entry_alloc_offset(inode);
46977 prev_epos.block = iinfo->i_location;
46978 prev_epos.bh = NULL;
46979diff -urNp linux-3.1.1/fs/udf/misc.c linux-3.1.1/fs/udf/misc.c
46980--- linux-3.1.1/fs/udf/misc.c 2011-11-11 15:19:27.000000000 -0500
46981+++ linux-3.1.1/fs/udf/misc.c 2011-11-16 18:39:08.000000000 -0500
46982@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
46983
46984 u8 udf_tag_checksum(const struct tag *t)
46985 {
46986- u8 *data = (u8 *)t;
46987+ const u8 *data = (const u8 *)t;
46988 u8 checksum = 0;
46989 int i;
46990 for (i = 0; i < sizeof(struct tag); ++i)
46991diff -urNp linux-3.1.1/fs/utimes.c linux-3.1.1/fs/utimes.c
46992--- linux-3.1.1/fs/utimes.c 2011-11-11 15:19:27.000000000 -0500
46993+++ linux-3.1.1/fs/utimes.c 2011-11-16 18:40:29.000000000 -0500
46994@@ -1,6 +1,7 @@
46995 #include <linux/compiler.h>
46996 #include <linux/file.h>
46997 #include <linux/fs.h>
46998+#include <linux/security.h>
46999 #include <linux/linkage.h>
47000 #include <linux/mount.h>
47001 #include <linux/namei.h>
47002@@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
47003 goto mnt_drop_write_and_out;
47004 }
47005 }
47006+
47007+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
47008+ error = -EACCES;
47009+ goto mnt_drop_write_and_out;
47010+ }
47011+
47012 mutex_lock(&inode->i_mutex);
47013 error = notify_change(path->dentry, &newattrs);
47014 mutex_unlock(&inode->i_mutex);
47015diff -urNp linux-3.1.1/fs/xattr_acl.c linux-3.1.1/fs/xattr_acl.c
47016--- linux-3.1.1/fs/xattr_acl.c 2011-11-11 15:19:27.000000000 -0500
47017+++ linux-3.1.1/fs/xattr_acl.c 2011-11-16 18:39:08.000000000 -0500
47018@@ -17,8 +17,8 @@
47019 struct posix_acl *
47020 posix_acl_from_xattr(const void *value, size_t size)
47021 {
47022- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
47023- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
47024+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
47025+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
47026 int count;
47027 struct posix_acl *acl;
47028 struct posix_acl_entry *acl_e;
47029diff -urNp linux-3.1.1/fs/xattr.c linux-3.1.1/fs/xattr.c
47030--- linux-3.1.1/fs/xattr.c 2011-11-11 15:19:27.000000000 -0500
47031+++ linux-3.1.1/fs/xattr.c 2011-11-16 18:40:29.000000000 -0500
47032@@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
47033 * Extended attribute SET operations
47034 */
47035 static long
47036-setxattr(struct dentry *d, const char __user *name, const void __user *value,
47037+setxattr(struct path *path, const char __user *name, const void __user *value,
47038 size_t size, int flags)
47039 {
47040 int error;
47041@@ -278,7 +278,13 @@ setxattr(struct dentry *d, const char __
47042 return PTR_ERR(kvalue);
47043 }
47044
47045- error = vfs_setxattr(d, kname, kvalue, size, flags);
47046+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
47047+ error = -EACCES;
47048+ goto out;
47049+ }
47050+
47051+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
47052+out:
47053 kfree(kvalue);
47054 return error;
47055 }
47056@@ -295,7 +301,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
47057 return error;
47058 error = mnt_want_write(path.mnt);
47059 if (!error) {
47060- error = setxattr(path.dentry, name, value, size, flags);
47061+ error = setxattr(&path, name, value, size, flags);
47062 mnt_drop_write(path.mnt);
47063 }
47064 path_put(&path);
47065@@ -314,7 +320,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
47066 return error;
47067 error = mnt_want_write(path.mnt);
47068 if (!error) {
47069- error = setxattr(path.dentry, name, value, size, flags);
47070+ error = setxattr(&path, name, value, size, flags);
47071 mnt_drop_write(path.mnt);
47072 }
47073 path_put(&path);
47074@@ -325,17 +331,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
47075 const void __user *,value, size_t, size, int, flags)
47076 {
47077 struct file *f;
47078- struct dentry *dentry;
47079 int error = -EBADF;
47080
47081 f = fget(fd);
47082 if (!f)
47083 return error;
47084- dentry = f->f_path.dentry;
47085- audit_inode(NULL, dentry);
47086+ audit_inode(NULL, f->f_path.dentry);
47087 error = mnt_want_write_file(f);
47088 if (!error) {
47089- error = setxattr(dentry, name, value, size, flags);
47090+ error = setxattr(&f->f_path, name, value, size, flags);
47091 mnt_drop_write(f->f_path.mnt);
47092 }
47093 fput(f);
47094diff -urNp linux-3.1.1/fs/xfs/xfs_bmap.c linux-3.1.1/fs/xfs/xfs_bmap.c
47095--- linux-3.1.1/fs/xfs/xfs_bmap.c 2011-11-11 15:19:27.000000000 -0500
47096+++ linux-3.1.1/fs/xfs/xfs_bmap.c 2011-11-16 18:39:08.000000000 -0500
47097@@ -250,7 +250,7 @@ xfs_bmap_validate_ret(
47098 int nmap,
47099 int ret_nmap);
47100 #else
47101-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
47102+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
47103 #endif /* DEBUG */
47104
47105 STATIC int
47106diff -urNp linux-3.1.1/fs/xfs/xfs_dir2_sf.c linux-3.1.1/fs/xfs/xfs_dir2_sf.c
47107--- linux-3.1.1/fs/xfs/xfs_dir2_sf.c 2011-11-11 15:19:27.000000000 -0500
47108+++ linux-3.1.1/fs/xfs/xfs_dir2_sf.c 2011-11-16 18:39:08.000000000 -0500
47109@@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
47110 }
47111
47112 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
47113- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
47114+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
47115+ char name[sfep->namelen];
47116+ memcpy(name, sfep->name, sfep->namelen);
47117+ if (filldir(dirent, name, sfep->namelen,
47118+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
47119+ *offset = off & 0x7fffffff;
47120+ return 0;
47121+ }
47122+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
47123 off & 0x7fffffff, ino, DT_UNKNOWN)) {
47124 *offset = off & 0x7fffffff;
47125 return 0;
47126diff -urNp linux-3.1.1/fs/xfs/xfs_ioctl.c linux-3.1.1/fs/xfs/xfs_ioctl.c
47127--- linux-3.1.1/fs/xfs/xfs_ioctl.c 2011-11-11 15:19:27.000000000 -0500
47128+++ linux-3.1.1/fs/xfs/xfs_ioctl.c 2011-11-16 18:39:08.000000000 -0500
47129@@ -128,7 +128,7 @@ xfs_find_handle(
47130 }
47131
47132 error = -EFAULT;
47133- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
47134+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
47135 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
47136 goto out_put;
47137
47138diff -urNp linux-3.1.1/fs/xfs/xfs_iops.c linux-3.1.1/fs/xfs/xfs_iops.c
47139--- linux-3.1.1/fs/xfs/xfs_iops.c 2011-11-11 15:19:27.000000000 -0500
47140+++ linux-3.1.1/fs/xfs/xfs_iops.c 2011-11-16 18:39:08.000000000 -0500
47141@@ -446,7 +446,7 @@ xfs_vn_put_link(
47142 struct nameidata *nd,
47143 void *p)
47144 {
47145- char *s = nd_get_link(nd);
47146+ const char *s = nd_get_link(nd);
47147
47148 if (!IS_ERR(s))
47149 kfree(s);
47150diff -urNp linux-3.1.1/grsecurity/gracl_alloc.c linux-3.1.1/grsecurity/gracl_alloc.c
47151--- linux-3.1.1/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
47152+++ linux-3.1.1/grsecurity/gracl_alloc.c 2011-11-16 18:40:31.000000000 -0500
47153@@ -0,0 +1,105 @@
47154+#include <linux/kernel.h>
47155+#include <linux/mm.h>
47156+#include <linux/slab.h>
47157+#include <linux/vmalloc.h>
47158+#include <linux/gracl.h>
47159+#include <linux/grsecurity.h>
47160+
47161+static unsigned long alloc_stack_next = 1;
47162+static unsigned long alloc_stack_size = 1;
47163+static void **alloc_stack;
47164+
47165+static __inline__ int
47166+alloc_pop(void)
47167+{
47168+ if (alloc_stack_next == 1)
47169+ return 0;
47170+
47171+ kfree(alloc_stack[alloc_stack_next - 2]);
47172+
47173+ alloc_stack_next--;
47174+
47175+ return 1;
47176+}
47177+
47178+static __inline__ int
47179+alloc_push(void *buf)
47180+{
47181+ if (alloc_stack_next >= alloc_stack_size)
47182+ return 1;
47183+
47184+ alloc_stack[alloc_stack_next - 1] = buf;
47185+
47186+ alloc_stack_next++;
47187+
47188+ return 0;
47189+}
47190+
47191+void *
47192+acl_alloc(unsigned long len)
47193+{
47194+ void *ret = NULL;
47195+
47196+ if (!len || len > PAGE_SIZE)
47197+ goto out;
47198+
47199+ ret = kmalloc(len, GFP_KERNEL);
47200+
47201+ if (ret) {
47202+ if (alloc_push(ret)) {
47203+ kfree(ret);
47204+ ret = NULL;
47205+ }
47206+ }
47207+
47208+out:
47209+ return ret;
47210+}
47211+
47212+void *
47213+acl_alloc_num(unsigned long num, unsigned long len)
47214+{
47215+ if (!len || (num > (PAGE_SIZE / len)))
47216+ return NULL;
47217+
47218+ return acl_alloc(num * len);
47219+}
47220+
47221+void
47222+acl_free_all(void)
47223+{
47224+ if (gr_acl_is_enabled() || !alloc_stack)
47225+ return;
47226+
47227+ while (alloc_pop()) ;
47228+
47229+ if (alloc_stack) {
47230+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
47231+ kfree(alloc_stack);
47232+ else
47233+ vfree(alloc_stack);
47234+ }
47235+
47236+ alloc_stack = NULL;
47237+ alloc_stack_size = 1;
47238+ alloc_stack_next = 1;
47239+
47240+ return;
47241+}
47242+
47243+int
47244+acl_alloc_stack_init(unsigned long size)
47245+{
47246+ if ((size * sizeof (void *)) <= PAGE_SIZE)
47247+ alloc_stack =
47248+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
47249+ else
47250+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
47251+
47252+ alloc_stack_size = size;
47253+
47254+ if (!alloc_stack)
47255+ return 0;
47256+ else
47257+ return 1;
47258+}
47259diff -urNp linux-3.1.1/grsecurity/gracl.c linux-3.1.1/grsecurity/gracl.c
47260--- linux-3.1.1/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
47261+++ linux-3.1.1/grsecurity/gracl.c 2011-11-16 19:31:00.000000000 -0500
47262@@ -0,0 +1,4156 @@
47263+#include <linux/kernel.h>
47264+#include <linux/module.h>
47265+#include <linux/sched.h>
47266+#include <linux/mm.h>
47267+#include <linux/file.h>
47268+#include <linux/fs.h>
47269+#include <linux/namei.h>
47270+#include <linux/mount.h>
47271+#include <linux/tty.h>
47272+#include <linux/proc_fs.h>
47273+#include <linux/lglock.h>
47274+#include <linux/slab.h>
47275+#include <linux/vmalloc.h>
47276+#include <linux/types.h>
47277+#include <linux/sysctl.h>
47278+#include <linux/netdevice.h>
47279+#include <linux/ptrace.h>
47280+#include <linux/gracl.h>
47281+#include <linux/gralloc.h>
47282+#include <linux/grsecurity.h>
47283+#include <linux/grinternal.h>
47284+#include <linux/pid_namespace.h>
47285+#include <linux/fdtable.h>
47286+#include <linux/percpu.h>
47287+
47288+#include <asm/uaccess.h>
47289+#include <asm/errno.h>
47290+#include <asm/mman.h>
47291+
47292+static struct acl_role_db acl_role_set;
47293+static struct name_db name_set;
47294+static struct inodev_db inodev_set;
47295+
47296+/* for keeping track of userspace pointers used for subjects, so we
47297+ can share references in the kernel as well
47298+*/
47299+
47300+static struct path real_root;
47301+
47302+static struct acl_subj_map_db subj_map_set;
47303+
47304+static struct acl_role_label *default_role;
47305+
47306+static struct acl_role_label *role_list;
47307+
47308+static u16 acl_sp_role_value;
47309+
47310+extern char *gr_shared_page[4];
47311+static DEFINE_MUTEX(gr_dev_mutex);
47312+DEFINE_RWLOCK(gr_inode_lock);
47313+
47314+struct gr_arg *gr_usermode;
47315+
47316+static unsigned int gr_status __read_only = GR_STATUS_INIT;
47317+
47318+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
47319+extern void gr_clear_learn_entries(void);
47320+
47321+#ifdef CONFIG_GRKERNSEC_RESLOG
47322+extern void gr_log_resource(const struct task_struct *task,
47323+ const int res, const unsigned long wanted, const int gt);
47324+#endif
47325+
47326+unsigned char *gr_system_salt;
47327+unsigned char *gr_system_sum;
47328+
47329+static struct sprole_pw **acl_special_roles = NULL;
47330+static __u16 num_sprole_pws = 0;
47331+
47332+static struct acl_role_label *kernel_role = NULL;
47333+
47334+static unsigned int gr_auth_attempts = 0;
47335+static unsigned long gr_auth_expires = 0UL;
47336+
47337+#ifdef CONFIG_NET
47338+extern struct vfsmount *sock_mnt;
47339+#endif
47340+
47341+extern struct vfsmount *pipe_mnt;
47342+extern struct vfsmount *shm_mnt;
47343+#ifdef CONFIG_HUGETLBFS
47344+extern struct vfsmount *hugetlbfs_vfsmount;
47345+#endif
47346+
47347+static struct acl_object_label *fakefs_obj_rw;
47348+static struct acl_object_label *fakefs_obj_rwx;
47349+
47350+extern int gr_init_uidset(void);
47351+extern void gr_free_uidset(void);
47352+extern void gr_remove_uid(uid_t uid);
47353+extern int gr_find_uid(uid_t uid);
47354+
47355+DECLARE_BRLOCK(vfsmount_lock);
47356+
47357+__inline__ int
47358+gr_acl_is_enabled(void)
47359+{
47360+ return (gr_status & GR_READY);
47361+}
47362+
47363+#ifdef CONFIG_BTRFS_FS
47364+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
47365+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
47366+#endif
47367+
47368+static inline dev_t __get_dev(const struct dentry *dentry)
47369+{
47370+#ifdef CONFIG_BTRFS_FS
47371+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
47372+ return get_btrfs_dev_from_inode(dentry->d_inode);
47373+ else
47374+#endif
47375+ return dentry->d_inode->i_sb->s_dev;
47376+}
47377+
47378+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
47379+{
47380+ return __get_dev(dentry);
47381+}
47382+
47383+static char gr_task_roletype_to_char(struct task_struct *task)
47384+{
47385+ switch (task->role->roletype &
47386+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
47387+ GR_ROLE_SPECIAL)) {
47388+ case GR_ROLE_DEFAULT:
47389+ return 'D';
47390+ case GR_ROLE_USER:
47391+ return 'U';
47392+ case GR_ROLE_GROUP:
47393+ return 'G';
47394+ case GR_ROLE_SPECIAL:
47395+ return 'S';
47396+ }
47397+
47398+ return 'X';
47399+}
47400+
47401+char gr_roletype_to_char(void)
47402+{
47403+ return gr_task_roletype_to_char(current);
47404+}
47405+
47406+__inline__ int
47407+gr_acl_tpe_check(void)
47408+{
47409+ if (unlikely(!(gr_status & GR_READY)))
47410+ return 0;
47411+ if (current->role->roletype & GR_ROLE_TPE)
47412+ return 1;
47413+ else
47414+ return 0;
47415+}
47416+
47417+int
47418+gr_handle_rawio(const struct inode *inode)
47419+{
47420+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47421+ if (inode && S_ISBLK(inode->i_mode) &&
47422+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
47423+ !capable(CAP_SYS_RAWIO))
47424+ return 1;
47425+#endif
47426+ return 0;
47427+}
47428+
47429+static int
47430+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
47431+{
47432+ if (likely(lena != lenb))
47433+ return 0;
47434+
47435+ return !memcmp(a, b, lena);
47436+}
47437+
47438+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
47439+{
47440+ *buflen -= namelen;
47441+ if (*buflen < 0)
47442+ return -ENAMETOOLONG;
47443+ *buffer -= namelen;
47444+ memcpy(*buffer, str, namelen);
47445+ return 0;
47446+}
47447+
47448+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
47449+{
47450+ return prepend(buffer, buflen, name->name, name->len);
47451+}
47452+
47453+static int prepend_path(const struct path *path, struct path *root,
47454+ char **buffer, int *buflen)
47455+{
47456+ struct dentry *dentry = path->dentry;
47457+ struct vfsmount *vfsmnt = path->mnt;
47458+ bool slash = false;
47459+ int error = 0;
47460+
47461+ while (dentry != root->dentry || vfsmnt != root->mnt) {
47462+ struct dentry * parent;
47463+
47464+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
47465+ /* Global root? */
47466+ if (vfsmnt->mnt_parent == vfsmnt) {
47467+ goto out;
47468+ }
47469+ dentry = vfsmnt->mnt_mountpoint;
47470+ vfsmnt = vfsmnt->mnt_parent;
47471+ continue;
47472+ }
47473+ parent = dentry->d_parent;
47474+ prefetch(parent);
47475+ spin_lock(&dentry->d_lock);
47476+ error = prepend_name(buffer, buflen, &dentry->d_name);
47477+ spin_unlock(&dentry->d_lock);
47478+ if (!error)
47479+ error = prepend(buffer, buflen, "/", 1);
47480+ if (error)
47481+ break;
47482+
47483+ slash = true;
47484+ dentry = parent;
47485+ }
47486+
47487+out:
47488+ if (!error && !slash)
47489+ error = prepend(buffer, buflen, "/", 1);
47490+
47491+ return error;
47492+}
47493+
47494+/* this must be called with vfsmount_lock and rename_lock held */
47495+
47496+static char *__our_d_path(const struct path *path, struct path *root,
47497+ char *buf, int buflen)
47498+{
47499+ char *res = buf + buflen;
47500+ int error;
47501+
47502+ prepend(&res, &buflen, "\0", 1);
47503+ error = prepend_path(path, root, &res, &buflen);
47504+ if (error)
47505+ return ERR_PTR(error);
47506+
47507+ return res;
47508+}
47509+
47510+static char *
47511+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
47512+{
47513+ char *retval;
47514+
47515+ retval = __our_d_path(path, root, buf, buflen);
47516+ if (unlikely(IS_ERR(retval)))
47517+ retval = strcpy(buf, "<path too long>");
47518+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
47519+ retval[1] = '\0';
47520+
47521+ return retval;
47522+}
47523+
47524+static char *
47525+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
47526+ char *buf, int buflen)
47527+{
47528+ struct path path;
47529+ char *res;
47530+
47531+ path.dentry = (struct dentry *)dentry;
47532+ path.mnt = (struct vfsmount *)vfsmnt;
47533+
47534+ /* we can use real_root.dentry, real_root.mnt, because this is only called
47535+ by the RBAC system */
47536+ res = gen_full_path(&path, &real_root, buf, buflen);
47537+
47538+ return res;
47539+}
47540+
47541+static char *
47542+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
47543+ char *buf, int buflen)
47544+{
47545+ char *res;
47546+ struct path path;
47547+ struct path root;
47548+ struct task_struct *reaper = &init_task;
47549+
47550+ path.dentry = (struct dentry *)dentry;
47551+ path.mnt = (struct vfsmount *)vfsmnt;
47552+
47553+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
47554+ get_fs_root(reaper->fs, &root);
47555+
47556+ write_seqlock(&rename_lock);
47557+ br_read_lock(vfsmount_lock);
47558+ res = gen_full_path(&path, &root, buf, buflen);
47559+ br_read_unlock(vfsmount_lock);
47560+ write_sequnlock(&rename_lock);
47561+
47562+ path_put(&root);
47563+ return res;
47564+}
47565+
47566+static char *
47567+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
47568+{
47569+ char *ret;
47570+ write_seqlock(&rename_lock);
47571+ br_read_lock(vfsmount_lock);
47572+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
47573+ PAGE_SIZE);
47574+ br_read_unlock(vfsmount_lock);
47575+ write_sequnlock(&rename_lock);
47576+ return ret;
47577+}
47578+
47579+static char *
47580+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
47581+{
47582+ char *ret;
47583+ char *buf;
47584+ int buflen;
47585+
47586+ write_seqlock(&rename_lock);
47587+ br_read_lock(vfsmount_lock);
47588+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
47589+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
47590+ buflen = (int)(ret - buf);
47591+ if (buflen >= 5)
47592+ prepend(&ret, &buflen, "/proc", 5);
47593+ else
47594+ ret = strcpy(buf, "<path too long>");
47595+ br_read_unlock(vfsmount_lock);
47596+ write_sequnlock(&rename_lock);
47597+ return ret;
47598+}
47599+
47600+char *
47601+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
47602+{
47603+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
47604+ PAGE_SIZE);
47605+}
47606+
47607+char *
47608+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
47609+{
47610+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
47611+ PAGE_SIZE);
47612+}
47613+
47614+char *
47615+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
47616+{
47617+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
47618+ PAGE_SIZE);
47619+}
47620+
47621+char *
47622+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
47623+{
47624+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
47625+ PAGE_SIZE);
47626+}
47627+
47628+char *
47629+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
47630+{
47631+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
47632+ PAGE_SIZE);
47633+}
47634+
47635+__inline__ __u32
47636+to_gr_audit(const __u32 reqmode)
47637+{
47638+ /* masks off auditable permission flags, then shifts them to create
47639+ auditing flags, and adds the special case of append auditing if
47640+ we're requesting write */
47641+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
47642+}
47643+
47644+struct acl_subject_label *
47645+lookup_subject_map(const struct acl_subject_label *userp)
47646+{
47647+ unsigned int index = shash(userp, subj_map_set.s_size);
47648+ struct subject_map *match;
47649+
47650+ match = subj_map_set.s_hash[index];
47651+
47652+ while (match && match->user != userp)
47653+ match = match->next;
47654+
47655+ if (match != NULL)
47656+ return match->kernel;
47657+ else
47658+ return NULL;
47659+}
47660+
47661+static void
47662+insert_subj_map_entry(struct subject_map *subjmap)
47663+{
47664+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
47665+ struct subject_map **curr;
47666+
47667+ subjmap->prev = NULL;
47668+
47669+ curr = &subj_map_set.s_hash[index];
47670+ if (*curr != NULL)
47671+ (*curr)->prev = subjmap;
47672+
47673+ subjmap->next = *curr;
47674+ *curr = subjmap;
47675+
47676+ return;
47677+}
47678+
47679+static struct acl_role_label *
47680+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
47681+ const gid_t gid)
47682+{
47683+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
47684+ struct acl_role_label *match;
47685+ struct role_allowed_ip *ipp;
47686+ unsigned int x;
47687+ u32 curr_ip = task->signal->curr_ip;
47688+
47689+ task->signal->saved_ip = curr_ip;
47690+
47691+ match = acl_role_set.r_hash[index];
47692+
47693+ while (match) {
47694+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
47695+ for (x = 0; x < match->domain_child_num; x++) {
47696+ if (match->domain_children[x] == uid)
47697+ goto found;
47698+ }
47699+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
47700+ break;
47701+ match = match->next;
47702+ }
47703+found:
47704+ if (match == NULL) {
47705+ try_group:
47706+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
47707+ match = acl_role_set.r_hash[index];
47708+
47709+ while (match) {
47710+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
47711+ for (x = 0; x < match->domain_child_num; x++) {
47712+ if (match->domain_children[x] == gid)
47713+ goto found2;
47714+ }
47715+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
47716+ break;
47717+ match = match->next;
47718+ }
47719+found2:
47720+ if (match == NULL)
47721+ match = default_role;
47722+ if (match->allowed_ips == NULL)
47723+ return match;
47724+ else {
47725+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47726+ if (likely
47727+ ((ntohl(curr_ip) & ipp->netmask) ==
47728+ (ntohl(ipp->addr) & ipp->netmask)))
47729+ return match;
47730+ }
47731+ match = default_role;
47732+ }
47733+ } else if (match->allowed_ips == NULL) {
47734+ return match;
47735+ } else {
47736+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47737+ if (likely
47738+ ((ntohl(curr_ip) & ipp->netmask) ==
47739+ (ntohl(ipp->addr) & ipp->netmask)))
47740+ return match;
47741+ }
47742+ goto try_group;
47743+ }
47744+
47745+ return match;
47746+}
47747+
47748+struct acl_subject_label *
47749+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
47750+ const struct acl_role_label *role)
47751+{
47752+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
47753+ struct acl_subject_label *match;
47754+
47755+ match = role->subj_hash[index];
47756+
47757+ while (match && (match->inode != ino || match->device != dev ||
47758+ (match->mode & GR_DELETED))) {
47759+ match = match->next;
47760+ }
47761+
47762+ if (match && !(match->mode & GR_DELETED))
47763+ return match;
47764+ else
47765+ return NULL;
47766+}
47767+
47768+struct acl_subject_label *
47769+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
47770+ const struct acl_role_label *role)
47771+{
47772+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
47773+ struct acl_subject_label *match;
47774+
47775+ match = role->subj_hash[index];
47776+
47777+ while (match && (match->inode != ino || match->device != dev ||
47778+ !(match->mode & GR_DELETED))) {
47779+ match = match->next;
47780+ }
47781+
47782+ if (match && (match->mode & GR_DELETED))
47783+ return match;
47784+ else
47785+ return NULL;
47786+}
47787+
47788+static struct acl_object_label *
47789+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
47790+ const struct acl_subject_label *subj)
47791+{
47792+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47793+ struct acl_object_label *match;
47794+
47795+ match = subj->obj_hash[index];
47796+
47797+ while (match && (match->inode != ino || match->device != dev ||
47798+ (match->mode & GR_DELETED))) {
47799+ match = match->next;
47800+ }
47801+
47802+ if (match && !(match->mode & GR_DELETED))
47803+ return match;
47804+ else
47805+ return NULL;
47806+}
47807+
47808+static struct acl_object_label *
47809+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
47810+ const struct acl_subject_label *subj)
47811+{
47812+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47813+ struct acl_object_label *match;
47814+
47815+ match = subj->obj_hash[index];
47816+
47817+ while (match && (match->inode != ino || match->device != dev ||
47818+ !(match->mode & GR_DELETED))) {
47819+ match = match->next;
47820+ }
47821+
47822+ if (match && (match->mode & GR_DELETED))
47823+ return match;
47824+
47825+ match = subj->obj_hash[index];
47826+
47827+ while (match && (match->inode != ino || match->device != dev ||
47828+ (match->mode & GR_DELETED))) {
47829+ match = match->next;
47830+ }
47831+
47832+ if (match && !(match->mode & GR_DELETED))
47833+ return match;
47834+ else
47835+ return NULL;
47836+}
47837+
47838+static struct name_entry *
47839+lookup_name_entry(const char *name)
47840+{
47841+ unsigned int len = strlen(name);
47842+ unsigned int key = full_name_hash(name, len);
47843+ unsigned int index = key % name_set.n_size;
47844+ struct name_entry *match;
47845+
47846+ match = name_set.n_hash[index];
47847+
47848+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
47849+ match = match->next;
47850+
47851+ return match;
47852+}
47853+
47854+static struct name_entry *
47855+lookup_name_entry_create(const char *name)
47856+{
47857+ unsigned int len = strlen(name);
47858+ unsigned int key = full_name_hash(name, len);
47859+ unsigned int index = key % name_set.n_size;
47860+ struct name_entry *match;
47861+
47862+ match = name_set.n_hash[index];
47863+
47864+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47865+ !match->deleted))
47866+ match = match->next;
47867+
47868+ if (match && match->deleted)
47869+ return match;
47870+
47871+ match = name_set.n_hash[index];
47872+
47873+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47874+ match->deleted))
47875+ match = match->next;
47876+
47877+ if (match && !match->deleted)
47878+ return match;
47879+ else
47880+ return NULL;
47881+}
47882+
47883+static struct inodev_entry *
47884+lookup_inodev_entry(const ino_t ino, const dev_t dev)
47885+{
47886+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
47887+ struct inodev_entry *match;
47888+
47889+ match = inodev_set.i_hash[index];
47890+
47891+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
47892+ match = match->next;
47893+
47894+ return match;
47895+}
47896+
47897+static void
47898+insert_inodev_entry(struct inodev_entry *entry)
47899+{
47900+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
47901+ inodev_set.i_size);
47902+ struct inodev_entry **curr;
47903+
47904+ entry->prev = NULL;
47905+
47906+ curr = &inodev_set.i_hash[index];
47907+ if (*curr != NULL)
47908+ (*curr)->prev = entry;
47909+
47910+ entry->next = *curr;
47911+ *curr = entry;
47912+
47913+ return;
47914+}
47915+
47916+static void
47917+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
47918+{
47919+ unsigned int index =
47920+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
47921+ struct acl_role_label **curr;
47922+ struct acl_role_label *tmp;
47923+
47924+ curr = &acl_role_set.r_hash[index];
47925+
47926+ /* if role was already inserted due to domains and already has
47927+ a role in the same bucket as it attached, then we need to
47928+ combine these two buckets
47929+ */
47930+ if (role->next) {
47931+ tmp = role->next;
47932+ while (tmp->next)
47933+ tmp = tmp->next;
47934+ tmp->next = *curr;
47935+ } else
47936+ role->next = *curr;
47937+ *curr = role;
47938+
47939+ return;
47940+}
47941+
47942+static void
47943+insert_acl_role_label(struct acl_role_label *role)
47944+{
47945+ int i;
47946+
47947+ if (role_list == NULL) {
47948+ role_list = role;
47949+ role->prev = NULL;
47950+ } else {
47951+ role->prev = role_list;
47952+ role_list = role;
47953+ }
47954+
47955+ /* used for hash chains */
47956+ role->next = NULL;
47957+
47958+ if (role->roletype & GR_ROLE_DOMAIN) {
47959+ for (i = 0; i < role->domain_child_num; i++)
47960+ __insert_acl_role_label(role, role->domain_children[i]);
47961+ } else
47962+ __insert_acl_role_label(role, role->uidgid);
47963+}
47964+
47965+static int
47966+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
47967+{
47968+ struct name_entry **curr, *nentry;
47969+ struct inodev_entry *ientry;
47970+ unsigned int len = strlen(name);
47971+ unsigned int key = full_name_hash(name, len);
47972+ unsigned int index = key % name_set.n_size;
47973+
47974+ curr = &name_set.n_hash[index];
47975+
47976+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
47977+ curr = &((*curr)->next);
47978+
47979+ if (*curr != NULL)
47980+ return 1;
47981+
47982+ nentry = acl_alloc(sizeof (struct name_entry));
47983+ if (nentry == NULL)
47984+ return 0;
47985+ ientry = acl_alloc(sizeof (struct inodev_entry));
47986+ if (ientry == NULL)
47987+ return 0;
47988+ ientry->nentry = nentry;
47989+
47990+ nentry->key = key;
47991+ nentry->name = name;
47992+ nentry->inode = inode;
47993+ nentry->device = device;
47994+ nentry->len = len;
47995+ nentry->deleted = deleted;
47996+
47997+ nentry->prev = NULL;
47998+ curr = &name_set.n_hash[index];
47999+ if (*curr != NULL)
48000+ (*curr)->prev = nentry;
48001+ nentry->next = *curr;
48002+ *curr = nentry;
48003+
48004+ /* insert us into the table searchable by inode/dev */
48005+ insert_inodev_entry(ientry);
48006+
48007+ return 1;
48008+}
48009+
48010+static void
48011+insert_acl_obj_label(struct acl_object_label *obj,
48012+ struct acl_subject_label *subj)
48013+{
48014+ unsigned int index =
48015+ fhash(obj->inode, obj->device, subj->obj_hash_size);
48016+ struct acl_object_label **curr;
48017+
48018+
48019+ obj->prev = NULL;
48020+
48021+ curr = &subj->obj_hash[index];
48022+ if (*curr != NULL)
48023+ (*curr)->prev = obj;
48024+
48025+ obj->next = *curr;
48026+ *curr = obj;
48027+
48028+ return;
48029+}
48030+
48031+static void
48032+insert_acl_subj_label(struct acl_subject_label *obj,
48033+ struct acl_role_label *role)
48034+{
48035+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
48036+ struct acl_subject_label **curr;
48037+
48038+ obj->prev = NULL;
48039+
48040+ curr = &role->subj_hash[index];
48041+ if (*curr != NULL)
48042+ (*curr)->prev = obj;
48043+
48044+ obj->next = *curr;
48045+ *curr = obj;
48046+
48047+ return;
48048+}
48049+
48050+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
48051+
48052+static void *
48053+create_table(__u32 * len, int elementsize)
48054+{
48055+ unsigned int table_sizes[] = {
48056+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
48057+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
48058+ 4194301, 8388593, 16777213, 33554393, 67108859
48059+ };
48060+ void *newtable = NULL;
48061+ unsigned int pwr = 0;
48062+
48063+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
48064+ table_sizes[pwr] <= *len)
48065+ pwr++;
48066+
48067+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
48068+ return newtable;
48069+
48070+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
48071+ newtable =
48072+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
48073+ else
48074+ newtable = vmalloc(table_sizes[pwr] * elementsize);
48075+
48076+ *len = table_sizes[pwr];
48077+
48078+ return newtable;
48079+}
48080+
48081+static int
48082+init_variables(const struct gr_arg *arg)
48083+{
48084+ struct task_struct *reaper = &init_task;
48085+ unsigned int stacksize;
48086+
48087+ subj_map_set.s_size = arg->role_db.num_subjects;
48088+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
48089+ name_set.n_size = arg->role_db.num_objects;
48090+ inodev_set.i_size = arg->role_db.num_objects;
48091+
48092+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
48093+ !name_set.n_size || !inodev_set.i_size)
48094+ return 1;
48095+
48096+ if (!gr_init_uidset())
48097+ return 1;
48098+
48099+ /* set up the stack that holds allocation info */
48100+
48101+ stacksize = arg->role_db.num_pointers + 5;
48102+
48103+ if (!acl_alloc_stack_init(stacksize))
48104+ return 1;
48105+
48106+ /* grab reference for the real root dentry and vfsmount */
48107+ get_fs_root(reaper->fs, &real_root);
48108+
48109+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48110+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
48111+#endif
48112+
48113+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
48114+ if (fakefs_obj_rw == NULL)
48115+ return 1;
48116+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
48117+
48118+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
48119+ if (fakefs_obj_rwx == NULL)
48120+ return 1;
48121+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
48122+
48123+ subj_map_set.s_hash =
48124+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
48125+ acl_role_set.r_hash =
48126+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
48127+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
48128+ inodev_set.i_hash =
48129+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
48130+
48131+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
48132+ !name_set.n_hash || !inodev_set.i_hash)
48133+ return 1;
48134+
48135+ memset(subj_map_set.s_hash, 0,
48136+ sizeof(struct subject_map *) * subj_map_set.s_size);
48137+ memset(acl_role_set.r_hash, 0,
48138+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
48139+ memset(name_set.n_hash, 0,
48140+ sizeof (struct name_entry *) * name_set.n_size);
48141+ memset(inodev_set.i_hash, 0,
48142+ sizeof (struct inodev_entry *) * inodev_set.i_size);
48143+
48144+ return 0;
48145+}
48146+
48147+/* free information not needed after startup
48148+ currently contains user->kernel pointer mappings for subjects
48149+*/
48150+
48151+static void
48152+free_init_variables(void)
48153+{
48154+ __u32 i;
48155+
48156+ if (subj_map_set.s_hash) {
48157+ for (i = 0; i < subj_map_set.s_size; i++) {
48158+ if (subj_map_set.s_hash[i]) {
48159+ kfree(subj_map_set.s_hash[i]);
48160+ subj_map_set.s_hash[i] = NULL;
48161+ }
48162+ }
48163+
48164+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
48165+ PAGE_SIZE)
48166+ kfree(subj_map_set.s_hash);
48167+ else
48168+ vfree(subj_map_set.s_hash);
48169+ }
48170+
48171+ return;
48172+}
48173+
48174+static void
48175+free_variables(void)
48176+{
48177+ struct acl_subject_label *s;
48178+ struct acl_role_label *r;
48179+ struct task_struct *task, *task2;
48180+ unsigned int x;
48181+
48182+ gr_clear_learn_entries();
48183+
48184+ read_lock(&tasklist_lock);
48185+ do_each_thread(task2, task) {
48186+ task->acl_sp_role = 0;
48187+ task->acl_role_id = 0;
48188+ task->acl = NULL;
48189+ task->role = NULL;
48190+ } while_each_thread(task2, task);
48191+ read_unlock(&tasklist_lock);
48192+
48193+ /* release the reference to the real root dentry and vfsmount */
48194+ path_put(&real_root);
48195+
48196+ /* free all object hash tables */
48197+
48198+ FOR_EACH_ROLE_START(r)
48199+ if (r->subj_hash == NULL)
48200+ goto next_role;
48201+ FOR_EACH_SUBJECT_START(r, s, x)
48202+ if (s->obj_hash == NULL)
48203+ break;
48204+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
48205+ kfree(s->obj_hash);
48206+ else
48207+ vfree(s->obj_hash);
48208+ FOR_EACH_SUBJECT_END(s, x)
48209+ FOR_EACH_NESTED_SUBJECT_START(r, s)
48210+ if (s->obj_hash == NULL)
48211+ break;
48212+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
48213+ kfree(s->obj_hash);
48214+ else
48215+ vfree(s->obj_hash);
48216+ FOR_EACH_NESTED_SUBJECT_END(s)
48217+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
48218+ kfree(r->subj_hash);
48219+ else
48220+ vfree(r->subj_hash);
48221+ r->subj_hash = NULL;
48222+next_role:
48223+ FOR_EACH_ROLE_END(r)
48224+
48225+ acl_free_all();
48226+
48227+ if (acl_role_set.r_hash) {
48228+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
48229+ PAGE_SIZE)
48230+ kfree(acl_role_set.r_hash);
48231+ else
48232+ vfree(acl_role_set.r_hash);
48233+ }
48234+ if (name_set.n_hash) {
48235+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
48236+ PAGE_SIZE)
48237+ kfree(name_set.n_hash);
48238+ else
48239+ vfree(name_set.n_hash);
48240+ }
48241+
48242+ if (inodev_set.i_hash) {
48243+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
48244+ PAGE_SIZE)
48245+ kfree(inodev_set.i_hash);
48246+ else
48247+ vfree(inodev_set.i_hash);
48248+ }
48249+
48250+ gr_free_uidset();
48251+
48252+ memset(&name_set, 0, sizeof (struct name_db));
48253+ memset(&inodev_set, 0, sizeof (struct inodev_db));
48254+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
48255+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
48256+
48257+ default_role = NULL;
48258+ role_list = NULL;
48259+
48260+ return;
48261+}
48262+
48263+static __u32
48264+count_user_objs(struct acl_object_label *userp)
48265+{
48266+ struct acl_object_label o_tmp;
48267+ __u32 num = 0;
48268+
48269+ while (userp) {
48270+ if (copy_from_user(&o_tmp, userp,
48271+ sizeof (struct acl_object_label)))
48272+ break;
48273+
48274+ userp = o_tmp.prev;
48275+ num++;
48276+ }
48277+
48278+ return num;
48279+}
48280+
48281+static struct acl_subject_label *
48282+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
48283+
48284+static int
48285+copy_user_glob(struct acl_object_label *obj)
48286+{
48287+ struct acl_object_label *g_tmp, **guser;
48288+ unsigned int len;
48289+ char *tmp;
48290+
48291+ if (obj->globbed == NULL)
48292+ return 0;
48293+
48294+ guser = &obj->globbed;
48295+ while (*guser) {
48296+ g_tmp = (struct acl_object_label *)
48297+ acl_alloc(sizeof (struct acl_object_label));
48298+ if (g_tmp == NULL)
48299+ return -ENOMEM;
48300+
48301+ if (copy_from_user(g_tmp, *guser,
48302+ sizeof (struct acl_object_label)))
48303+ return -EFAULT;
48304+
48305+ len = strnlen_user(g_tmp->filename, PATH_MAX);
48306+
48307+ if (!len || len >= PATH_MAX)
48308+ return -EINVAL;
48309+
48310+ if ((tmp = (char *) acl_alloc(len)) == NULL)
48311+ return -ENOMEM;
48312+
48313+ if (copy_from_user(tmp, g_tmp->filename, len))
48314+ return -EFAULT;
48315+ tmp[len-1] = '\0';
48316+ g_tmp->filename = tmp;
48317+
48318+ *guser = g_tmp;
48319+ guser = &(g_tmp->next);
48320+ }
48321+
48322+ return 0;
48323+}
48324+
48325+static int
48326+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
48327+ struct acl_role_label *role)
48328+{
48329+ struct acl_object_label *o_tmp;
48330+ unsigned int len;
48331+ int ret;
48332+ char *tmp;
48333+
48334+ while (userp) {
48335+ if ((o_tmp = (struct acl_object_label *)
48336+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
48337+ return -ENOMEM;
48338+
48339+ if (copy_from_user(o_tmp, userp,
48340+ sizeof (struct acl_object_label)))
48341+ return -EFAULT;
48342+
48343+ userp = o_tmp->prev;
48344+
48345+ len = strnlen_user(o_tmp->filename, PATH_MAX);
48346+
48347+ if (!len || len >= PATH_MAX)
48348+ return -EINVAL;
48349+
48350+ if ((tmp = (char *) acl_alloc(len)) == NULL)
48351+ return -ENOMEM;
48352+
48353+ if (copy_from_user(tmp, o_tmp->filename, len))
48354+ return -EFAULT;
48355+ tmp[len-1] = '\0';
48356+ o_tmp->filename = tmp;
48357+
48358+ insert_acl_obj_label(o_tmp, subj);
48359+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
48360+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
48361+ return -ENOMEM;
48362+
48363+ ret = copy_user_glob(o_tmp);
48364+ if (ret)
48365+ return ret;
48366+
48367+ if (o_tmp->nested) {
48368+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
48369+ if (IS_ERR(o_tmp->nested))
48370+ return PTR_ERR(o_tmp->nested);
48371+
48372+ /* insert into nested subject list */
48373+ o_tmp->nested->next = role->hash->first;
48374+ role->hash->first = o_tmp->nested;
48375+ }
48376+ }
48377+
48378+ return 0;
48379+}
48380+
48381+static __u32
48382+count_user_subjs(struct acl_subject_label *userp)
48383+{
48384+ struct acl_subject_label s_tmp;
48385+ __u32 num = 0;
48386+
48387+ while (userp) {
48388+ if (copy_from_user(&s_tmp, userp,
48389+ sizeof (struct acl_subject_label)))
48390+ break;
48391+
48392+ userp = s_tmp.prev;
48393+ /* do not count nested subjects against this count, since
48394+ they are not included in the hash table, but are
48395+ attached to objects. We have already counted
48396+ the subjects in userspace for the allocation
48397+ stack
48398+ */
48399+ if (!(s_tmp.mode & GR_NESTED))
48400+ num++;
48401+ }
48402+
48403+ return num;
48404+}
48405+
48406+static int
48407+copy_user_allowedips(struct acl_role_label *rolep)
48408+{
48409+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
48410+
48411+ ruserip = rolep->allowed_ips;
48412+
48413+ while (ruserip) {
48414+ rlast = rtmp;
48415+
48416+ if ((rtmp = (struct role_allowed_ip *)
48417+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
48418+ return -ENOMEM;
48419+
48420+ if (copy_from_user(rtmp, ruserip,
48421+ sizeof (struct role_allowed_ip)))
48422+ return -EFAULT;
48423+
48424+ ruserip = rtmp->prev;
48425+
48426+ if (!rlast) {
48427+ rtmp->prev = NULL;
48428+ rolep->allowed_ips = rtmp;
48429+ } else {
48430+ rlast->next = rtmp;
48431+ rtmp->prev = rlast;
48432+ }
48433+
48434+ if (!ruserip)
48435+ rtmp->next = NULL;
48436+ }
48437+
48438+ return 0;
48439+}
48440+
48441+static int
48442+copy_user_transitions(struct acl_role_label *rolep)
48443+{
48444+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
48445+
48446+ unsigned int len;
48447+ char *tmp;
48448+
48449+ rusertp = rolep->transitions;
48450+
48451+ while (rusertp) {
48452+ rlast = rtmp;
48453+
48454+ if ((rtmp = (struct role_transition *)
48455+ acl_alloc(sizeof (struct role_transition))) == NULL)
48456+ return -ENOMEM;
48457+
48458+ if (copy_from_user(rtmp, rusertp,
48459+ sizeof (struct role_transition)))
48460+ return -EFAULT;
48461+
48462+ rusertp = rtmp->prev;
48463+
48464+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
48465+
48466+ if (!len || len >= GR_SPROLE_LEN)
48467+ return -EINVAL;
48468+
48469+ if ((tmp = (char *) acl_alloc(len)) == NULL)
48470+ return -ENOMEM;
48471+
48472+ if (copy_from_user(tmp, rtmp->rolename, len))
48473+ return -EFAULT;
48474+ tmp[len-1] = '\0';
48475+ rtmp->rolename = tmp;
48476+
48477+ if (!rlast) {
48478+ rtmp->prev = NULL;
48479+ rolep->transitions = rtmp;
48480+ } else {
48481+ rlast->next = rtmp;
48482+ rtmp->prev = rlast;
48483+ }
48484+
48485+ if (!rusertp)
48486+ rtmp->next = NULL;
48487+ }
48488+
48489+ return 0;
48490+}
48491+
48492+static struct acl_subject_label *
48493+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
48494+{
48495+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
48496+ unsigned int len;
48497+ char *tmp;
48498+ __u32 num_objs;
48499+ struct acl_ip_label **i_tmp, *i_utmp2;
48500+ struct gr_hash_struct ghash;
48501+ struct subject_map *subjmap;
48502+ unsigned int i_num;
48503+ int err;
48504+
48505+ s_tmp = lookup_subject_map(userp);
48506+
48507+ /* we've already copied this subject into the kernel, just return
48508+ the reference to it, and don't copy it over again
48509+ */
48510+ if (s_tmp)
48511+ return(s_tmp);
48512+
48513+ if ((s_tmp = (struct acl_subject_label *)
48514+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
48515+ return ERR_PTR(-ENOMEM);
48516+
48517+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
48518+ if (subjmap == NULL)
48519+ return ERR_PTR(-ENOMEM);
48520+
48521+ subjmap->user = userp;
48522+ subjmap->kernel = s_tmp;
48523+ insert_subj_map_entry(subjmap);
48524+
48525+ if (copy_from_user(s_tmp, userp,
48526+ sizeof (struct acl_subject_label)))
48527+ return ERR_PTR(-EFAULT);
48528+
48529+ len = strnlen_user(s_tmp->filename, PATH_MAX);
48530+
48531+ if (!len || len >= PATH_MAX)
48532+ return ERR_PTR(-EINVAL);
48533+
48534+ if ((tmp = (char *) acl_alloc(len)) == NULL)
48535+ return ERR_PTR(-ENOMEM);
48536+
48537+ if (copy_from_user(tmp, s_tmp->filename, len))
48538+ return ERR_PTR(-EFAULT);
48539+ tmp[len-1] = '\0';
48540+ s_tmp->filename = tmp;
48541+
48542+ if (!strcmp(s_tmp->filename, "/"))
48543+ role->root_label = s_tmp;
48544+
48545+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
48546+ return ERR_PTR(-EFAULT);
48547+
48548+ /* copy user and group transition tables */
48549+
48550+ if (s_tmp->user_trans_num) {
48551+ uid_t *uidlist;
48552+
48553+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
48554+ if (uidlist == NULL)
48555+ return ERR_PTR(-ENOMEM);
48556+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
48557+ return ERR_PTR(-EFAULT);
48558+
48559+ s_tmp->user_transitions = uidlist;
48560+ }
48561+
48562+ if (s_tmp->group_trans_num) {
48563+ gid_t *gidlist;
48564+
48565+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
48566+ if (gidlist == NULL)
48567+ return ERR_PTR(-ENOMEM);
48568+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
48569+ return ERR_PTR(-EFAULT);
48570+
48571+ s_tmp->group_transitions = gidlist;
48572+ }
48573+
48574+ /* set up object hash table */
48575+ num_objs = count_user_objs(ghash.first);
48576+
48577+ s_tmp->obj_hash_size = num_objs;
48578+ s_tmp->obj_hash =
48579+ (struct acl_object_label **)
48580+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
48581+
48582+ if (!s_tmp->obj_hash)
48583+ return ERR_PTR(-ENOMEM);
48584+
48585+ memset(s_tmp->obj_hash, 0,
48586+ s_tmp->obj_hash_size *
48587+ sizeof (struct acl_object_label *));
48588+
48589+ /* add in objects */
48590+ err = copy_user_objs(ghash.first, s_tmp, role);
48591+
48592+ if (err)
48593+ return ERR_PTR(err);
48594+
48595+ /* set pointer for parent subject */
48596+ if (s_tmp->parent_subject) {
48597+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
48598+
48599+ if (IS_ERR(s_tmp2))
48600+ return s_tmp2;
48601+
48602+ s_tmp->parent_subject = s_tmp2;
48603+ }
48604+
48605+ /* add in ip acls */
48606+
48607+ if (!s_tmp->ip_num) {
48608+ s_tmp->ips = NULL;
48609+ goto insert;
48610+ }
48611+
48612+ i_tmp =
48613+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
48614+ sizeof (struct acl_ip_label *));
48615+
48616+ if (!i_tmp)
48617+ return ERR_PTR(-ENOMEM);
48618+
48619+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
48620+ *(i_tmp + i_num) =
48621+ (struct acl_ip_label *)
48622+ acl_alloc(sizeof (struct acl_ip_label));
48623+ if (!*(i_tmp + i_num))
48624+ return ERR_PTR(-ENOMEM);
48625+
48626+ if (copy_from_user
48627+ (&i_utmp2, s_tmp->ips + i_num,
48628+ sizeof (struct acl_ip_label *)))
48629+ return ERR_PTR(-EFAULT);
48630+
48631+ if (copy_from_user
48632+ (*(i_tmp + i_num), i_utmp2,
48633+ sizeof (struct acl_ip_label)))
48634+ return ERR_PTR(-EFAULT);
48635+
48636+ if ((*(i_tmp + i_num))->iface == NULL)
48637+ continue;
48638+
48639+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
48640+ if (!len || len >= IFNAMSIZ)
48641+ return ERR_PTR(-EINVAL);
48642+ tmp = acl_alloc(len);
48643+ if (tmp == NULL)
48644+ return ERR_PTR(-ENOMEM);
48645+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
48646+ return ERR_PTR(-EFAULT);
48647+ (*(i_tmp + i_num))->iface = tmp;
48648+ }
48649+
48650+ s_tmp->ips = i_tmp;
48651+
48652+insert:
48653+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
48654+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
48655+ return ERR_PTR(-ENOMEM);
48656+
48657+ return s_tmp;
48658+}
48659+
48660+static int
48661+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
48662+{
48663+ struct acl_subject_label s_pre;
48664+ struct acl_subject_label * ret;
48665+ int err;
48666+
48667+ while (userp) {
48668+ if (copy_from_user(&s_pre, userp,
48669+ sizeof (struct acl_subject_label)))
48670+ return -EFAULT;
48671+
48672+ /* do not add nested subjects here, add
48673+ while parsing objects
48674+ */
48675+
48676+ if (s_pre.mode & GR_NESTED) {
48677+ userp = s_pre.prev;
48678+ continue;
48679+ }
48680+
48681+ ret = do_copy_user_subj(userp, role);
48682+
48683+ err = PTR_ERR(ret);
48684+ if (IS_ERR(ret))
48685+ return err;
48686+
48687+ insert_acl_subj_label(ret, role);
48688+
48689+ userp = s_pre.prev;
48690+ }
48691+
48692+ return 0;
48693+}
48694+
48695+static int
48696+copy_user_acl(struct gr_arg *arg)
48697+{
48698+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
48699+ struct sprole_pw *sptmp;
48700+ struct gr_hash_struct *ghash;
48701+ uid_t *domainlist;
48702+ unsigned int r_num;
48703+ unsigned int len;
48704+ char *tmp;
48705+ int err = 0;
48706+ __u16 i;
48707+ __u32 num_subjs;
48708+
48709+ /* we need a default and kernel role */
48710+ if (arg->role_db.num_roles < 2)
48711+ return -EINVAL;
48712+
48713+ /* copy special role authentication info from userspace */
48714+
48715+ num_sprole_pws = arg->num_sprole_pws;
48716+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
48717+
48718+ if (!acl_special_roles) {
48719+ err = -ENOMEM;
48720+ goto cleanup;
48721+ }
48722+
48723+ for (i = 0; i < num_sprole_pws; i++) {
48724+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
48725+ if (!sptmp) {
48726+ err = -ENOMEM;
48727+ goto cleanup;
48728+ }
48729+ if (copy_from_user(sptmp, arg->sprole_pws + i,
48730+ sizeof (struct sprole_pw))) {
48731+ err = -EFAULT;
48732+ goto cleanup;
48733+ }
48734+
48735+ len =
48736+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
48737+
48738+ if (!len || len >= GR_SPROLE_LEN) {
48739+ err = -EINVAL;
48740+ goto cleanup;
48741+ }
48742+
48743+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
48744+ err = -ENOMEM;
48745+ goto cleanup;
48746+ }
48747+
48748+ if (copy_from_user(tmp, sptmp->rolename, len)) {
48749+ err = -EFAULT;
48750+ goto cleanup;
48751+ }
48752+ tmp[len-1] = '\0';
48753+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48754+ printk(KERN_ALERT "Copying special role %s\n", tmp);
48755+#endif
48756+ sptmp->rolename = tmp;
48757+ acl_special_roles[i] = sptmp;
48758+ }
48759+
48760+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
48761+
48762+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
48763+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
48764+
48765+ if (!r_tmp) {
48766+ err = -ENOMEM;
48767+ goto cleanup;
48768+ }
48769+
48770+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
48771+ sizeof (struct acl_role_label *))) {
48772+ err = -EFAULT;
48773+ goto cleanup;
48774+ }
48775+
48776+ if (copy_from_user(r_tmp, r_utmp2,
48777+ sizeof (struct acl_role_label))) {
48778+ err = -EFAULT;
48779+ goto cleanup;
48780+ }
48781+
48782+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
48783+
48784+ if (!len || len >= PATH_MAX) {
48785+ err = -EINVAL;
48786+ goto cleanup;
48787+ }
48788+
48789+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
48790+ err = -ENOMEM;
48791+ goto cleanup;
48792+ }
48793+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
48794+ err = -EFAULT;
48795+ goto cleanup;
48796+ }
48797+ tmp[len-1] = '\0';
48798+ r_tmp->rolename = tmp;
48799+
48800+ if (!strcmp(r_tmp->rolename, "default")
48801+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
48802+ default_role = r_tmp;
48803+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
48804+ kernel_role = r_tmp;
48805+ }
48806+
48807+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
48808+ err = -ENOMEM;
48809+ goto cleanup;
48810+ }
48811+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
48812+ err = -EFAULT;
48813+ goto cleanup;
48814+ }
48815+
48816+ r_tmp->hash = ghash;
48817+
48818+ num_subjs = count_user_subjs(r_tmp->hash->first);
48819+
48820+ r_tmp->subj_hash_size = num_subjs;
48821+ r_tmp->subj_hash =
48822+ (struct acl_subject_label **)
48823+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
48824+
48825+ if (!r_tmp->subj_hash) {
48826+ err = -ENOMEM;
48827+ goto cleanup;
48828+ }
48829+
48830+ err = copy_user_allowedips(r_tmp);
48831+ if (err)
48832+ goto cleanup;
48833+
48834+ /* copy domain info */
48835+ if (r_tmp->domain_children != NULL) {
48836+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
48837+ if (domainlist == NULL) {
48838+ err = -ENOMEM;
48839+ goto cleanup;
48840+ }
48841+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
48842+ err = -EFAULT;
48843+ goto cleanup;
48844+ }
48845+ r_tmp->domain_children = domainlist;
48846+ }
48847+
48848+ err = copy_user_transitions(r_tmp);
48849+ if (err)
48850+ goto cleanup;
48851+
48852+ memset(r_tmp->subj_hash, 0,
48853+ r_tmp->subj_hash_size *
48854+ sizeof (struct acl_subject_label *));
48855+
48856+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
48857+
48858+ if (err)
48859+ goto cleanup;
48860+
48861+ /* set nested subject list to null */
48862+ r_tmp->hash->first = NULL;
48863+
48864+ insert_acl_role_label(r_tmp);
48865+ }
48866+
48867+ goto return_err;
48868+ cleanup:
48869+ free_variables();
48870+ return_err:
48871+ return err;
48872+
48873+}
48874+
48875+static int
48876+gracl_init(struct gr_arg *args)
48877+{
48878+ int error = 0;
48879+
48880+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
48881+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
48882+
48883+ if (init_variables(args)) {
48884+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
48885+ error = -ENOMEM;
48886+ free_variables();
48887+ goto out;
48888+ }
48889+
48890+ error = copy_user_acl(args);
48891+ free_init_variables();
48892+ if (error) {
48893+ free_variables();
48894+ goto out;
48895+ }
48896+
48897+ if ((error = gr_set_acls(0))) {
48898+ free_variables();
48899+ goto out;
48900+ }
48901+
48902+ pax_open_kernel();
48903+ gr_status |= GR_READY;
48904+ pax_close_kernel();
48905+
48906+ out:
48907+ return error;
48908+}
48909+
48910+/* derived from glibc fnmatch() 0: match, 1: no match*/
48911+
48912+static int
48913+glob_match(const char *p, const char *n)
48914+{
48915+ char c;
48916+
48917+ while ((c = *p++) != '\0') {
48918+ switch (c) {
48919+ case '?':
48920+ if (*n == '\0')
48921+ return 1;
48922+ else if (*n == '/')
48923+ return 1;
48924+ break;
48925+ case '\\':
48926+ if (*n != c)
48927+ return 1;
48928+ break;
48929+ case '*':
48930+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
48931+ if (*n == '/')
48932+ return 1;
48933+ else if (c == '?') {
48934+ if (*n == '\0')
48935+ return 1;
48936+ else
48937+ ++n;
48938+ }
48939+ }
48940+ if (c == '\0') {
48941+ return 0;
48942+ } else {
48943+ const char *endp;
48944+
48945+ if ((endp = strchr(n, '/')) == NULL)
48946+ endp = n + strlen(n);
48947+
48948+ if (c == '[') {
48949+ for (--p; n < endp; ++n)
48950+ if (!glob_match(p, n))
48951+ return 0;
48952+ } else if (c == '/') {
48953+ while (*n != '\0' && *n != '/')
48954+ ++n;
48955+ if (*n == '/' && !glob_match(p, n + 1))
48956+ return 0;
48957+ } else {
48958+ for (--p; n < endp; ++n)
48959+ if (*n == c && !glob_match(p, n))
48960+ return 0;
48961+ }
48962+
48963+ return 1;
48964+ }
48965+ case '[':
48966+ {
48967+ int not;
48968+ char cold;
48969+
48970+ if (*n == '\0' || *n == '/')
48971+ return 1;
48972+
48973+ not = (*p == '!' || *p == '^');
48974+ if (not)
48975+ ++p;
48976+
48977+ c = *p++;
48978+ for (;;) {
48979+ unsigned char fn = (unsigned char)*n;
48980+
48981+ if (c == '\0')
48982+ return 1;
48983+ else {
48984+ if (c == fn)
48985+ goto matched;
48986+ cold = c;
48987+ c = *p++;
48988+
48989+ if (c == '-' && *p != ']') {
48990+ unsigned char cend = *p++;
48991+
48992+ if (cend == '\0')
48993+ return 1;
48994+
48995+ if (cold <= fn && fn <= cend)
48996+ goto matched;
48997+
48998+ c = *p++;
48999+ }
49000+ }
49001+
49002+ if (c == ']')
49003+ break;
49004+ }
49005+ if (!not)
49006+ return 1;
49007+ break;
49008+ matched:
49009+ while (c != ']') {
49010+ if (c == '\0')
49011+ return 1;
49012+
49013+ c = *p++;
49014+ }
49015+ if (not)
49016+ return 1;
49017+ }
49018+ break;
49019+ default:
49020+ if (c != *n)
49021+ return 1;
49022+ }
49023+
49024+ ++n;
49025+ }
49026+
49027+ if (*n == '\0')
49028+ return 0;
49029+
49030+ if (*n == '/')
49031+ return 0;
49032+
49033+ return 1;
49034+}
49035+
49036+static struct acl_object_label *
49037+chk_glob_label(struct acl_object_label *globbed,
49038+ struct dentry *dentry, struct vfsmount *mnt, char **path)
49039+{
49040+ struct acl_object_label *tmp;
49041+
49042+ if (*path == NULL)
49043+ *path = gr_to_filename_nolock(dentry, mnt);
49044+
49045+ tmp = globbed;
49046+
49047+ while (tmp) {
49048+ if (!glob_match(tmp->filename, *path))
49049+ return tmp;
49050+ tmp = tmp->next;
49051+ }
49052+
49053+ return NULL;
49054+}
49055+
49056+static struct acl_object_label *
49057+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
49058+ const ino_t curr_ino, const dev_t curr_dev,
49059+ const struct acl_subject_label *subj, char **path, const int checkglob)
49060+{
49061+ struct acl_subject_label *tmpsubj;
49062+ struct acl_object_label *retval;
49063+ struct acl_object_label *retval2;
49064+
49065+ tmpsubj = (struct acl_subject_label *) subj;
49066+ read_lock(&gr_inode_lock);
49067+ do {
49068+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
49069+ if (retval) {
49070+ if (checkglob && retval->globbed) {
49071+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
49072+ (struct vfsmount *)orig_mnt, path);
49073+ if (retval2)
49074+ retval = retval2;
49075+ }
49076+ break;
49077+ }
49078+ } while ((tmpsubj = tmpsubj->parent_subject));
49079+ read_unlock(&gr_inode_lock);
49080+
49081+ return retval;
49082+}
49083+
49084+static __inline__ struct acl_object_label *
49085+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
49086+ struct dentry *curr_dentry,
49087+ const struct acl_subject_label *subj, char **path, const int checkglob)
49088+{
49089+ int newglob = checkglob;
49090+ ino_t inode;
49091+ dev_t device;
49092+
49093+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
49094+ as we don't want a / * rule to match instead of the / object
49095+ don't do this for create lookups that call this function though, since they're looking up
49096+ on the parent and thus need globbing checks on all paths
49097+ */
49098+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
49099+ newglob = GR_NO_GLOB;
49100+
49101+ spin_lock(&curr_dentry->d_lock);
49102+ inode = curr_dentry->d_inode->i_ino;
49103+ device = __get_dev(curr_dentry);
49104+ spin_unlock(&curr_dentry->d_lock);
49105+
49106+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
49107+}
49108+
49109+static struct acl_object_label *
49110+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49111+ const struct acl_subject_label *subj, char *path, const int checkglob)
49112+{
49113+ struct dentry *dentry = (struct dentry *) l_dentry;
49114+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
49115+ struct acl_object_label *retval;
49116+ struct dentry *parent;
49117+
49118+ write_seqlock(&rename_lock);
49119+ br_read_lock(vfsmount_lock);
49120+
49121+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
49122+#ifdef CONFIG_NET
49123+ mnt == sock_mnt ||
49124+#endif
49125+#ifdef CONFIG_HUGETLBFS
49126+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
49127+#endif
49128+ /* ignore Eric Biederman */
49129+ IS_PRIVATE(l_dentry->d_inode))) {
49130+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
49131+ goto out;
49132+ }
49133+
49134+ for (;;) {
49135+ if (dentry == real_root.dentry && mnt == real_root.mnt)
49136+ break;
49137+
49138+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
49139+ if (mnt->mnt_parent == mnt)
49140+ break;
49141+
49142+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
49143+ if (retval != NULL)
49144+ goto out;
49145+
49146+ dentry = mnt->mnt_mountpoint;
49147+ mnt = mnt->mnt_parent;
49148+ continue;
49149+ }
49150+
49151+ parent = dentry->d_parent;
49152+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
49153+ if (retval != NULL)
49154+ goto out;
49155+
49156+ dentry = parent;
49157+ }
49158+
49159+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
49160+
49161+ /* real_root is pinned so we don't have to hold a reference */
49162+ if (retval == NULL)
49163+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
49164+out:
49165+ br_read_unlock(vfsmount_lock);
49166+ write_sequnlock(&rename_lock);
49167+
49168+ BUG_ON(retval == NULL);
49169+
49170+ return retval;
49171+}
49172+
49173+static __inline__ struct acl_object_label *
49174+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49175+ const struct acl_subject_label *subj)
49176+{
49177+ char *path = NULL;
49178+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
49179+}
49180+
49181+static __inline__ struct acl_object_label *
49182+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49183+ const struct acl_subject_label *subj)
49184+{
49185+ char *path = NULL;
49186+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
49187+}
49188+
49189+static __inline__ struct acl_object_label *
49190+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49191+ const struct acl_subject_label *subj, char *path)
49192+{
49193+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
49194+}
49195+
49196+static struct acl_subject_label *
49197+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49198+ const struct acl_role_label *role)
49199+{
49200+ struct dentry *dentry = (struct dentry *) l_dentry;
49201+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
49202+ struct acl_subject_label *retval;
49203+ struct dentry *parent;
49204+
49205+ write_seqlock(&rename_lock);
49206+ br_read_lock(vfsmount_lock);
49207+
49208+ for (;;) {
49209+ if (dentry == real_root.dentry && mnt == real_root.mnt)
49210+ break;
49211+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
49212+ if (mnt->mnt_parent == mnt)
49213+ break;
49214+
49215+ spin_lock(&dentry->d_lock);
49216+ read_lock(&gr_inode_lock);
49217+ retval =
49218+ lookup_acl_subj_label(dentry->d_inode->i_ino,
49219+ __get_dev(dentry), role);
49220+ read_unlock(&gr_inode_lock);
49221+ spin_unlock(&dentry->d_lock);
49222+ if (retval != NULL)
49223+ goto out;
49224+
49225+ dentry = mnt->mnt_mountpoint;
49226+ mnt = mnt->mnt_parent;
49227+ continue;
49228+ }
49229+
49230+ spin_lock(&dentry->d_lock);
49231+ read_lock(&gr_inode_lock);
49232+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
49233+ __get_dev(dentry), role);
49234+ read_unlock(&gr_inode_lock);
49235+ parent = dentry->d_parent;
49236+ spin_unlock(&dentry->d_lock);
49237+
49238+ if (retval != NULL)
49239+ goto out;
49240+
49241+ dentry = parent;
49242+ }
49243+
49244+ spin_lock(&dentry->d_lock);
49245+ read_lock(&gr_inode_lock);
49246+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
49247+ __get_dev(dentry), role);
49248+ read_unlock(&gr_inode_lock);
49249+ spin_unlock(&dentry->d_lock);
49250+
49251+ if (unlikely(retval == NULL)) {
49252+ /* real_root is pinned, we don't need to hold a reference */
49253+ read_lock(&gr_inode_lock);
49254+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
49255+ __get_dev(real_root.dentry), role);
49256+ read_unlock(&gr_inode_lock);
49257+ }
49258+out:
49259+ br_read_unlock(vfsmount_lock);
49260+ write_sequnlock(&rename_lock);
49261+
49262+ BUG_ON(retval == NULL);
49263+
49264+ return retval;
49265+}
49266+
49267+static void
49268+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
49269+{
49270+ struct task_struct *task = current;
49271+ const struct cred *cred = current_cred();
49272+
49273+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
49274+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
49275+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
49276+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
49277+
49278+ return;
49279+}
49280+
49281+static void
49282+gr_log_learn_sysctl(const char *path, const __u32 mode)
49283+{
49284+ struct task_struct *task = current;
49285+ const struct cred *cred = current_cred();
49286+
49287+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
49288+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
49289+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
49290+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
49291+
49292+ return;
49293+}
49294+
49295+static void
49296+gr_log_learn_id_change(const char type, const unsigned int real,
49297+ const unsigned int effective, const unsigned int fs)
49298+{
49299+ struct task_struct *task = current;
49300+ const struct cred *cred = current_cred();
49301+
49302+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
49303+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
49304+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
49305+ type, real, effective, fs, &task->signal->saved_ip);
49306+
49307+ return;
49308+}
49309+
49310+__u32
49311+gr_search_file(const struct dentry * dentry, const __u32 mode,
49312+ const struct vfsmount * mnt)
49313+{
49314+ __u32 retval = mode;
49315+ struct acl_subject_label *curracl;
49316+ struct acl_object_label *currobj;
49317+
49318+ if (unlikely(!(gr_status & GR_READY)))
49319+ return (mode & ~GR_AUDITS);
49320+
49321+ curracl = current->acl;
49322+
49323+ currobj = chk_obj_label(dentry, mnt, curracl);
49324+ retval = currobj->mode & mode;
49325+
49326+ /* if we're opening a specified transfer file for writing
49327+ (e.g. /dev/initctl), then transfer our role to init
49328+ */
49329+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
49330+ current->role->roletype & GR_ROLE_PERSIST)) {
49331+ struct task_struct *task = init_pid_ns.child_reaper;
49332+
49333+ if (task->role != current->role) {
49334+ task->acl_sp_role = 0;
49335+ task->acl_role_id = current->acl_role_id;
49336+ task->role = current->role;
49337+ rcu_read_lock();
49338+ read_lock(&grsec_exec_file_lock);
49339+ gr_apply_subject_to_task(task);
49340+ read_unlock(&grsec_exec_file_lock);
49341+ rcu_read_unlock();
49342+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
49343+ }
49344+ }
49345+
49346+ if (unlikely
49347+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
49348+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
49349+ __u32 new_mode = mode;
49350+
49351+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49352+
49353+ retval = new_mode;
49354+
49355+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
49356+ new_mode |= GR_INHERIT;
49357+
49358+ if (!(mode & GR_NOLEARN))
49359+ gr_log_learn(dentry, mnt, new_mode);
49360+ }
49361+
49362+ return retval;
49363+}
49364+
49365+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
49366+ const struct dentry *parent,
49367+ const struct vfsmount *mnt)
49368+{
49369+ struct name_entry *match;
49370+ struct acl_object_label *matchpo;
49371+ struct acl_subject_label *curracl;
49372+ char *path;
49373+
49374+ if (unlikely(!(gr_status & GR_READY)))
49375+ return NULL;
49376+
49377+ preempt_disable();
49378+ path = gr_to_filename_rbac(new_dentry, mnt);
49379+ match = lookup_name_entry_create(path);
49380+
49381+ curracl = current->acl;
49382+
49383+ if (match) {
49384+ read_lock(&gr_inode_lock);
49385+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
49386+ read_unlock(&gr_inode_lock);
49387+
49388+ if (matchpo) {
49389+ preempt_enable();
49390+ return matchpo;
49391+ }
49392+ }
49393+
49394+ // lookup parent
49395+
49396+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
49397+
49398+ preempt_enable();
49399+ return matchpo;
49400+}
49401+
49402+__u32
49403+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
49404+ const struct vfsmount * mnt, const __u32 mode)
49405+{
49406+ struct acl_object_label *matchpo;
49407+ __u32 retval;
49408+
49409+ if (unlikely(!(gr_status & GR_READY)))
49410+ return (mode & ~GR_AUDITS);
49411+
49412+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
49413+
49414+ retval = matchpo->mode & mode;
49415+
49416+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
49417+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
49418+ __u32 new_mode = mode;
49419+
49420+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49421+
49422+ gr_log_learn(new_dentry, mnt, new_mode);
49423+ return new_mode;
49424+ }
49425+
49426+ return retval;
49427+}
49428+
49429+__u32
49430+gr_check_link(const struct dentry * new_dentry,
49431+ const struct dentry * parent_dentry,
49432+ const struct vfsmount * parent_mnt,
49433+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
49434+{
49435+ struct acl_object_label *obj;
49436+ __u32 oldmode, newmode;
49437+ __u32 needmode;
49438+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
49439+ GR_DELETE | GR_INHERIT;
49440+
49441+ if (unlikely(!(gr_status & GR_READY)))
49442+ return (GR_CREATE | GR_LINK);
49443+
49444+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
49445+ oldmode = obj->mode;
49446+
49447+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
49448+ newmode = obj->mode;
49449+
49450+ needmode = newmode & checkmodes;
49451+
49452+ // old name for hardlink must have at least the permissions of the new name
49453+ if ((oldmode & needmode) != needmode)
49454+ goto bad;
49455+
49456+ // if old name had restrictions/auditing, make sure the new name does as well
49457+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
49458+
49459+ // don't allow hardlinking of suid/sgid files without permission
49460+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
49461+ needmode |= GR_SETID;
49462+
49463+ if ((newmode & needmode) != needmode)
49464+ goto bad;
49465+
49466+ // enforce minimum permissions
49467+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
49468+ return newmode;
49469+bad:
49470+ needmode = oldmode;
49471+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
49472+ needmode |= GR_SETID;
49473+
49474+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
49475+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
49476+ return (GR_CREATE | GR_LINK);
49477+ } else if (newmode & GR_SUPPRESS)
49478+ return GR_SUPPRESS;
49479+ else
49480+ return 0;
49481+}
49482+
49483+int
49484+gr_check_hidden_task(const struct task_struct *task)
49485+{
49486+ if (unlikely(!(gr_status & GR_READY)))
49487+ return 0;
49488+
49489+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
49490+ return 1;
49491+
49492+ return 0;
49493+}
49494+
49495+int
49496+gr_check_protected_task(const struct task_struct *task)
49497+{
49498+ if (unlikely(!(gr_status & GR_READY) || !task))
49499+ return 0;
49500+
49501+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
49502+ task->acl != current->acl)
49503+ return 1;
49504+
49505+ return 0;
49506+}
49507+
49508+int
49509+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
49510+{
49511+ struct task_struct *p;
49512+ int ret = 0;
49513+
49514+ if (unlikely(!(gr_status & GR_READY) || !pid))
49515+ return ret;
49516+
49517+ read_lock(&tasklist_lock);
49518+ do_each_pid_task(pid, type, p) {
49519+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
49520+ p->acl != current->acl) {
49521+ ret = 1;
49522+ goto out;
49523+ }
49524+ } while_each_pid_task(pid, type, p);
49525+out:
49526+ read_unlock(&tasklist_lock);
49527+
49528+ return ret;
49529+}
49530+
49531+void
49532+gr_copy_label(struct task_struct *tsk)
49533+{
49534+ tsk->signal->used_accept = 0;
49535+ tsk->acl_sp_role = 0;
49536+ tsk->acl_role_id = current->acl_role_id;
49537+ tsk->acl = current->acl;
49538+ tsk->role = current->role;
49539+ tsk->signal->curr_ip = current->signal->curr_ip;
49540+ tsk->signal->saved_ip = current->signal->saved_ip;
49541+ if (current->exec_file)
49542+ get_file(current->exec_file);
49543+ tsk->exec_file = current->exec_file;
49544+ tsk->is_writable = current->is_writable;
49545+ if (unlikely(current->signal->used_accept)) {
49546+ current->signal->curr_ip = 0;
49547+ current->signal->saved_ip = 0;
49548+ }
49549+
49550+ return;
49551+}
49552+
49553+static void
49554+gr_set_proc_res(struct task_struct *task)
49555+{
49556+ struct acl_subject_label *proc;
49557+ unsigned short i;
49558+
49559+ proc = task->acl;
49560+
49561+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
49562+ return;
49563+
49564+ for (i = 0; i < RLIM_NLIMITS; i++) {
49565+ if (!(proc->resmask & (1 << i)))
49566+ continue;
49567+
49568+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
49569+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
49570+ }
49571+
49572+ return;
49573+}
49574+
49575+extern int __gr_process_user_ban(struct user_struct *user);
49576+
49577+int
49578+gr_check_user_change(int real, int effective, int fs)
49579+{
49580+ unsigned int i;
49581+ __u16 num;
49582+ uid_t *uidlist;
49583+ int curuid;
49584+ int realok = 0;
49585+ int effectiveok = 0;
49586+ int fsok = 0;
49587+
49588+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49589+ struct user_struct *user;
49590+
49591+ if (real == -1)
49592+ goto skipit;
49593+
49594+ user = find_user(real);
49595+ if (user == NULL)
49596+ goto skipit;
49597+
49598+ if (__gr_process_user_ban(user)) {
49599+ /* for find_user */
49600+ free_uid(user);
49601+ return 1;
49602+ }
49603+
49604+ /* for find_user */
49605+ free_uid(user);
49606+
49607+skipit:
49608+#endif
49609+
49610+ if (unlikely(!(gr_status & GR_READY)))
49611+ return 0;
49612+
49613+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49614+ gr_log_learn_id_change('u', real, effective, fs);
49615+
49616+ num = current->acl->user_trans_num;
49617+ uidlist = current->acl->user_transitions;
49618+
49619+ if (uidlist == NULL)
49620+ return 0;
49621+
49622+ if (real == -1)
49623+ realok = 1;
49624+ if (effective == -1)
49625+ effectiveok = 1;
49626+ if (fs == -1)
49627+ fsok = 1;
49628+
49629+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
49630+ for (i = 0; i < num; i++) {
49631+ curuid = (int)uidlist[i];
49632+ if (real == curuid)
49633+ realok = 1;
49634+ if (effective == curuid)
49635+ effectiveok = 1;
49636+ if (fs == curuid)
49637+ fsok = 1;
49638+ }
49639+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
49640+ for (i = 0; i < num; i++) {
49641+ curuid = (int)uidlist[i];
49642+ if (real == curuid)
49643+ break;
49644+ if (effective == curuid)
49645+ break;
49646+ if (fs == curuid)
49647+ break;
49648+ }
49649+ /* not in deny list */
49650+ if (i == num) {
49651+ realok = 1;
49652+ effectiveok = 1;
49653+ fsok = 1;
49654+ }
49655+ }
49656+
49657+ if (realok && effectiveok && fsok)
49658+ return 0;
49659+ else {
49660+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49661+ return 1;
49662+ }
49663+}
49664+
49665+int
49666+gr_check_group_change(int real, int effective, int fs)
49667+{
49668+ unsigned int i;
49669+ __u16 num;
49670+ gid_t *gidlist;
49671+ int curgid;
49672+ int realok = 0;
49673+ int effectiveok = 0;
49674+ int fsok = 0;
49675+
49676+ if (unlikely(!(gr_status & GR_READY)))
49677+ return 0;
49678+
49679+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49680+ gr_log_learn_id_change('g', real, effective, fs);
49681+
49682+ num = current->acl->group_trans_num;
49683+ gidlist = current->acl->group_transitions;
49684+
49685+ if (gidlist == NULL)
49686+ return 0;
49687+
49688+ if (real == -1)
49689+ realok = 1;
49690+ if (effective == -1)
49691+ effectiveok = 1;
49692+ if (fs == -1)
49693+ fsok = 1;
49694+
49695+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
49696+ for (i = 0; i < num; i++) {
49697+ curgid = (int)gidlist[i];
49698+ if (real == curgid)
49699+ realok = 1;
49700+ if (effective == curgid)
49701+ effectiveok = 1;
49702+ if (fs == curgid)
49703+ fsok = 1;
49704+ }
49705+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
49706+ for (i = 0; i < num; i++) {
49707+ curgid = (int)gidlist[i];
49708+ if (real == curgid)
49709+ break;
49710+ if (effective == curgid)
49711+ break;
49712+ if (fs == curgid)
49713+ break;
49714+ }
49715+ /* not in deny list */
49716+ if (i == num) {
49717+ realok = 1;
49718+ effectiveok = 1;
49719+ fsok = 1;
49720+ }
49721+ }
49722+
49723+ if (realok && effectiveok && fsok)
49724+ return 0;
49725+ else {
49726+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49727+ return 1;
49728+ }
49729+}
49730+
49731+void
49732+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
49733+{
49734+ struct acl_role_label *role = task->role;
49735+ struct acl_subject_label *subj = NULL;
49736+ struct acl_object_label *obj;
49737+ struct file *filp;
49738+
49739+ if (unlikely(!(gr_status & GR_READY)))
49740+ return;
49741+
49742+ filp = task->exec_file;
49743+
49744+ /* kernel process, we'll give them the kernel role */
49745+ if (unlikely(!filp)) {
49746+ task->role = kernel_role;
49747+ task->acl = kernel_role->root_label;
49748+ return;
49749+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
49750+ role = lookup_acl_role_label(task, uid, gid);
49751+
49752+ /* perform subject lookup in possibly new role
49753+ we can use this result below in the case where role == task->role
49754+ */
49755+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
49756+
49757+ /* if we changed uid/gid, but result in the same role
49758+ and are using inheritance, don't lose the inherited subject
49759+ if current subject is other than what normal lookup
49760+ would result in, we arrived via inheritance, don't
49761+ lose subject
49762+ */
49763+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
49764+ (subj == task->acl)))
49765+ task->acl = subj;
49766+
49767+ task->role = role;
49768+
49769+ task->is_writable = 0;
49770+
49771+ /* ignore additional mmap checks for processes that are writable
49772+ by the default ACL */
49773+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49774+ if (unlikely(obj->mode & GR_WRITE))
49775+ task->is_writable = 1;
49776+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
49777+ if (unlikely(obj->mode & GR_WRITE))
49778+ task->is_writable = 1;
49779+
49780+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49781+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49782+#endif
49783+
49784+ gr_set_proc_res(task);
49785+
49786+ return;
49787+}
49788+
49789+int
49790+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
49791+ const int unsafe_share)
49792+{
49793+ struct task_struct *task = current;
49794+ struct acl_subject_label *newacl;
49795+ struct acl_object_label *obj;
49796+ __u32 retmode;
49797+
49798+ if (unlikely(!(gr_status & GR_READY)))
49799+ return 0;
49800+
49801+ newacl = chk_subj_label(dentry, mnt, task->role);
49802+
49803+ task_lock(task);
49804+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
49805+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
49806+ !(task->role->roletype & GR_ROLE_GOD) &&
49807+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
49808+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
49809+ task_unlock(task);
49810+ if (unsafe_share)
49811+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
49812+ else
49813+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
49814+ return -EACCES;
49815+ }
49816+ task_unlock(task);
49817+
49818+ obj = chk_obj_label(dentry, mnt, task->acl);
49819+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
49820+
49821+ if (!(task->acl->mode & GR_INHERITLEARN) &&
49822+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
49823+ if (obj->nested)
49824+ task->acl = obj->nested;
49825+ else
49826+ task->acl = newacl;
49827+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
49828+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
49829+
49830+ task->is_writable = 0;
49831+
49832+ /* ignore additional mmap checks for processes that are writable
49833+ by the default ACL */
49834+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
49835+ if (unlikely(obj->mode & GR_WRITE))
49836+ task->is_writable = 1;
49837+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
49838+ if (unlikely(obj->mode & GR_WRITE))
49839+ task->is_writable = 1;
49840+
49841+ gr_set_proc_res(task);
49842+
49843+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49844+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49845+#endif
49846+ return 0;
49847+}
49848+
49849+/* always called with valid inodev ptr */
49850+static void
49851+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
49852+{
49853+ struct acl_object_label *matchpo;
49854+ struct acl_subject_label *matchps;
49855+ struct acl_subject_label *subj;
49856+ struct acl_role_label *role;
49857+ unsigned int x;
49858+
49859+ FOR_EACH_ROLE_START(role)
49860+ FOR_EACH_SUBJECT_START(role, subj, x)
49861+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
49862+ matchpo->mode |= GR_DELETED;
49863+ FOR_EACH_SUBJECT_END(subj,x)
49864+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
49865+ if (subj->inode == ino && subj->device == dev)
49866+ subj->mode |= GR_DELETED;
49867+ FOR_EACH_NESTED_SUBJECT_END(subj)
49868+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
49869+ matchps->mode |= GR_DELETED;
49870+ FOR_EACH_ROLE_END(role)
49871+
49872+ inodev->nentry->deleted = 1;
49873+
49874+ return;
49875+}
49876+
49877+void
49878+gr_handle_delete(const ino_t ino, const dev_t dev)
49879+{
49880+ struct inodev_entry *inodev;
49881+
49882+ if (unlikely(!(gr_status & GR_READY)))
49883+ return;
49884+
49885+ write_lock(&gr_inode_lock);
49886+ inodev = lookup_inodev_entry(ino, dev);
49887+ if (inodev != NULL)
49888+ do_handle_delete(inodev, ino, dev);
49889+ write_unlock(&gr_inode_lock);
49890+
49891+ return;
49892+}
49893+
49894+static void
49895+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
49896+ const ino_t newinode, const dev_t newdevice,
49897+ struct acl_subject_label *subj)
49898+{
49899+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
49900+ struct acl_object_label *match;
49901+
49902+ match = subj->obj_hash[index];
49903+
49904+ while (match && (match->inode != oldinode ||
49905+ match->device != olddevice ||
49906+ !(match->mode & GR_DELETED)))
49907+ match = match->next;
49908+
49909+ if (match && (match->inode == oldinode)
49910+ && (match->device == olddevice)
49911+ && (match->mode & GR_DELETED)) {
49912+ if (match->prev == NULL) {
49913+ subj->obj_hash[index] = match->next;
49914+ if (match->next != NULL)
49915+ match->next->prev = NULL;
49916+ } else {
49917+ match->prev->next = match->next;
49918+ if (match->next != NULL)
49919+ match->next->prev = match->prev;
49920+ }
49921+ match->prev = NULL;
49922+ match->next = NULL;
49923+ match->inode = newinode;
49924+ match->device = newdevice;
49925+ match->mode &= ~GR_DELETED;
49926+
49927+ insert_acl_obj_label(match, subj);
49928+ }
49929+
49930+ return;
49931+}
49932+
49933+static void
49934+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
49935+ const ino_t newinode, const dev_t newdevice,
49936+ struct acl_role_label *role)
49937+{
49938+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
49939+ struct acl_subject_label *match;
49940+
49941+ match = role->subj_hash[index];
49942+
49943+ while (match && (match->inode != oldinode ||
49944+ match->device != olddevice ||
49945+ !(match->mode & GR_DELETED)))
49946+ match = match->next;
49947+
49948+ if (match && (match->inode == oldinode)
49949+ && (match->device == olddevice)
49950+ && (match->mode & GR_DELETED)) {
49951+ if (match->prev == NULL) {
49952+ role->subj_hash[index] = match->next;
49953+ if (match->next != NULL)
49954+ match->next->prev = NULL;
49955+ } else {
49956+ match->prev->next = match->next;
49957+ if (match->next != NULL)
49958+ match->next->prev = match->prev;
49959+ }
49960+ match->prev = NULL;
49961+ match->next = NULL;
49962+ match->inode = newinode;
49963+ match->device = newdevice;
49964+ match->mode &= ~GR_DELETED;
49965+
49966+ insert_acl_subj_label(match, role);
49967+ }
49968+
49969+ return;
49970+}
49971+
49972+static void
49973+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
49974+ const ino_t newinode, const dev_t newdevice)
49975+{
49976+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
49977+ struct inodev_entry *match;
49978+
49979+ match = inodev_set.i_hash[index];
49980+
49981+ while (match && (match->nentry->inode != oldinode ||
49982+ match->nentry->device != olddevice || !match->nentry->deleted))
49983+ match = match->next;
49984+
49985+ if (match && (match->nentry->inode == oldinode)
49986+ && (match->nentry->device == olddevice) &&
49987+ match->nentry->deleted) {
49988+ if (match->prev == NULL) {
49989+ inodev_set.i_hash[index] = match->next;
49990+ if (match->next != NULL)
49991+ match->next->prev = NULL;
49992+ } else {
49993+ match->prev->next = match->next;
49994+ if (match->next != NULL)
49995+ match->next->prev = match->prev;
49996+ }
49997+ match->prev = NULL;
49998+ match->next = NULL;
49999+ match->nentry->inode = newinode;
50000+ match->nentry->device = newdevice;
50001+ match->nentry->deleted = 0;
50002+
50003+ insert_inodev_entry(match);
50004+ }
50005+
50006+ return;
50007+}
50008+
50009+static void
50010+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
50011+{
50012+ struct acl_subject_label *subj;
50013+ struct acl_role_label *role;
50014+ unsigned int x;
50015+
50016+ FOR_EACH_ROLE_START(role)
50017+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
50018+
50019+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
50020+ if ((subj->inode == ino) && (subj->device == dev)) {
50021+ subj->inode = ino;
50022+ subj->device = dev;
50023+ }
50024+ FOR_EACH_NESTED_SUBJECT_END(subj)
50025+ FOR_EACH_SUBJECT_START(role, subj, x)
50026+ update_acl_obj_label(matchn->inode, matchn->device,
50027+ ino, dev, subj);
50028+ FOR_EACH_SUBJECT_END(subj,x)
50029+ FOR_EACH_ROLE_END(role)
50030+
50031+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
50032+
50033+ return;
50034+}
50035+
50036+static void
50037+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
50038+ const struct vfsmount *mnt)
50039+{
50040+ ino_t ino = dentry->d_inode->i_ino;
50041+ dev_t dev = __get_dev(dentry);
50042+
50043+ __do_handle_create(matchn, ino, dev);
50044+
50045+ return;
50046+}
50047+
50048+void
50049+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
50050+{
50051+ struct name_entry *matchn;
50052+
50053+ if (unlikely(!(gr_status & GR_READY)))
50054+ return;
50055+
50056+ preempt_disable();
50057+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
50058+
50059+ if (unlikely((unsigned long)matchn)) {
50060+ write_lock(&gr_inode_lock);
50061+ do_handle_create(matchn, dentry, mnt);
50062+ write_unlock(&gr_inode_lock);
50063+ }
50064+ preempt_enable();
50065+
50066+ return;
50067+}
50068+
50069+void
50070+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
50071+{
50072+ struct name_entry *matchn;
50073+
50074+ if (unlikely(!(gr_status & GR_READY)))
50075+ return;
50076+
50077+ preempt_disable();
50078+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
50079+
50080+ if (unlikely((unsigned long)matchn)) {
50081+ write_lock(&gr_inode_lock);
50082+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
50083+ write_unlock(&gr_inode_lock);
50084+ }
50085+ preempt_enable();
50086+
50087+ return;
50088+}
50089+
50090+void
50091+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
50092+ struct dentry *old_dentry,
50093+ struct dentry *new_dentry,
50094+ struct vfsmount *mnt, const __u8 replace)
50095+{
50096+ struct name_entry *matchn;
50097+ struct inodev_entry *inodev;
50098+ struct inode *inode = new_dentry->d_inode;
50099+ ino_t old_ino = old_dentry->d_inode->i_ino;
50100+ dev_t old_dev = __get_dev(old_dentry);
50101+
50102+ /* vfs_rename swaps the name and parent link for old_dentry and
50103+ new_dentry
50104+ at this point, old_dentry has the new name, parent link, and inode
50105+ for the renamed file
50106+ if a file is being replaced by a rename, new_dentry has the inode
50107+ and name for the replaced file
50108+ */
50109+
50110+ if (unlikely(!(gr_status & GR_READY)))
50111+ return;
50112+
50113+ preempt_disable();
50114+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
50115+
50116+ /* we wouldn't have to check d_inode if it weren't for
50117+ NFS silly-renaming
50118+ */
50119+
50120+ write_lock(&gr_inode_lock);
50121+ if (unlikely(replace && inode)) {
50122+ ino_t new_ino = inode->i_ino;
50123+ dev_t new_dev = __get_dev(new_dentry);
50124+
50125+ inodev = lookup_inodev_entry(new_ino, new_dev);
50126+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
50127+ do_handle_delete(inodev, new_ino, new_dev);
50128+ }
50129+
50130+ inodev = lookup_inodev_entry(old_ino, old_dev);
50131+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
50132+ do_handle_delete(inodev, old_ino, old_dev);
50133+
50134+ if (unlikely((unsigned long)matchn))
50135+ do_handle_create(matchn, old_dentry, mnt);
50136+
50137+ write_unlock(&gr_inode_lock);
50138+ preempt_enable();
50139+
50140+ return;
50141+}
50142+
50143+static int
50144+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
50145+ unsigned char **sum)
50146+{
50147+ struct acl_role_label *r;
50148+ struct role_allowed_ip *ipp;
50149+ struct role_transition *trans;
50150+ unsigned int i;
50151+ int found = 0;
50152+ u32 curr_ip = current->signal->curr_ip;
50153+
50154+ current->signal->saved_ip = curr_ip;
50155+
50156+ /* check transition table */
50157+
50158+ for (trans = current->role->transitions; trans; trans = trans->next) {
50159+ if (!strcmp(rolename, trans->rolename)) {
50160+ found = 1;
50161+ break;
50162+ }
50163+ }
50164+
50165+ if (!found)
50166+ return 0;
50167+
50168+ /* handle special roles that do not require authentication
50169+ and check ip */
50170+
50171+ FOR_EACH_ROLE_START(r)
50172+ if (!strcmp(rolename, r->rolename) &&
50173+ (r->roletype & GR_ROLE_SPECIAL)) {
50174+ found = 0;
50175+ if (r->allowed_ips != NULL) {
50176+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
50177+ if ((ntohl(curr_ip) & ipp->netmask) ==
50178+ (ntohl(ipp->addr) & ipp->netmask))
50179+ found = 1;
50180+ }
50181+ } else
50182+ found = 2;
50183+ if (!found)
50184+ return 0;
50185+
50186+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
50187+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
50188+ *salt = NULL;
50189+ *sum = NULL;
50190+ return 1;
50191+ }
50192+ }
50193+ FOR_EACH_ROLE_END(r)
50194+
50195+ for (i = 0; i < num_sprole_pws; i++) {
50196+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
50197+ *salt = acl_special_roles[i]->salt;
50198+ *sum = acl_special_roles[i]->sum;
50199+ return 1;
50200+ }
50201+ }
50202+
50203+ return 0;
50204+}
50205+
50206+static void
50207+assign_special_role(char *rolename)
50208+{
50209+ struct acl_object_label *obj;
50210+ struct acl_role_label *r;
50211+ struct acl_role_label *assigned = NULL;
50212+ struct task_struct *tsk;
50213+ struct file *filp;
50214+
50215+ FOR_EACH_ROLE_START(r)
50216+ if (!strcmp(rolename, r->rolename) &&
50217+ (r->roletype & GR_ROLE_SPECIAL)) {
50218+ assigned = r;
50219+ break;
50220+ }
50221+ FOR_EACH_ROLE_END(r)
50222+
50223+ if (!assigned)
50224+ return;
50225+
50226+ read_lock(&tasklist_lock);
50227+ read_lock(&grsec_exec_file_lock);
50228+
50229+ tsk = current->real_parent;
50230+ if (tsk == NULL)
50231+ goto out_unlock;
50232+
50233+ filp = tsk->exec_file;
50234+ if (filp == NULL)
50235+ goto out_unlock;
50236+
50237+ tsk->is_writable = 0;
50238+
50239+ tsk->acl_sp_role = 1;
50240+ tsk->acl_role_id = ++acl_sp_role_value;
50241+ tsk->role = assigned;
50242+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
50243+
50244+ /* ignore additional mmap checks for processes that are writable
50245+ by the default ACL */
50246+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50247+ if (unlikely(obj->mode & GR_WRITE))
50248+ tsk->is_writable = 1;
50249+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
50250+ if (unlikely(obj->mode & GR_WRITE))
50251+ tsk->is_writable = 1;
50252+
50253+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50254+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
50255+#endif
50256+
50257+out_unlock:
50258+ read_unlock(&grsec_exec_file_lock);
50259+ read_unlock(&tasklist_lock);
50260+ return;
50261+}
50262+
50263+int gr_check_secure_terminal(struct task_struct *task)
50264+{
50265+ struct task_struct *p, *p2, *p3;
50266+ struct files_struct *files;
50267+ struct fdtable *fdt;
50268+ struct file *our_file = NULL, *file;
50269+ int i;
50270+
50271+ if (task->signal->tty == NULL)
50272+ return 1;
50273+
50274+ files = get_files_struct(task);
50275+ if (files != NULL) {
50276+ rcu_read_lock();
50277+ fdt = files_fdtable(files);
50278+ for (i=0; i < fdt->max_fds; i++) {
50279+ file = fcheck_files(files, i);
50280+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
50281+ get_file(file);
50282+ our_file = file;
50283+ }
50284+ }
50285+ rcu_read_unlock();
50286+ put_files_struct(files);
50287+ }
50288+
50289+ if (our_file == NULL)
50290+ return 1;
50291+
50292+ read_lock(&tasklist_lock);
50293+ do_each_thread(p2, p) {
50294+ files = get_files_struct(p);
50295+ if (files == NULL ||
50296+ (p->signal && p->signal->tty == task->signal->tty)) {
50297+ if (files != NULL)
50298+ put_files_struct(files);
50299+ continue;
50300+ }
50301+ rcu_read_lock();
50302+ fdt = files_fdtable(files);
50303+ for (i=0; i < fdt->max_fds; i++) {
50304+ file = fcheck_files(files, i);
50305+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
50306+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
50307+ p3 = task;
50308+ while (p3->pid > 0) {
50309+ if (p3 == p)
50310+ break;
50311+ p3 = p3->real_parent;
50312+ }
50313+ if (p3 == p)
50314+ break;
50315+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
50316+ gr_handle_alertkill(p);
50317+ rcu_read_unlock();
50318+ put_files_struct(files);
50319+ read_unlock(&tasklist_lock);
50320+ fput(our_file);
50321+ return 0;
50322+ }
50323+ }
50324+ rcu_read_unlock();
50325+ put_files_struct(files);
50326+ } while_each_thread(p2, p);
50327+ read_unlock(&tasklist_lock);
50328+
50329+ fput(our_file);
50330+ return 1;
50331+}
50332+
50333+ssize_t
50334+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
50335+{
50336+ struct gr_arg_wrapper uwrap;
50337+ unsigned char *sprole_salt = NULL;
50338+ unsigned char *sprole_sum = NULL;
50339+ int error = sizeof (struct gr_arg_wrapper);
50340+ int error2 = 0;
50341+
50342+ mutex_lock(&gr_dev_mutex);
50343+
50344+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
50345+ error = -EPERM;
50346+ goto out;
50347+ }
50348+
50349+ if (count != sizeof (struct gr_arg_wrapper)) {
50350+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
50351+ error = -EINVAL;
50352+ goto out;
50353+ }
50354+
50355+
50356+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
50357+ gr_auth_expires = 0;
50358+ gr_auth_attempts = 0;
50359+ }
50360+
50361+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
50362+ error = -EFAULT;
50363+ goto out;
50364+ }
50365+
50366+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
50367+ error = -EINVAL;
50368+ goto out;
50369+ }
50370+
50371+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
50372+ error = -EFAULT;
50373+ goto out;
50374+ }
50375+
50376+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
50377+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
50378+ time_after(gr_auth_expires, get_seconds())) {
50379+ error = -EBUSY;
50380+ goto out;
50381+ }
50382+
50383+ /* if non-root trying to do anything other than use a special role,
50384+ do not attempt authentication, do not count towards authentication
50385+ locking
50386+ */
50387+
50388+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
50389+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
50390+ current_uid()) {
50391+ error = -EPERM;
50392+ goto out;
50393+ }
50394+
50395+ /* ensure pw and special role name are null terminated */
50396+
50397+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
50398+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
50399+
50400+ /* Okay.
50401+ * We have our enough of the argument structure..(we have yet
50402+ * to copy_from_user the tables themselves) . Copy the tables
50403+ * only if we need them, i.e. for loading operations. */
50404+
50405+ switch (gr_usermode->mode) {
50406+ case GR_STATUS:
50407+ if (gr_status & GR_READY) {
50408+ error = 1;
50409+ if (!gr_check_secure_terminal(current))
50410+ error = 3;
50411+ } else
50412+ error = 2;
50413+ goto out;
50414+ case GR_SHUTDOWN:
50415+ if ((gr_status & GR_READY)
50416+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50417+ pax_open_kernel();
50418+ gr_status &= ~GR_READY;
50419+ pax_close_kernel();
50420+
50421+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
50422+ free_variables();
50423+ memset(gr_usermode, 0, sizeof (struct gr_arg));
50424+ memset(gr_system_salt, 0, GR_SALT_LEN);
50425+ memset(gr_system_sum, 0, GR_SHA_LEN);
50426+ } else if (gr_status & GR_READY) {
50427+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
50428+ error = -EPERM;
50429+ } else {
50430+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
50431+ error = -EAGAIN;
50432+ }
50433+ break;
50434+ case GR_ENABLE:
50435+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
50436+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
50437+ else {
50438+ if (gr_status & GR_READY)
50439+ error = -EAGAIN;
50440+ else
50441+ error = error2;
50442+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
50443+ }
50444+ break;
50445+ case GR_RELOAD:
50446+ if (!(gr_status & GR_READY)) {
50447+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
50448+ error = -EAGAIN;
50449+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50450+ preempt_disable();
50451+
50452+ pax_open_kernel();
50453+ gr_status &= ~GR_READY;
50454+ pax_close_kernel();
50455+
50456+ free_variables();
50457+ if (!(error2 = gracl_init(gr_usermode))) {
50458+ preempt_enable();
50459+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
50460+ } else {
50461+ preempt_enable();
50462+ error = error2;
50463+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
50464+ }
50465+ } else {
50466+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
50467+ error = -EPERM;
50468+ }
50469+ break;
50470+ case GR_SEGVMOD:
50471+ if (unlikely(!(gr_status & GR_READY))) {
50472+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
50473+ error = -EAGAIN;
50474+ break;
50475+ }
50476+
50477+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50478+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
50479+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
50480+ struct acl_subject_label *segvacl;
50481+ segvacl =
50482+ lookup_acl_subj_label(gr_usermode->segv_inode,
50483+ gr_usermode->segv_device,
50484+ current->role);
50485+ if (segvacl) {
50486+ segvacl->crashes = 0;
50487+ segvacl->expires = 0;
50488+ }
50489+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
50490+ gr_remove_uid(gr_usermode->segv_uid);
50491+ }
50492+ } else {
50493+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
50494+ error = -EPERM;
50495+ }
50496+ break;
50497+ case GR_SPROLE:
50498+ case GR_SPROLEPAM:
50499+ if (unlikely(!(gr_status & GR_READY))) {
50500+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
50501+ error = -EAGAIN;
50502+ break;
50503+ }
50504+
50505+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
50506+ current->role->expires = 0;
50507+ current->role->auth_attempts = 0;
50508+ }
50509+
50510+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
50511+ time_after(current->role->expires, get_seconds())) {
50512+ error = -EBUSY;
50513+ goto out;
50514+ }
50515+
50516+ if (lookup_special_role_auth
50517+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
50518+ && ((!sprole_salt && !sprole_sum)
50519+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
50520+ char *p = "";
50521+ assign_special_role(gr_usermode->sp_role);
50522+ read_lock(&tasklist_lock);
50523+ if (current->real_parent)
50524+ p = current->real_parent->role->rolename;
50525+ read_unlock(&tasklist_lock);
50526+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
50527+ p, acl_sp_role_value);
50528+ } else {
50529+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
50530+ error = -EPERM;
50531+ if(!(current->role->auth_attempts++))
50532+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
50533+
50534+ goto out;
50535+ }
50536+ break;
50537+ case GR_UNSPROLE:
50538+ if (unlikely(!(gr_status & GR_READY))) {
50539+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
50540+ error = -EAGAIN;
50541+ break;
50542+ }
50543+
50544+ if (current->role->roletype & GR_ROLE_SPECIAL) {
50545+ char *p = "";
50546+ int i = 0;
50547+
50548+ read_lock(&tasklist_lock);
50549+ if (current->real_parent) {
50550+ p = current->real_parent->role->rolename;
50551+ i = current->real_parent->acl_role_id;
50552+ }
50553+ read_unlock(&tasklist_lock);
50554+
50555+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
50556+ gr_set_acls(1);
50557+ } else {
50558+ error = -EPERM;
50559+ goto out;
50560+ }
50561+ break;
50562+ default:
50563+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
50564+ error = -EINVAL;
50565+ break;
50566+ }
50567+
50568+ if (error != -EPERM)
50569+ goto out;
50570+
50571+ if(!(gr_auth_attempts++))
50572+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
50573+
50574+ out:
50575+ mutex_unlock(&gr_dev_mutex);
50576+ return error;
50577+}
50578+
50579+/* must be called with
50580+ rcu_read_lock();
50581+ read_lock(&tasklist_lock);
50582+ read_lock(&grsec_exec_file_lock);
50583+*/
50584+int gr_apply_subject_to_task(struct task_struct *task)
50585+{
50586+ struct acl_object_label *obj;
50587+ char *tmpname;
50588+ struct acl_subject_label *tmpsubj;
50589+ struct file *filp;
50590+ struct name_entry *nmatch;
50591+
50592+ filp = task->exec_file;
50593+ if (filp == NULL)
50594+ return 0;
50595+
50596+ /* the following is to apply the correct subject
50597+ on binaries running when the RBAC system
50598+ is enabled, when the binaries have been
50599+ replaced or deleted since their execution
50600+ -----
50601+ when the RBAC system starts, the inode/dev
50602+ from exec_file will be one the RBAC system
50603+ is unaware of. It only knows the inode/dev
50604+ of the present file on disk, or the absence
50605+ of it.
50606+ */
50607+ preempt_disable();
50608+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
50609+
50610+ nmatch = lookup_name_entry(tmpname);
50611+ preempt_enable();
50612+ tmpsubj = NULL;
50613+ if (nmatch) {
50614+ if (nmatch->deleted)
50615+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
50616+ else
50617+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
50618+ if (tmpsubj != NULL)
50619+ task->acl = tmpsubj;
50620+ }
50621+ if (tmpsubj == NULL)
50622+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
50623+ task->role);
50624+ if (task->acl) {
50625+ task->is_writable = 0;
50626+ /* ignore additional mmap checks for processes that are writable
50627+ by the default ACL */
50628+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50629+ if (unlikely(obj->mode & GR_WRITE))
50630+ task->is_writable = 1;
50631+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
50632+ if (unlikely(obj->mode & GR_WRITE))
50633+ task->is_writable = 1;
50634+
50635+ gr_set_proc_res(task);
50636+
50637+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50638+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50639+#endif
50640+ } else {
50641+ return 1;
50642+ }
50643+
50644+ return 0;
50645+}
50646+
50647+int
50648+gr_set_acls(const int type)
50649+{
50650+ struct task_struct *task, *task2;
50651+ struct acl_role_label *role = current->role;
50652+ __u16 acl_role_id = current->acl_role_id;
50653+ const struct cred *cred;
50654+ int ret;
50655+
50656+ rcu_read_lock();
50657+ read_lock(&tasklist_lock);
50658+ read_lock(&grsec_exec_file_lock);
50659+ do_each_thread(task2, task) {
50660+ /* check to see if we're called from the exit handler,
50661+ if so, only replace ACLs that have inherited the admin
50662+ ACL */
50663+
50664+ if (type && (task->role != role ||
50665+ task->acl_role_id != acl_role_id))
50666+ continue;
50667+
50668+ task->acl_role_id = 0;
50669+ task->acl_sp_role = 0;
50670+
50671+ if (task->exec_file) {
50672+ cred = __task_cred(task);
50673+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
50674+ ret = gr_apply_subject_to_task(task);
50675+ if (ret) {
50676+ read_unlock(&grsec_exec_file_lock);
50677+ read_unlock(&tasklist_lock);
50678+ rcu_read_unlock();
50679+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
50680+ return ret;
50681+ }
50682+ } else {
50683+ // it's a kernel process
50684+ task->role = kernel_role;
50685+ task->acl = kernel_role->root_label;
50686+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
50687+ task->acl->mode &= ~GR_PROCFIND;
50688+#endif
50689+ }
50690+ } while_each_thread(task2, task);
50691+ read_unlock(&grsec_exec_file_lock);
50692+ read_unlock(&tasklist_lock);
50693+ rcu_read_unlock();
50694+
50695+ return 0;
50696+}
50697+
50698+void
50699+gr_learn_resource(const struct task_struct *task,
50700+ const int res, const unsigned long wanted, const int gt)
50701+{
50702+ struct acl_subject_label *acl;
50703+ const struct cred *cred;
50704+
50705+ if (unlikely((gr_status & GR_READY) &&
50706+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
50707+ goto skip_reslog;
50708+
50709+#ifdef CONFIG_GRKERNSEC_RESLOG
50710+ gr_log_resource(task, res, wanted, gt);
50711+#endif
50712+ skip_reslog:
50713+
50714+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
50715+ return;
50716+
50717+ acl = task->acl;
50718+
50719+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
50720+ !(acl->resmask & (1 << (unsigned short) res))))
50721+ return;
50722+
50723+ if (wanted >= acl->res[res].rlim_cur) {
50724+ unsigned long res_add;
50725+
50726+ res_add = wanted;
50727+ switch (res) {
50728+ case RLIMIT_CPU:
50729+ res_add += GR_RLIM_CPU_BUMP;
50730+ break;
50731+ case RLIMIT_FSIZE:
50732+ res_add += GR_RLIM_FSIZE_BUMP;
50733+ break;
50734+ case RLIMIT_DATA:
50735+ res_add += GR_RLIM_DATA_BUMP;
50736+ break;
50737+ case RLIMIT_STACK:
50738+ res_add += GR_RLIM_STACK_BUMP;
50739+ break;
50740+ case RLIMIT_CORE:
50741+ res_add += GR_RLIM_CORE_BUMP;
50742+ break;
50743+ case RLIMIT_RSS:
50744+ res_add += GR_RLIM_RSS_BUMP;
50745+ break;
50746+ case RLIMIT_NPROC:
50747+ res_add += GR_RLIM_NPROC_BUMP;
50748+ break;
50749+ case RLIMIT_NOFILE:
50750+ res_add += GR_RLIM_NOFILE_BUMP;
50751+ break;
50752+ case RLIMIT_MEMLOCK:
50753+ res_add += GR_RLIM_MEMLOCK_BUMP;
50754+ break;
50755+ case RLIMIT_AS:
50756+ res_add += GR_RLIM_AS_BUMP;
50757+ break;
50758+ case RLIMIT_LOCKS:
50759+ res_add += GR_RLIM_LOCKS_BUMP;
50760+ break;
50761+ case RLIMIT_SIGPENDING:
50762+ res_add += GR_RLIM_SIGPENDING_BUMP;
50763+ break;
50764+ case RLIMIT_MSGQUEUE:
50765+ res_add += GR_RLIM_MSGQUEUE_BUMP;
50766+ break;
50767+ case RLIMIT_NICE:
50768+ res_add += GR_RLIM_NICE_BUMP;
50769+ break;
50770+ case RLIMIT_RTPRIO:
50771+ res_add += GR_RLIM_RTPRIO_BUMP;
50772+ break;
50773+ case RLIMIT_RTTIME:
50774+ res_add += GR_RLIM_RTTIME_BUMP;
50775+ break;
50776+ }
50777+
50778+ acl->res[res].rlim_cur = res_add;
50779+
50780+ if (wanted > acl->res[res].rlim_max)
50781+ acl->res[res].rlim_max = res_add;
50782+
50783+ /* only log the subject filename, since resource logging is supported for
50784+ single-subject learning only */
50785+ rcu_read_lock();
50786+ cred = __task_cred(task);
50787+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
50788+ task->role->roletype, cred->uid, cred->gid, acl->filename,
50789+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
50790+ "", (unsigned long) res, &task->signal->saved_ip);
50791+ rcu_read_unlock();
50792+ }
50793+
50794+ return;
50795+}
50796+
50797+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
50798+void
50799+pax_set_initial_flags(struct linux_binprm *bprm)
50800+{
50801+ struct task_struct *task = current;
50802+ struct acl_subject_label *proc;
50803+ unsigned long flags;
50804+
50805+ if (unlikely(!(gr_status & GR_READY)))
50806+ return;
50807+
50808+ flags = pax_get_flags(task);
50809+
50810+ proc = task->acl;
50811+
50812+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
50813+ flags &= ~MF_PAX_PAGEEXEC;
50814+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
50815+ flags &= ~MF_PAX_SEGMEXEC;
50816+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
50817+ flags &= ~MF_PAX_RANDMMAP;
50818+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
50819+ flags &= ~MF_PAX_EMUTRAMP;
50820+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
50821+ flags &= ~MF_PAX_MPROTECT;
50822+
50823+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
50824+ flags |= MF_PAX_PAGEEXEC;
50825+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
50826+ flags |= MF_PAX_SEGMEXEC;
50827+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
50828+ flags |= MF_PAX_RANDMMAP;
50829+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
50830+ flags |= MF_PAX_EMUTRAMP;
50831+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
50832+ flags |= MF_PAX_MPROTECT;
50833+
50834+ pax_set_flags(task, flags);
50835+
50836+ return;
50837+}
50838+#endif
50839+
50840+#ifdef CONFIG_SYSCTL
50841+/* Eric Biederman likes breaking userland ABI and every inode-based security
50842+ system to save 35kb of memory */
50843+
50844+/* we modify the passed in filename, but adjust it back before returning */
50845+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
50846+{
50847+ struct name_entry *nmatch;
50848+ char *p, *lastp = NULL;
50849+ struct acl_object_label *obj = NULL, *tmp;
50850+ struct acl_subject_label *tmpsubj;
50851+ char c = '\0';
50852+
50853+ read_lock(&gr_inode_lock);
50854+
50855+ p = name + len - 1;
50856+ do {
50857+ nmatch = lookup_name_entry(name);
50858+ if (lastp != NULL)
50859+ *lastp = c;
50860+
50861+ if (nmatch == NULL)
50862+ goto next_component;
50863+ tmpsubj = current->acl;
50864+ do {
50865+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
50866+ if (obj != NULL) {
50867+ tmp = obj->globbed;
50868+ while (tmp) {
50869+ if (!glob_match(tmp->filename, name)) {
50870+ obj = tmp;
50871+ goto found_obj;
50872+ }
50873+ tmp = tmp->next;
50874+ }
50875+ goto found_obj;
50876+ }
50877+ } while ((tmpsubj = tmpsubj->parent_subject));
50878+next_component:
50879+ /* end case */
50880+ if (p == name)
50881+ break;
50882+
50883+ while (*p != '/')
50884+ p--;
50885+ if (p == name)
50886+ lastp = p + 1;
50887+ else {
50888+ lastp = p;
50889+ p--;
50890+ }
50891+ c = *lastp;
50892+ *lastp = '\0';
50893+ } while (1);
50894+found_obj:
50895+ read_unlock(&gr_inode_lock);
50896+ /* obj returned will always be non-null */
50897+ return obj;
50898+}
50899+
50900+/* returns 0 when allowing, non-zero on error
50901+ op of 0 is used for readdir, so we don't log the names of hidden files
50902+*/
50903+__u32
50904+gr_handle_sysctl(const struct ctl_table *table, const int op)
50905+{
50906+ struct ctl_table *tmp;
50907+ const char *proc_sys = "/proc/sys";
50908+ char *path;
50909+ struct acl_object_label *obj;
50910+ unsigned short len = 0, pos = 0, depth = 0, i;
50911+ __u32 err = 0;
50912+ __u32 mode = 0;
50913+
50914+ if (unlikely(!(gr_status & GR_READY)))
50915+ return 0;
50916+
50917+ /* for now, ignore operations on non-sysctl entries if it's not a
50918+ readdir*/
50919+ if (table->child != NULL && op != 0)
50920+ return 0;
50921+
50922+ mode |= GR_FIND;
50923+ /* it's only a read if it's an entry, read on dirs is for readdir */
50924+ if (op & MAY_READ)
50925+ mode |= GR_READ;
50926+ if (op & MAY_WRITE)
50927+ mode |= GR_WRITE;
50928+
50929+ preempt_disable();
50930+
50931+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50932+
50933+ /* it's only a read/write if it's an actual entry, not a dir
50934+ (which are opened for readdir)
50935+ */
50936+
50937+ /* convert the requested sysctl entry into a pathname */
50938+
50939+ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50940+ len += strlen(tmp->procname);
50941+ len++;
50942+ depth++;
50943+ }
50944+
50945+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
50946+ /* deny */
50947+ goto out;
50948+ }
50949+
50950+ memset(path, 0, PAGE_SIZE);
50951+
50952+ memcpy(path, proc_sys, strlen(proc_sys));
50953+
50954+ pos += strlen(proc_sys);
50955+
50956+ for (; depth > 0; depth--) {
50957+ path[pos] = '/';
50958+ pos++;
50959+ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50960+ if (depth == i) {
50961+ memcpy(path + pos, tmp->procname,
50962+ strlen(tmp->procname));
50963+ pos += strlen(tmp->procname);
50964+ }
50965+ i++;
50966+ }
50967+ }
50968+
50969+ obj = gr_lookup_by_name(path, pos);
50970+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
50971+
50972+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
50973+ ((err & mode) != mode))) {
50974+ __u32 new_mode = mode;
50975+
50976+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50977+
50978+ err = 0;
50979+ gr_log_learn_sysctl(path, new_mode);
50980+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
50981+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
50982+ err = -ENOENT;
50983+ } else if (!(err & GR_FIND)) {
50984+ err = -ENOENT;
50985+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
50986+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
50987+ path, (mode & GR_READ) ? " reading" : "",
50988+ (mode & GR_WRITE) ? " writing" : "");
50989+ err = -EACCES;
50990+ } else if ((err & mode) != mode) {
50991+ err = -EACCES;
50992+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
50993+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
50994+ path, (mode & GR_READ) ? " reading" : "",
50995+ (mode & GR_WRITE) ? " writing" : "");
50996+ err = 0;
50997+ } else
50998+ err = 0;
50999+
51000+ out:
51001+ preempt_enable();
51002+
51003+ return err;
51004+}
51005+#endif
51006+
51007+int
51008+gr_handle_proc_ptrace(struct task_struct *task)
51009+{
51010+ struct file *filp;
51011+ struct task_struct *tmp = task;
51012+ struct task_struct *curtemp = current;
51013+ __u32 retmode;
51014+
51015+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
51016+ if (unlikely(!(gr_status & GR_READY)))
51017+ return 0;
51018+#endif
51019+
51020+ read_lock(&tasklist_lock);
51021+ read_lock(&grsec_exec_file_lock);
51022+ filp = task->exec_file;
51023+
51024+ while (tmp->pid > 0) {
51025+ if (tmp == curtemp)
51026+ break;
51027+ tmp = tmp->real_parent;
51028+ }
51029+
51030+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
51031+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
51032+ read_unlock(&grsec_exec_file_lock);
51033+ read_unlock(&tasklist_lock);
51034+ return 1;
51035+ }
51036+
51037+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
51038+ if (!(gr_status & GR_READY)) {
51039+ read_unlock(&grsec_exec_file_lock);
51040+ read_unlock(&tasklist_lock);
51041+ return 0;
51042+ }
51043+#endif
51044+
51045+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
51046+ read_unlock(&grsec_exec_file_lock);
51047+ read_unlock(&tasklist_lock);
51048+
51049+ if (retmode & GR_NOPTRACE)
51050+ return 1;
51051+
51052+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
51053+ && (current->acl != task->acl || (current->acl != current->role->root_label
51054+ && current->pid != task->pid)))
51055+ return 1;
51056+
51057+ return 0;
51058+}
51059+
51060+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
51061+{
51062+ if (unlikely(!(gr_status & GR_READY)))
51063+ return;
51064+
51065+ if (!(current->role->roletype & GR_ROLE_GOD))
51066+ return;
51067+
51068+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
51069+ p->role->rolename, gr_task_roletype_to_char(p),
51070+ p->acl->filename);
51071+}
51072+
51073+int
51074+gr_handle_ptrace(struct task_struct *task, const long request)
51075+{
51076+ struct task_struct *tmp = task;
51077+ struct task_struct *curtemp = current;
51078+ __u32 retmode;
51079+
51080+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
51081+ if (unlikely(!(gr_status & GR_READY)))
51082+ return 0;
51083+#endif
51084+
51085+ read_lock(&tasklist_lock);
51086+ while (tmp->pid > 0) {
51087+ if (tmp == curtemp)
51088+ break;
51089+ tmp = tmp->real_parent;
51090+ }
51091+
51092+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
51093+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
51094+ read_unlock(&tasklist_lock);
51095+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51096+ return 1;
51097+ }
51098+ read_unlock(&tasklist_lock);
51099+
51100+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
51101+ if (!(gr_status & GR_READY))
51102+ return 0;
51103+#endif
51104+
51105+ read_lock(&grsec_exec_file_lock);
51106+ if (unlikely(!task->exec_file)) {
51107+ read_unlock(&grsec_exec_file_lock);
51108+ return 0;
51109+ }
51110+
51111+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
51112+ read_unlock(&grsec_exec_file_lock);
51113+
51114+ if (retmode & GR_NOPTRACE) {
51115+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51116+ return 1;
51117+ }
51118+
51119+ if (retmode & GR_PTRACERD) {
51120+ switch (request) {
51121+ case PTRACE_SEIZE:
51122+ case PTRACE_POKETEXT:
51123+ case PTRACE_POKEDATA:
51124+ case PTRACE_POKEUSR:
51125+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
51126+ case PTRACE_SETREGS:
51127+ case PTRACE_SETFPREGS:
51128+#endif
51129+#ifdef CONFIG_X86
51130+ case PTRACE_SETFPXREGS:
51131+#endif
51132+#ifdef CONFIG_ALTIVEC
51133+ case PTRACE_SETVRREGS:
51134+#endif
51135+ return 1;
51136+ default:
51137+ return 0;
51138+ }
51139+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
51140+ !(current->role->roletype & GR_ROLE_GOD) &&
51141+ (current->acl != task->acl)) {
51142+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51143+ return 1;
51144+ }
51145+
51146+ return 0;
51147+}
51148+
51149+static int is_writable_mmap(const struct file *filp)
51150+{
51151+ struct task_struct *task = current;
51152+ struct acl_object_label *obj, *obj2;
51153+
51154+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
51155+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
51156+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51157+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
51158+ task->role->root_label);
51159+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
51160+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
51161+ return 1;
51162+ }
51163+ }
51164+ return 0;
51165+}
51166+
51167+int
51168+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
51169+{
51170+ __u32 mode;
51171+
51172+ if (unlikely(!file || !(prot & PROT_EXEC)))
51173+ return 1;
51174+
51175+ if (is_writable_mmap(file))
51176+ return 0;
51177+
51178+ mode =
51179+ gr_search_file(file->f_path.dentry,
51180+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
51181+ file->f_path.mnt);
51182+
51183+ if (!gr_tpe_allow(file))
51184+ return 0;
51185+
51186+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
51187+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51188+ return 0;
51189+ } else if (unlikely(!(mode & GR_EXEC))) {
51190+ return 0;
51191+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
51192+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51193+ return 1;
51194+ }
51195+
51196+ return 1;
51197+}
51198+
51199+int
51200+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
51201+{
51202+ __u32 mode;
51203+
51204+ if (unlikely(!file || !(prot & PROT_EXEC)))
51205+ return 1;
51206+
51207+ if (is_writable_mmap(file))
51208+ return 0;
51209+
51210+ mode =
51211+ gr_search_file(file->f_path.dentry,
51212+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
51213+ file->f_path.mnt);
51214+
51215+ if (!gr_tpe_allow(file))
51216+ return 0;
51217+
51218+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
51219+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51220+ return 0;
51221+ } else if (unlikely(!(mode & GR_EXEC))) {
51222+ return 0;
51223+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
51224+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51225+ return 1;
51226+ }
51227+
51228+ return 1;
51229+}
51230+
51231+void
51232+gr_acl_handle_psacct(struct task_struct *task, const long code)
51233+{
51234+ unsigned long runtime;
51235+ unsigned long cputime;
51236+ unsigned int wday, cday;
51237+ __u8 whr, chr;
51238+ __u8 wmin, cmin;
51239+ __u8 wsec, csec;
51240+ struct timespec timeval;
51241+
51242+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
51243+ !(task->acl->mode & GR_PROCACCT)))
51244+ return;
51245+
51246+ do_posix_clock_monotonic_gettime(&timeval);
51247+ runtime = timeval.tv_sec - task->start_time.tv_sec;
51248+ wday = runtime / (3600 * 24);
51249+ runtime -= wday * (3600 * 24);
51250+ whr = runtime / 3600;
51251+ runtime -= whr * 3600;
51252+ wmin = runtime / 60;
51253+ runtime -= wmin * 60;
51254+ wsec = runtime;
51255+
51256+ cputime = (task->utime + task->stime) / HZ;
51257+ cday = cputime / (3600 * 24);
51258+ cputime -= cday * (3600 * 24);
51259+ chr = cputime / 3600;
51260+ cputime -= chr * 3600;
51261+ cmin = cputime / 60;
51262+ cputime -= cmin * 60;
51263+ csec = cputime;
51264+
51265+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
51266+
51267+ return;
51268+}
51269+
51270+void gr_set_kernel_label(struct task_struct *task)
51271+{
51272+ if (gr_status & GR_READY) {
51273+ task->role = kernel_role;
51274+ task->acl = kernel_role->root_label;
51275+ }
51276+ return;
51277+}
51278+
51279+#ifdef CONFIG_TASKSTATS
51280+int gr_is_taskstats_denied(int pid)
51281+{
51282+ struct task_struct *task;
51283+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51284+ const struct cred *cred;
51285+#endif
51286+ int ret = 0;
51287+
51288+ /* restrict taskstats viewing to un-chrooted root users
51289+ who have the 'view' subject flag if the RBAC system is enabled
51290+ */
51291+
51292+ rcu_read_lock();
51293+ read_lock(&tasklist_lock);
51294+ task = find_task_by_vpid(pid);
51295+ if (task) {
51296+#ifdef CONFIG_GRKERNSEC_CHROOT
51297+ if (proc_is_chrooted(task))
51298+ ret = -EACCES;
51299+#endif
51300+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51301+ cred = __task_cred(task);
51302+#ifdef CONFIG_GRKERNSEC_PROC_USER
51303+ if (cred->uid != 0)
51304+ ret = -EACCES;
51305+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51306+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
51307+ ret = -EACCES;
51308+#endif
51309+#endif
51310+ if (gr_status & GR_READY) {
51311+ if (!(task->acl->mode & GR_VIEW))
51312+ ret = -EACCES;
51313+ }
51314+ } else
51315+ ret = -ENOENT;
51316+
51317+ read_unlock(&tasklist_lock);
51318+ rcu_read_unlock();
51319+
51320+ return ret;
51321+}
51322+#endif
51323+
51324+/* AUXV entries are filled via a descendant of search_binary_handler
51325+ after we've already applied the subject for the target
51326+*/
51327+int gr_acl_enable_at_secure(void)
51328+{
51329+ if (unlikely(!(gr_status & GR_READY)))
51330+ return 0;
51331+
51332+ if (current->acl->mode & GR_ATSECURE)
51333+ return 1;
51334+
51335+ return 0;
51336+}
51337+
51338+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
51339+{
51340+ struct task_struct *task = current;
51341+ struct dentry *dentry = file->f_path.dentry;
51342+ struct vfsmount *mnt = file->f_path.mnt;
51343+ struct acl_object_label *obj, *tmp;
51344+ struct acl_subject_label *subj;
51345+ unsigned int bufsize;
51346+ int is_not_root;
51347+ char *path;
51348+ dev_t dev = __get_dev(dentry);
51349+
51350+ if (unlikely(!(gr_status & GR_READY)))
51351+ return 1;
51352+
51353+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
51354+ return 1;
51355+
51356+ /* ignore Eric Biederman */
51357+ if (IS_PRIVATE(dentry->d_inode))
51358+ return 1;
51359+
51360+ subj = task->acl;
51361+ do {
51362+ obj = lookup_acl_obj_label(ino, dev, subj);
51363+ if (obj != NULL)
51364+ return (obj->mode & GR_FIND) ? 1 : 0;
51365+ } while ((subj = subj->parent_subject));
51366+
51367+ /* this is purely an optimization since we're looking for an object
51368+ for the directory we're doing a readdir on
51369+ if it's possible for any globbed object to match the entry we're
51370+ filling into the directory, then the object we find here will be
51371+ an anchor point with attached globbed objects
51372+ */
51373+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
51374+ if (obj->globbed == NULL)
51375+ return (obj->mode & GR_FIND) ? 1 : 0;
51376+
51377+ is_not_root = ((obj->filename[0] == '/') &&
51378+ (obj->filename[1] == '\0')) ? 0 : 1;
51379+ bufsize = PAGE_SIZE - namelen - is_not_root;
51380+
51381+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
51382+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
51383+ return 1;
51384+
51385+ preempt_disable();
51386+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
51387+ bufsize);
51388+
51389+ bufsize = strlen(path);
51390+
51391+ /* if base is "/", don't append an additional slash */
51392+ if (is_not_root)
51393+ *(path + bufsize) = '/';
51394+ memcpy(path + bufsize + is_not_root, name, namelen);
51395+ *(path + bufsize + namelen + is_not_root) = '\0';
51396+
51397+ tmp = obj->globbed;
51398+ while (tmp) {
51399+ if (!glob_match(tmp->filename, path)) {
51400+ preempt_enable();
51401+ return (tmp->mode & GR_FIND) ? 1 : 0;
51402+ }
51403+ tmp = tmp->next;
51404+ }
51405+ preempt_enable();
51406+ return (obj->mode & GR_FIND) ? 1 : 0;
51407+}
51408+
51409+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
51410+EXPORT_SYMBOL(gr_acl_is_enabled);
51411+#endif
51412+EXPORT_SYMBOL(gr_learn_resource);
51413+EXPORT_SYMBOL(gr_set_kernel_label);
51414+#ifdef CONFIG_SECURITY
51415+EXPORT_SYMBOL(gr_check_user_change);
51416+EXPORT_SYMBOL(gr_check_group_change);
51417+#endif
51418+
51419diff -urNp linux-3.1.1/grsecurity/gracl_cap.c linux-3.1.1/grsecurity/gracl_cap.c
51420--- linux-3.1.1/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
51421+++ linux-3.1.1/grsecurity/gracl_cap.c 2011-11-16 18:40:31.000000000 -0500
51422@@ -0,0 +1,101 @@
51423+#include <linux/kernel.h>
51424+#include <linux/module.h>
51425+#include <linux/sched.h>
51426+#include <linux/gracl.h>
51427+#include <linux/grsecurity.h>
51428+#include <linux/grinternal.h>
51429+
51430+extern const char *captab_log[];
51431+extern int captab_log_entries;
51432+
51433+int
51434+gr_acl_is_capable(const int cap)
51435+{
51436+ struct task_struct *task = current;
51437+ const struct cred *cred = current_cred();
51438+ struct acl_subject_label *curracl;
51439+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
51440+ kernel_cap_t cap_audit = __cap_empty_set;
51441+
51442+ if (!gr_acl_is_enabled())
51443+ return 1;
51444+
51445+ curracl = task->acl;
51446+
51447+ cap_drop = curracl->cap_lower;
51448+ cap_mask = curracl->cap_mask;
51449+ cap_audit = curracl->cap_invert_audit;
51450+
51451+ while ((curracl = curracl->parent_subject)) {
51452+ /* if the cap isn't specified in the current computed mask but is specified in the
51453+ current level subject, and is lowered in the current level subject, then add
51454+ it to the set of dropped capabilities
51455+ otherwise, add the current level subject's mask to the current computed mask
51456+ */
51457+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
51458+ cap_raise(cap_mask, cap);
51459+ if (cap_raised(curracl->cap_lower, cap))
51460+ cap_raise(cap_drop, cap);
51461+ if (cap_raised(curracl->cap_invert_audit, cap))
51462+ cap_raise(cap_audit, cap);
51463+ }
51464+ }
51465+
51466+ if (!cap_raised(cap_drop, cap)) {
51467+ if (cap_raised(cap_audit, cap))
51468+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
51469+ return 1;
51470+ }
51471+
51472+ curracl = task->acl;
51473+
51474+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
51475+ && cap_raised(cred->cap_effective, cap)) {
51476+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
51477+ task->role->roletype, cred->uid,
51478+ cred->gid, task->exec_file ?
51479+ gr_to_filename(task->exec_file->f_path.dentry,
51480+ task->exec_file->f_path.mnt) : curracl->filename,
51481+ curracl->filename, 0UL,
51482+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
51483+ return 1;
51484+ }
51485+
51486+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
51487+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
51488+ return 0;
51489+}
51490+
51491+int
51492+gr_acl_is_capable_nolog(const int cap)
51493+{
51494+ struct acl_subject_label *curracl;
51495+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
51496+
51497+ if (!gr_acl_is_enabled())
51498+ return 1;
51499+
51500+ curracl = current->acl;
51501+
51502+ cap_drop = curracl->cap_lower;
51503+ cap_mask = curracl->cap_mask;
51504+
51505+ while ((curracl = curracl->parent_subject)) {
51506+ /* if the cap isn't specified in the current computed mask but is specified in the
51507+ current level subject, and is lowered in the current level subject, then add
51508+ it to the set of dropped capabilities
51509+ otherwise, add the current level subject's mask to the current computed mask
51510+ */
51511+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
51512+ cap_raise(cap_mask, cap);
51513+ if (cap_raised(curracl->cap_lower, cap))
51514+ cap_raise(cap_drop, cap);
51515+ }
51516+ }
51517+
51518+ if (!cap_raised(cap_drop, cap))
51519+ return 1;
51520+
51521+ return 0;
51522+}
51523+
51524diff -urNp linux-3.1.1/grsecurity/gracl_fs.c linux-3.1.1/grsecurity/gracl_fs.c
51525--- linux-3.1.1/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
51526+++ linux-3.1.1/grsecurity/gracl_fs.c 2011-11-17 00:25:32.000000000 -0500
51527@@ -0,0 +1,433 @@
51528+#include <linux/kernel.h>
51529+#include <linux/sched.h>
51530+#include <linux/types.h>
51531+#include <linux/fs.h>
51532+#include <linux/file.h>
51533+#include <linux/stat.h>
51534+#include <linux/grsecurity.h>
51535+#include <linux/grinternal.h>
51536+#include <linux/gracl.h>
51537+
51538+__u32
51539+gr_acl_handle_hidden_file(const struct dentry * dentry,
51540+ const struct vfsmount * mnt)
51541+{
51542+ __u32 mode;
51543+
51544+ if (unlikely(!dentry->d_inode))
51545+ return GR_FIND;
51546+
51547+ mode =
51548+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
51549+
51550+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
51551+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
51552+ return mode;
51553+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
51554+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
51555+ return 0;
51556+ } else if (unlikely(!(mode & GR_FIND)))
51557+ return 0;
51558+
51559+ return GR_FIND;
51560+}
51561+
51562+__u32
51563+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
51564+ int acc_mode)
51565+{
51566+ __u32 reqmode = GR_FIND;
51567+ __u32 mode;
51568+
51569+ if (unlikely(!dentry->d_inode))
51570+ return reqmode;
51571+
51572+ if (acc_mode & MAY_APPEND)
51573+ reqmode |= GR_APPEND;
51574+ else if (acc_mode & MAY_WRITE)
51575+ reqmode |= GR_WRITE;
51576+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
51577+ reqmode |= GR_READ;
51578+
51579+ mode =
51580+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51581+ mnt);
51582+
51583+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51584+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
51585+ reqmode & GR_READ ? " reading" : "",
51586+ reqmode & GR_WRITE ? " writing" : reqmode &
51587+ GR_APPEND ? " appending" : "");
51588+ return reqmode;
51589+ } else
51590+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51591+ {
51592+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
51593+ reqmode & GR_READ ? " reading" : "",
51594+ reqmode & GR_WRITE ? " writing" : reqmode &
51595+ GR_APPEND ? " appending" : "");
51596+ return 0;
51597+ } else if (unlikely((mode & reqmode) != reqmode))
51598+ return 0;
51599+
51600+ return reqmode;
51601+}
51602+
51603+__u32
51604+gr_acl_handle_creat(const struct dentry * dentry,
51605+ const struct dentry * p_dentry,
51606+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
51607+ const int imode)
51608+{
51609+ __u32 reqmode = GR_WRITE | GR_CREATE;
51610+ __u32 mode;
51611+
51612+ if (acc_mode & MAY_APPEND)
51613+ reqmode |= GR_APPEND;
51614+ // if a directory was required or the directory already exists, then
51615+ // don't count this open as a read
51616+ if ((acc_mode & MAY_READ) &&
51617+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
51618+ reqmode |= GR_READ;
51619+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
51620+ reqmode |= GR_SETID;
51621+
51622+ mode =
51623+ gr_check_create(dentry, p_dentry, p_mnt,
51624+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51625+
51626+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51627+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51628+ reqmode & GR_READ ? " reading" : "",
51629+ reqmode & GR_WRITE ? " writing" : reqmode &
51630+ GR_APPEND ? " appending" : "");
51631+ return reqmode;
51632+ } else
51633+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51634+ {
51635+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51636+ reqmode & GR_READ ? " reading" : "",
51637+ reqmode & GR_WRITE ? " writing" : reqmode &
51638+ GR_APPEND ? " appending" : "");
51639+ return 0;
51640+ } else if (unlikely((mode & reqmode) != reqmode))
51641+ return 0;
51642+
51643+ return reqmode;
51644+}
51645+
51646+__u32
51647+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
51648+ const int fmode)
51649+{
51650+ __u32 mode, reqmode = GR_FIND;
51651+
51652+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
51653+ reqmode |= GR_EXEC;
51654+ if (fmode & S_IWOTH)
51655+ reqmode |= GR_WRITE;
51656+ if (fmode & S_IROTH)
51657+ reqmode |= GR_READ;
51658+
51659+ mode =
51660+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51661+ mnt);
51662+
51663+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51664+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51665+ reqmode & GR_READ ? " reading" : "",
51666+ reqmode & GR_WRITE ? " writing" : "",
51667+ reqmode & GR_EXEC ? " executing" : "");
51668+ return reqmode;
51669+ } else
51670+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51671+ {
51672+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51673+ reqmode & GR_READ ? " reading" : "",
51674+ reqmode & GR_WRITE ? " writing" : "",
51675+ reqmode & GR_EXEC ? " executing" : "");
51676+ return 0;
51677+ } else if (unlikely((mode & reqmode) != reqmode))
51678+ return 0;
51679+
51680+ return reqmode;
51681+}
51682+
51683+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
51684+{
51685+ __u32 mode;
51686+
51687+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
51688+
51689+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51690+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
51691+ return mode;
51692+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51693+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
51694+ return 0;
51695+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
51696+ return 0;
51697+
51698+ return (reqmode);
51699+}
51700+
51701+__u32
51702+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
51703+{
51704+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
51705+}
51706+
51707+__u32
51708+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
51709+{
51710+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
51711+}
51712+
51713+__u32
51714+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
51715+{
51716+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
51717+}
51718+
51719+__u32
51720+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
51721+{
51722+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
51723+}
51724+
51725+__u32
51726+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
51727+ mode_t mode)
51728+{
51729+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
51730+ return 1;
51731+
51732+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51733+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51734+ GR_FCHMOD_ACL_MSG);
51735+ } else {
51736+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
51737+ }
51738+}
51739+
51740+__u32
51741+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
51742+ mode_t mode)
51743+{
51744+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51745+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51746+ GR_CHMOD_ACL_MSG);
51747+ } else {
51748+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
51749+ }
51750+}
51751+
51752+__u32
51753+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
51754+{
51755+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
51756+}
51757+
51758+__u32
51759+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
51760+{
51761+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
51762+}
51763+
51764+__u32
51765+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
51766+{
51767+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
51768+}
51769+
51770+__u32
51771+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
51772+{
51773+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
51774+ GR_UNIXCONNECT_ACL_MSG);
51775+}
51776+
51777+/* hardlinks require at minimum create and link permission,
51778+ any additional privilege required is based on the
51779+ privilege of the file being linked to
51780+*/
51781+__u32
51782+gr_acl_handle_link(const struct dentry * new_dentry,
51783+ const struct dentry * parent_dentry,
51784+ const struct vfsmount * parent_mnt,
51785+ const struct dentry * old_dentry,
51786+ const struct vfsmount * old_mnt, const char *to)
51787+{
51788+ __u32 mode;
51789+ __u32 needmode = GR_CREATE | GR_LINK;
51790+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
51791+
51792+ mode =
51793+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
51794+ old_mnt);
51795+
51796+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
51797+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51798+ return mode;
51799+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51800+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51801+ return 0;
51802+ } else if (unlikely((mode & needmode) != needmode))
51803+ return 0;
51804+
51805+ return 1;
51806+}
51807+
51808+__u32
51809+gr_acl_handle_symlink(const struct dentry * new_dentry,
51810+ const struct dentry * parent_dentry,
51811+ const struct vfsmount * parent_mnt, const char *from)
51812+{
51813+ __u32 needmode = GR_WRITE | GR_CREATE;
51814+ __u32 mode;
51815+
51816+ mode =
51817+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
51818+ GR_CREATE | GR_AUDIT_CREATE |
51819+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
51820+
51821+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
51822+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51823+ return mode;
51824+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51825+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51826+ return 0;
51827+ } else if (unlikely((mode & needmode) != needmode))
51828+ return 0;
51829+
51830+ return (GR_WRITE | GR_CREATE);
51831+}
51832+
51833+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
51834+{
51835+ __u32 mode;
51836+
51837+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51838+
51839+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51840+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
51841+ return mode;
51842+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51843+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
51844+ return 0;
51845+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
51846+ return 0;
51847+
51848+ return (reqmode);
51849+}
51850+
51851+__u32
51852+gr_acl_handle_mknod(const struct dentry * new_dentry,
51853+ const struct dentry * parent_dentry,
51854+ const struct vfsmount * parent_mnt,
51855+ const int mode)
51856+{
51857+ __u32 reqmode = GR_WRITE | GR_CREATE;
51858+ if (unlikely(mode & (S_ISUID | S_ISGID)))
51859+ reqmode |= GR_SETID;
51860+
51861+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51862+ reqmode, GR_MKNOD_ACL_MSG);
51863+}
51864+
51865+__u32
51866+gr_acl_handle_mkdir(const struct dentry *new_dentry,
51867+ const struct dentry *parent_dentry,
51868+ const struct vfsmount *parent_mnt)
51869+{
51870+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51871+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
51872+}
51873+
51874+#define RENAME_CHECK_SUCCESS(old, new) \
51875+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
51876+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
51877+
51878+int
51879+gr_acl_handle_rename(struct dentry *new_dentry,
51880+ struct dentry *parent_dentry,
51881+ const struct vfsmount *parent_mnt,
51882+ struct dentry *old_dentry,
51883+ struct inode *old_parent_inode,
51884+ struct vfsmount *old_mnt, const char *newname)
51885+{
51886+ __u32 comp1, comp2;
51887+ int error = 0;
51888+
51889+ if (unlikely(!gr_acl_is_enabled()))
51890+ return 0;
51891+
51892+ if (!new_dentry->d_inode) {
51893+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
51894+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
51895+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
51896+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
51897+ GR_DELETE | GR_AUDIT_DELETE |
51898+ GR_AUDIT_READ | GR_AUDIT_WRITE |
51899+ GR_SUPPRESS, old_mnt);
51900+ } else {
51901+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
51902+ GR_CREATE | GR_DELETE |
51903+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
51904+ GR_AUDIT_READ | GR_AUDIT_WRITE |
51905+ GR_SUPPRESS, parent_mnt);
51906+ comp2 =
51907+ gr_search_file(old_dentry,
51908+ GR_READ | GR_WRITE | GR_AUDIT_READ |
51909+ GR_DELETE | GR_AUDIT_DELETE |
51910+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
51911+ }
51912+
51913+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
51914+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
51915+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51916+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
51917+ && !(comp2 & GR_SUPPRESS)) {
51918+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51919+ error = -EACCES;
51920+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
51921+ error = -EACCES;
51922+
51923+ return error;
51924+}
51925+
51926+void
51927+gr_acl_handle_exit(void)
51928+{
51929+ u16 id;
51930+ char *rolename;
51931+ struct file *exec_file;
51932+
51933+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
51934+ !(current->role->roletype & GR_ROLE_PERSIST))) {
51935+ id = current->acl_role_id;
51936+ rolename = current->role->rolename;
51937+ gr_set_acls(1);
51938+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
51939+ }
51940+
51941+ write_lock(&grsec_exec_file_lock);
51942+ exec_file = current->exec_file;
51943+ current->exec_file = NULL;
51944+ write_unlock(&grsec_exec_file_lock);
51945+
51946+ if (exec_file)
51947+ fput(exec_file);
51948+}
51949+
51950+int
51951+gr_acl_handle_procpidmem(const struct task_struct *task)
51952+{
51953+ if (unlikely(!gr_acl_is_enabled()))
51954+ return 0;
51955+
51956+ if (task != current && task->acl->mode & GR_PROTPROCFD)
51957+ return -EACCES;
51958+
51959+ return 0;
51960+}
51961diff -urNp linux-3.1.1/grsecurity/gracl_ip.c linux-3.1.1/grsecurity/gracl_ip.c
51962--- linux-3.1.1/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
51963+++ linux-3.1.1/grsecurity/gracl_ip.c 2011-11-16 18:40:31.000000000 -0500
51964@@ -0,0 +1,381 @@
51965+#include <linux/kernel.h>
51966+#include <asm/uaccess.h>
51967+#include <asm/errno.h>
51968+#include <net/sock.h>
51969+#include <linux/file.h>
51970+#include <linux/fs.h>
51971+#include <linux/net.h>
51972+#include <linux/in.h>
51973+#include <linux/skbuff.h>
51974+#include <linux/ip.h>
51975+#include <linux/udp.h>
51976+#include <linux/types.h>
51977+#include <linux/sched.h>
51978+#include <linux/netdevice.h>
51979+#include <linux/inetdevice.h>
51980+#include <linux/gracl.h>
51981+#include <linux/grsecurity.h>
51982+#include <linux/grinternal.h>
51983+
51984+#define GR_BIND 0x01
51985+#define GR_CONNECT 0x02
51986+#define GR_INVERT 0x04
51987+#define GR_BINDOVERRIDE 0x08
51988+#define GR_CONNECTOVERRIDE 0x10
51989+#define GR_SOCK_FAMILY 0x20
51990+
51991+static const char * gr_protocols[IPPROTO_MAX] = {
51992+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
51993+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
51994+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
51995+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
51996+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
51997+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
51998+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
51999+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
52000+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
52001+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
52002+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
52003+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
52004+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
52005+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
52006+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
52007+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
52008+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
52009+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
52010+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
52011+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
52012+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
52013+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
52014+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
52015+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
52016+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
52017+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
52018+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
52019+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
52020+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
52021+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
52022+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
52023+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
52024+ };
52025+
52026+static const char * gr_socktypes[SOCK_MAX] = {
52027+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
52028+ "unknown:7", "unknown:8", "unknown:9", "packet"
52029+ };
52030+
52031+static const char * gr_sockfamilies[AF_MAX+1] = {
52032+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
52033+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
52034+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
52035+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
52036+ };
52037+
52038+const char *
52039+gr_proto_to_name(unsigned char proto)
52040+{
52041+ return gr_protocols[proto];
52042+}
52043+
52044+const char *
52045+gr_socktype_to_name(unsigned char type)
52046+{
52047+ return gr_socktypes[type];
52048+}
52049+
52050+const char *
52051+gr_sockfamily_to_name(unsigned char family)
52052+{
52053+ return gr_sockfamilies[family];
52054+}
52055+
52056+int
52057+gr_search_socket(const int domain, const int type, const int protocol)
52058+{
52059+ struct acl_subject_label *curr;
52060+ const struct cred *cred = current_cred();
52061+
52062+ if (unlikely(!gr_acl_is_enabled()))
52063+ goto exit;
52064+
52065+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
52066+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
52067+ goto exit; // let the kernel handle it
52068+
52069+ curr = current->acl;
52070+
52071+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
52072+ /* the family is allowed, if this is PF_INET allow it only if
52073+ the extra sock type/protocol checks pass */
52074+ if (domain == PF_INET)
52075+ goto inet_check;
52076+ goto exit;
52077+ } else {
52078+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
52079+ __u32 fakeip = 0;
52080+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52081+ current->role->roletype, cred->uid,
52082+ cred->gid, current->exec_file ?
52083+ gr_to_filename(current->exec_file->f_path.dentry,
52084+ current->exec_file->f_path.mnt) :
52085+ curr->filename, curr->filename,
52086+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
52087+ &current->signal->saved_ip);
52088+ goto exit;
52089+ }
52090+ goto exit_fail;
52091+ }
52092+
52093+inet_check:
52094+ /* the rest of this checking is for IPv4 only */
52095+ if (!curr->ips)
52096+ goto exit;
52097+
52098+ if ((curr->ip_type & (1 << type)) &&
52099+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
52100+ goto exit;
52101+
52102+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
52103+ /* we don't place acls on raw sockets , and sometimes
52104+ dgram/ip sockets are opened for ioctl and not
52105+ bind/connect, so we'll fake a bind learn log */
52106+ if (type == SOCK_RAW || type == SOCK_PACKET) {
52107+ __u32 fakeip = 0;
52108+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52109+ current->role->roletype, cred->uid,
52110+ cred->gid, current->exec_file ?
52111+ gr_to_filename(current->exec_file->f_path.dentry,
52112+ current->exec_file->f_path.mnt) :
52113+ curr->filename, curr->filename,
52114+ &fakeip, 0, type,
52115+ protocol, GR_CONNECT, &current->signal->saved_ip);
52116+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
52117+ __u32 fakeip = 0;
52118+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52119+ current->role->roletype, cred->uid,
52120+ cred->gid, current->exec_file ?
52121+ gr_to_filename(current->exec_file->f_path.dentry,
52122+ current->exec_file->f_path.mnt) :
52123+ curr->filename, curr->filename,
52124+ &fakeip, 0, type,
52125+ protocol, GR_BIND, &current->signal->saved_ip);
52126+ }
52127+ /* we'll log when they use connect or bind */
52128+ goto exit;
52129+ }
52130+
52131+exit_fail:
52132+ if (domain == PF_INET)
52133+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
52134+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
52135+ else
52136+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
52137+ gr_socktype_to_name(type), protocol);
52138+
52139+ return 0;
52140+exit:
52141+ return 1;
52142+}
52143+
52144+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
52145+{
52146+ if ((ip->mode & mode) &&
52147+ (ip_port >= ip->low) &&
52148+ (ip_port <= ip->high) &&
52149+ ((ntohl(ip_addr) & our_netmask) ==
52150+ (ntohl(our_addr) & our_netmask))
52151+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
52152+ && (ip->type & (1 << type))) {
52153+ if (ip->mode & GR_INVERT)
52154+ return 2; // specifically denied
52155+ else
52156+ return 1; // allowed
52157+ }
52158+
52159+ return 0; // not specifically allowed, may continue parsing
52160+}
52161+
52162+static int
52163+gr_search_connectbind(const int full_mode, struct sock *sk,
52164+ struct sockaddr_in *addr, const int type)
52165+{
52166+ char iface[IFNAMSIZ] = {0};
52167+ struct acl_subject_label *curr;
52168+ struct acl_ip_label *ip;
52169+ struct inet_sock *isk;
52170+ struct net_device *dev;
52171+ struct in_device *idev;
52172+ unsigned long i;
52173+ int ret;
52174+ int mode = full_mode & (GR_BIND | GR_CONNECT);
52175+ __u32 ip_addr = 0;
52176+ __u32 our_addr;
52177+ __u32 our_netmask;
52178+ char *p;
52179+ __u16 ip_port = 0;
52180+ const struct cred *cred = current_cred();
52181+
52182+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
52183+ return 0;
52184+
52185+ curr = current->acl;
52186+ isk = inet_sk(sk);
52187+
52188+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
52189+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
52190+ addr->sin_addr.s_addr = curr->inaddr_any_override;
52191+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
52192+ struct sockaddr_in saddr;
52193+ int err;
52194+
52195+ saddr.sin_family = AF_INET;
52196+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
52197+ saddr.sin_port = isk->inet_sport;
52198+
52199+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
52200+ if (err)
52201+ return err;
52202+
52203+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
52204+ if (err)
52205+ return err;
52206+ }
52207+
52208+ if (!curr->ips)
52209+ return 0;
52210+
52211+ ip_addr = addr->sin_addr.s_addr;
52212+ ip_port = ntohs(addr->sin_port);
52213+
52214+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
52215+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52216+ current->role->roletype, cred->uid,
52217+ cred->gid, current->exec_file ?
52218+ gr_to_filename(current->exec_file->f_path.dentry,
52219+ current->exec_file->f_path.mnt) :
52220+ curr->filename, curr->filename,
52221+ &ip_addr, ip_port, type,
52222+ sk->sk_protocol, mode, &current->signal->saved_ip);
52223+ return 0;
52224+ }
52225+
52226+ for (i = 0; i < curr->ip_num; i++) {
52227+ ip = *(curr->ips + i);
52228+ if (ip->iface != NULL) {
52229+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
52230+ p = strchr(iface, ':');
52231+ if (p != NULL)
52232+ *p = '\0';
52233+ dev = dev_get_by_name(sock_net(sk), iface);
52234+ if (dev == NULL)
52235+ continue;
52236+ idev = in_dev_get(dev);
52237+ if (idev == NULL) {
52238+ dev_put(dev);
52239+ continue;
52240+ }
52241+ rcu_read_lock();
52242+ for_ifa(idev) {
52243+ if (!strcmp(ip->iface, ifa->ifa_label)) {
52244+ our_addr = ifa->ifa_address;
52245+ our_netmask = 0xffffffff;
52246+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
52247+ if (ret == 1) {
52248+ rcu_read_unlock();
52249+ in_dev_put(idev);
52250+ dev_put(dev);
52251+ return 0;
52252+ } else if (ret == 2) {
52253+ rcu_read_unlock();
52254+ in_dev_put(idev);
52255+ dev_put(dev);
52256+ goto denied;
52257+ }
52258+ }
52259+ } endfor_ifa(idev);
52260+ rcu_read_unlock();
52261+ in_dev_put(idev);
52262+ dev_put(dev);
52263+ } else {
52264+ our_addr = ip->addr;
52265+ our_netmask = ip->netmask;
52266+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
52267+ if (ret == 1)
52268+ return 0;
52269+ else if (ret == 2)
52270+ goto denied;
52271+ }
52272+ }
52273+
52274+denied:
52275+ if (mode == GR_BIND)
52276+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
52277+ else if (mode == GR_CONNECT)
52278+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
52279+
52280+ return -EACCES;
52281+}
52282+
52283+int
52284+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
52285+{
52286+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
52287+}
52288+
52289+int
52290+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
52291+{
52292+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
52293+}
52294+
52295+int gr_search_listen(struct socket *sock)
52296+{
52297+ struct sock *sk = sock->sk;
52298+ struct sockaddr_in addr;
52299+
52300+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
52301+ addr.sin_port = inet_sk(sk)->inet_sport;
52302+
52303+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
52304+}
52305+
52306+int gr_search_accept(struct socket *sock)
52307+{
52308+ struct sock *sk = sock->sk;
52309+ struct sockaddr_in addr;
52310+
52311+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
52312+ addr.sin_port = inet_sk(sk)->inet_sport;
52313+
52314+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
52315+}
52316+
52317+int
52318+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
52319+{
52320+ if (addr)
52321+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
52322+ else {
52323+ struct sockaddr_in sin;
52324+ const struct inet_sock *inet = inet_sk(sk);
52325+
52326+ sin.sin_addr.s_addr = inet->inet_daddr;
52327+ sin.sin_port = inet->inet_dport;
52328+
52329+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
52330+ }
52331+}
52332+
52333+int
52334+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
52335+{
52336+ struct sockaddr_in sin;
52337+
52338+ if (unlikely(skb->len < sizeof (struct udphdr)))
52339+ return 0; // skip this packet
52340+
52341+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
52342+ sin.sin_port = udp_hdr(skb)->source;
52343+
52344+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
52345+}
52346diff -urNp linux-3.1.1/grsecurity/gracl_learn.c linux-3.1.1/grsecurity/gracl_learn.c
52347--- linux-3.1.1/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
52348+++ linux-3.1.1/grsecurity/gracl_learn.c 2011-11-16 18:40:31.000000000 -0500
52349@@ -0,0 +1,207 @@
52350+#include <linux/kernel.h>
52351+#include <linux/mm.h>
52352+#include <linux/sched.h>
52353+#include <linux/poll.h>
52354+#include <linux/string.h>
52355+#include <linux/file.h>
52356+#include <linux/types.h>
52357+#include <linux/vmalloc.h>
52358+#include <linux/grinternal.h>
52359+
52360+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
52361+ size_t count, loff_t *ppos);
52362+extern int gr_acl_is_enabled(void);
52363+
52364+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
52365+static int gr_learn_attached;
52366+
52367+/* use a 512k buffer */
52368+#define LEARN_BUFFER_SIZE (512 * 1024)
52369+
52370+static DEFINE_SPINLOCK(gr_learn_lock);
52371+static DEFINE_MUTEX(gr_learn_user_mutex);
52372+
52373+/* we need to maintain two buffers, so that the kernel context of grlearn
52374+ uses a semaphore around the userspace copying, and the other kernel contexts
52375+ use a spinlock when copying into the buffer, since they cannot sleep
52376+*/
52377+static char *learn_buffer;
52378+static char *learn_buffer_user;
52379+static int learn_buffer_len;
52380+static int learn_buffer_user_len;
52381+
52382+static ssize_t
52383+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
52384+{
52385+ DECLARE_WAITQUEUE(wait, current);
52386+ ssize_t retval = 0;
52387+
52388+ add_wait_queue(&learn_wait, &wait);
52389+ set_current_state(TASK_INTERRUPTIBLE);
52390+ do {
52391+ mutex_lock(&gr_learn_user_mutex);
52392+ spin_lock(&gr_learn_lock);
52393+ if (learn_buffer_len)
52394+ break;
52395+ spin_unlock(&gr_learn_lock);
52396+ mutex_unlock(&gr_learn_user_mutex);
52397+ if (file->f_flags & O_NONBLOCK) {
52398+ retval = -EAGAIN;
52399+ goto out;
52400+ }
52401+ if (signal_pending(current)) {
52402+ retval = -ERESTARTSYS;
52403+ goto out;
52404+ }
52405+
52406+ schedule();
52407+ } while (1);
52408+
52409+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
52410+ learn_buffer_user_len = learn_buffer_len;
52411+ retval = learn_buffer_len;
52412+ learn_buffer_len = 0;
52413+
52414+ spin_unlock(&gr_learn_lock);
52415+
52416+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
52417+ retval = -EFAULT;
52418+
52419+ mutex_unlock(&gr_learn_user_mutex);
52420+out:
52421+ set_current_state(TASK_RUNNING);
52422+ remove_wait_queue(&learn_wait, &wait);
52423+ return retval;
52424+}
52425+
52426+static unsigned int
52427+poll_learn(struct file * file, poll_table * wait)
52428+{
52429+ poll_wait(file, &learn_wait, wait);
52430+
52431+ if (learn_buffer_len)
52432+ return (POLLIN | POLLRDNORM);
52433+
52434+ return 0;
52435+}
52436+
52437+void
52438+gr_clear_learn_entries(void)
52439+{
52440+ char *tmp;
52441+
52442+ mutex_lock(&gr_learn_user_mutex);
52443+ spin_lock(&gr_learn_lock);
52444+ tmp = learn_buffer;
52445+ learn_buffer = NULL;
52446+ spin_unlock(&gr_learn_lock);
52447+ if (tmp)
52448+ vfree(tmp);
52449+ if (learn_buffer_user != NULL) {
52450+ vfree(learn_buffer_user);
52451+ learn_buffer_user = NULL;
52452+ }
52453+ learn_buffer_len = 0;
52454+ mutex_unlock(&gr_learn_user_mutex);
52455+
52456+ return;
52457+}
52458+
52459+void
52460+gr_add_learn_entry(const char *fmt, ...)
52461+{
52462+ va_list args;
52463+ unsigned int len;
52464+
52465+ if (!gr_learn_attached)
52466+ return;
52467+
52468+ spin_lock(&gr_learn_lock);
52469+
52470+ /* leave a gap at the end so we know when it's "full" but don't have to
52471+ compute the exact length of the string we're trying to append
52472+ */
52473+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
52474+ spin_unlock(&gr_learn_lock);
52475+ wake_up_interruptible(&learn_wait);
52476+ return;
52477+ }
52478+ if (learn_buffer == NULL) {
52479+ spin_unlock(&gr_learn_lock);
52480+ return;
52481+ }
52482+
52483+ va_start(args, fmt);
52484+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
52485+ va_end(args);
52486+
52487+ learn_buffer_len += len + 1;
52488+
52489+ spin_unlock(&gr_learn_lock);
52490+ wake_up_interruptible(&learn_wait);
52491+
52492+ return;
52493+}
52494+
52495+static int
52496+open_learn(struct inode *inode, struct file *file)
52497+{
52498+ if (file->f_mode & FMODE_READ && gr_learn_attached)
52499+ return -EBUSY;
52500+ if (file->f_mode & FMODE_READ) {
52501+ int retval = 0;
52502+ mutex_lock(&gr_learn_user_mutex);
52503+ if (learn_buffer == NULL)
52504+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
52505+ if (learn_buffer_user == NULL)
52506+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
52507+ if (learn_buffer == NULL) {
52508+ retval = -ENOMEM;
52509+ goto out_error;
52510+ }
52511+ if (learn_buffer_user == NULL) {
52512+ retval = -ENOMEM;
52513+ goto out_error;
52514+ }
52515+ learn_buffer_len = 0;
52516+ learn_buffer_user_len = 0;
52517+ gr_learn_attached = 1;
52518+out_error:
52519+ mutex_unlock(&gr_learn_user_mutex);
52520+ return retval;
52521+ }
52522+ return 0;
52523+}
52524+
52525+static int
52526+close_learn(struct inode *inode, struct file *file)
52527+{
52528+ if (file->f_mode & FMODE_READ) {
52529+ char *tmp = NULL;
52530+ mutex_lock(&gr_learn_user_mutex);
52531+ spin_lock(&gr_learn_lock);
52532+ tmp = learn_buffer;
52533+ learn_buffer = NULL;
52534+ spin_unlock(&gr_learn_lock);
52535+ if (tmp)
52536+ vfree(tmp);
52537+ if (learn_buffer_user != NULL) {
52538+ vfree(learn_buffer_user);
52539+ learn_buffer_user = NULL;
52540+ }
52541+ learn_buffer_len = 0;
52542+ learn_buffer_user_len = 0;
52543+ gr_learn_attached = 0;
52544+ mutex_unlock(&gr_learn_user_mutex);
52545+ }
52546+
52547+ return 0;
52548+}
52549+
52550+const struct file_operations grsec_fops = {
52551+ .read = read_learn,
52552+ .write = write_grsec_handler,
52553+ .open = open_learn,
52554+ .release = close_learn,
52555+ .poll = poll_learn,
52556+};
52557diff -urNp linux-3.1.1/grsecurity/gracl_res.c linux-3.1.1/grsecurity/gracl_res.c
52558--- linux-3.1.1/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
52559+++ linux-3.1.1/grsecurity/gracl_res.c 2011-11-16 18:40:31.000000000 -0500
52560@@ -0,0 +1,68 @@
52561+#include <linux/kernel.h>
52562+#include <linux/sched.h>
52563+#include <linux/gracl.h>
52564+#include <linux/grinternal.h>
52565+
52566+static const char *restab_log[] = {
52567+ [RLIMIT_CPU] = "RLIMIT_CPU",
52568+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
52569+ [RLIMIT_DATA] = "RLIMIT_DATA",
52570+ [RLIMIT_STACK] = "RLIMIT_STACK",
52571+ [RLIMIT_CORE] = "RLIMIT_CORE",
52572+ [RLIMIT_RSS] = "RLIMIT_RSS",
52573+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
52574+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
52575+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
52576+ [RLIMIT_AS] = "RLIMIT_AS",
52577+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
52578+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
52579+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
52580+ [RLIMIT_NICE] = "RLIMIT_NICE",
52581+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
52582+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
52583+ [GR_CRASH_RES] = "RLIMIT_CRASH"
52584+};
52585+
52586+void
52587+gr_log_resource(const struct task_struct *task,
52588+ const int res, const unsigned long wanted, const int gt)
52589+{
52590+ const struct cred *cred;
52591+ unsigned long rlim;
52592+
52593+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
52594+ return;
52595+
52596+ // not yet supported resource
52597+ if (unlikely(!restab_log[res]))
52598+ return;
52599+
52600+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
52601+ rlim = task_rlimit_max(task, res);
52602+ else
52603+ rlim = task_rlimit(task, res);
52604+
52605+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
52606+ return;
52607+
52608+ rcu_read_lock();
52609+ cred = __task_cred(task);
52610+
52611+ if (res == RLIMIT_NPROC &&
52612+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
52613+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
52614+ goto out_rcu_unlock;
52615+ else if (res == RLIMIT_MEMLOCK &&
52616+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
52617+ goto out_rcu_unlock;
52618+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
52619+ goto out_rcu_unlock;
52620+ rcu_read_unlock();
52621+
52622+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
52623+
52624+ return;
52625+out_rcu_unlock:
52626+ rcu_read_unlock();
52627+ return;
52628+}
52629diff -urNp linux-3.1.1/grsecurity/gracl_segv.c linux-3.1.1/grsecurity/gracl_segv.c
52630--- linux-3.1.1/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
52631+++ linux-3.1.1/grsecurity/gracl_segv.c 2011-11-16 18:40:31.000000000 -0500
52632@@ -0,0 +1,299 @@
52633+#include <linux/kernel.h>
52634+#include <linux/mm.h>
52635+#include <asm/uaccess.h>
52636+#include <asm/errno.h>
52637+#include <asm/mman.h>
52638+#include <net/sock.h>
52639+#include <linux/file.h>
52640+#include <linux/fs.h>
52641+#include <linux/net.h>
52642+#include <linux/in.h>
52643+#include <linux/slab.h>
52644+#include <linux/types.h>
52645+#include <linux/sched.h>
52646+#include <linux/timer.h>
52647+#include <linux/gracl.h>
52648+#include <linux/grsecurity.h>
52649+#include <linux/grinternal.h>
52650+
52651+static struct crash_uid *uid_set;
52652+static unsigned short uid_used;
52653+static DEFINE_SPINLOCK(gr_uid_lock);
52654+extern rwlock_t gr_inode_lock;
52655+extern struct acl_subject_label *
52656+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
52657+ struct acl_role_label *role);
52658+
52659+#ifdef CONFIG_BTRFS_FS
52660+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
52661+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
52662+#endif
52663+
52664+static inline dev_t __get_dev(const struct dentry *dentry)
52665+{
52666+#ifdef CONFIG_BTRFS_FS
52667+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
52668+ return get_btrfs_dev_from_inode(dentry->d_inode);
52669+ else
52670+#endif
52671+ return dentry->d_inode->i_sb->s_dev;
52672+}
52673+
52674+int
52675+gr_init_uidset(void)
52676+{
52677+ uid_set =
52678+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
52679+ uid_used = 0;
52680+
52681+ return uid_set ? 1 : 0;
52682+}
52683+
52684+void
52685+gr_free_uidset(void)
52686+{
52687+ if (uid_set)
52688+ kfree(uid_set);
52689+
52690+ return;
52691+}
52692+
52693+int
52694+gr_find_uid(const uid_t uid)
52695+{
52696+ struct crash_uid *tmp = uid_set;
52697+ uid_t buid;
52698+ int low = 0, high = uid_used - 1, mid;
52699+
52700+ while (high >= low) {
52701+ mid = (low + high) >> 1;
52702+ buid = tmp[mid].uid;
52703+ if (buid == uid)
52704+ return mid;
52705+ if (buid > uid)
52706+ high = mid - 1;
52707+ if (buid < uid)
52708+ low = mid + 1;
52709+ }
52710+
52711+ return -1;
52712+}
52713+
52714+static __inline__ void
52715+gr_insertsort(void)
52716+{
52717+ unsigned short i, j;
52718+ struct crash_uid index;
52719+
52720+ for (i = 1; i < uid_used; i++) {
52721+ index = uid_set[i];
52722+ j = i;
52723+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
52724+ uid_set[j] = uid_set[j - 1];
52725+ j--;
52726+ }
52727+ uid_set[j] = index;
52728+ }
52729+
52730+ return;
52731+}
52732+
52733+static __inline__ void
52734+gr_insert_uid(const uid_t uid, const unsigned long expires)
52735+{
52736+ int loc;
52737+
52738+ if (uid_used == GR_UIDTABLE_MAX)
52739+ return;
52740+
52741+ loc = gr_find_uid(uid);
52742+
52743+ if (loc >= 0) {
52744+ uid_set[loc].expires = expires;
52745+ return;
52746+ }
52747+
52748+ uid_set[uid_used].uid = uid;
52749+ uid_set[uid_used].expires = expires;
52750+ uid_used++;
52751+
52752+ gr_insertsort();
52753+
52754+ return;
52755+}
52756+
52757+void
52758+gr_remove_uid(const unsigned short loc)
52759+{
52760+ unsigned short i;
52761+
52762+ for (i = loc + 1; i < uid_used; i++)
52763+ uid_set[i - 1] = uid_set[i];
52764+
52765+ uid_used--;
52766+
52767+ return;
52768+}
52769+
52770+int
52771+gr_check_crash_uid(const uid_t uid)
52772+{
52773+ int loc;
52774+ int ret = 0;
52775+
52776+ if (unlikely(!gr_acl_is_enabled()))
52777+ return 0;
52778+
52779+ spin_lock(&gr_uid_lock);
52780+ loc = gr_find_uid(uid);
52781+
52782+ if (loc < 0)
52783+ goto out_unlock;
52784+
52785+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
52786+ gr_remove_uid(loc);
52787+ else
52788+ ret = 1;
52789+
52790+out_unlock:
52791+ spin_unlock(&gr_uid_lock);
52792+ return ret;
52793+}
52794+
52795+static __inline__ int
52796+proc_is_setxid(const struct cred *cred)
52797+{
52798+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
52799+ cred->uid != cred->fsuid)
52800+ return 1;
52801+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
52802+ cred->gid != cred->fsgid)
52803+ return 1;
52804+
52805+ return 0;
52806+}
52807+
52808+extern int gr_fake_force_sig(int sig, struct task_struct *t);
52809+
52810+void
52811+gr_handle_crash(struct task_struct *task, const int sig)
52812+{
52813+ struct acl_subject_label *curr;
52814+ struct task_struct *tsk, *tsk2;
52815+ const struct cred *cred;
52816+ const struct cred *cred2;
52817+
52818+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
52819+ return;
52820+
52821+ if (unlikely(!gr_acl_is_enabled()))
52822+ return;
52823+
52824+ curr = task->acl;
52825+
52826+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
52827+ return;
52828+
52829+ if (time_before_eq(curr->expires, get_seconds())) {
52830+ curr->expires = 0;
52831+ curr->crashes = 0;
52832+ }
52833+
52834+ curr->crashes++;
52835+
52836+ if (!curr->expires)
52837+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
52838+
52839+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52840+ time_after(curr->expires, get_seconds())) {
52841+ rcu_read_lock();
52842+ cred = __task_cred(task);
52843+ if (cred->uid && proc_is_setxid(cred)) {
52844+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52845+ spin_lock(&gr_uid_lock);
52846+ gr_insert_uid(cred->uid, curr->expires);
52847+ spin_unlock(&gr_uid_lock);
52848+ curr->expires = 0;
52849+ curr->crashes = 0;
52850+ read_lock(&tasklist_lock);
52851+ do_each_thread(tsk2, tsk) {
52852+ cred2 = __task_cred(tsk);
52853+ if (tsk != task && cred2->uid == cred->uid)
52854+ gr_fake_force_sig(SIGKILL, tsk);
52855+ } while_each_thread(tsk2, tsk);
52856+ read_unlock(&tasklist_lock);
52857+ } else {
52858+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52859+ read_lock(&tasklist_lock);
52860+ read_lock(&grsec_exec_file_lock);
52861+ do_each_thread(tsk2, tsk) {
52862+ if (likely(tsk != task)) {
52863+ // if this thread has the same subject as the one that triggered
52864+ // RES_CRASH and it's the same binary, kill it
52865+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
52866+ gr_fake_force_sig(SIGKILL, tsk);
52867+ }
52868+ } while_each_thread(tsk2, tsk);
52869+ read_unlock(&grsec_exec_file_lock);
52870+ read_unlock(&tasklist_lock);
52871+ }
52872+ rcu_read_unlock();
52873+ }
52874+
52875+ return;
52876+}
52877+
52878+int
52879+gr_check_crash_exec(const struct file *filp)
52880+{
52881+ struct acl_subject_label *curr;
52882+
52883+ if (unlikely(!gr_acl_is_enabled()))
52884+ return 0;
52885+
52886+ read_lock(&gr_inode_lock);
52887+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
52888+ __get_dev(filp->f_path.dentry),
52889+ current->role);
52890+ read_unlock(&gr_inode_lock);
52891+
52892+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
52893+ (!curr->crashes && !curr->expires))
52894+ return 0;
52895+
52896+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52897+ time_after(curr->expires, get_seconds()))
52898+ return 1;
52899+ else if (time_before_eq(curr->expires, get_seconds())) {
52900+ curr->crashes = 0;
52901+ curr->expires = 0;
52902+ }
52903+
52904+ return 0;
52905+}
52906+
52907+void
52908+gr_handle_alertkill(struct task_struct *task)
52909+{
52910+ struct acl_subject_label *curracl;
52911+ __u32 curr_ip;
52912+ struct task_struct *p, *p2;
52913+
52914+ if (unlikely(!gr_acl_is_enabled()))
52915+ return;
52916+
52917+ curracl = task->acl;
52918+ curr_ip = task->signal->curr_ip;
52919+
52920+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
52921+ read_lock(&tasklist_lock);
52922+ do_each_thread(p2, p) {
52923+ if (p->signal->curr_ip == curr_ip)
52924+ gr_fake_force_sig(SIGKILL, p);
52925+ } while_each_thread(p2, p);
52926+ read_unlock(&tasklist_lock);
52927+ } else if (curracl->mode & GR_KILLPROC)
52928+ gr_fake_force_sig(SIGKILL, task);
52929+
52930+ return;
52931+}
52932diff -urNp linux-3.1.1/grsecurity/gracl_shm.c linux-3.1.1/grsecurity/gracl_shm.c
52933--- linux-3.1.1/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
52934+++ linux-3.1.1/grsecurity/gracl_shm.c 2011-11-16 18:40:31.000000000 -0500
52935@@ -0,0 +1,40 @@
52936+#include <linux/kernel.h>
52937+#include <linux/mm.h>
52938+#include <linux/sched.h>
52939+#include <linux/file.h>
52940+#include <linux/ipc.h>
52941+#include <linux/gracl.h>
52942+#include <linux/grsecurity.h>
52943+#include <linux/grinternal.h>
52944+
52945+int
52946+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
52947+ const time_t shm_createtime, const uid_t cuid, const int shmid)
52948+{
52949+ struct task_struct *task;
52950+
52951+ if (!gr_acl_is_enabled())
52952+ return 1;
52953+
52954+ rcu_read_lock();
52955+ read_lock(&tasklist_lock);
52956+
52957+ task = find_task_by_vpid(shm_cprid);
52958+
52959+ if (unlikely(!task))
52960+ task = find_task_by_vpid(shm_lapid);
52961+
52962+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
52963+ (task->pid == shm_lapid)) &&
52964+ (task->acl->mode & GR_PROTSHM) &&
52965+ (task->acl != current->acl))) {
52966+ read_unlock(&tasklist_lock);
52967+ rcu_read_unlock();
52968+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
52969+ return 0;
52970+ }
52971+ read_unlock(&tasklist_lock);
52972+ rcu_read_unlock();
52973+
52974+ return 1;
52975+}
52976diff -urNp linux-3.1.1/grsecurity/grsec_chdir.c linux-3.1.1/grsecurity/grsec_chdir.c
52977--- linux-3.1.1/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
52978+++ linux-3.1.1/grsecurity/grsec_chdir.c 2011-11-16 18:40:31.000000000 -0500
52979@@ -0,0 +1,19 @@
52980+#include <linux/kernel.h>
52981+#include <linux/sched.h>
52982+#include <linux/fs.h>
52983+#include <linux/file.h>
52984+#include <linux/grsecurity.h>
52985+#include <linux/grinternal.h>
52986+
52987+void
52988+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
52989+{
52990+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52991+ if ((grsec_enable_chdir && grsec_enable_group &&
52992+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
52993+ !grsec_enable_group)) {
52994+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
52995+ }
52996+#endif
52997+ return;
52998+}
52999diff -urNp linux-3.1.1/grsecurity/grsec_chroot.c linux-3.1.1/grsecurity/grsec_chroot.c
53000--- linux-3.1.1/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
53001+++ linux-3.1.1/grsecurity/grsec_chroot.c 2011-11-16 18:40:31.000000000 -0500
53002@@ -0,0 +1,351 @@
53003+#include <linux/kernel.h>
53004+#include <linux/module.h>
53005+#include <linux/sched.h>
53006+#include <linux/file.h>
53007+#include <linux/fs.h>
53008+#include <linux/mount.h>
53009+#include <linux/types.h>
53010+#include <linux/pid_namespace.h>
53011+#include <linux/grsecurity.h>
53012+#include <linux/grinternal.h>
53013+
53014+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
53015+{
53016+#ifdef CONFIG_GRKERNSEC
53017+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
53018+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
53019+ task->gr_is_chrooted = 1;
53020+ else
53021+ task->gr_is_chrooted = 0;
53022+
53023+ task->gr_chroot_dentry = path->dentry;
53024+#endif
53025+ return;
53026+}
53027+
53028+void gr_clear_chroot_entries(struct task_struct *task)
53029+{
53030+#ifdef CONFIG_GRKERNSEC
53031+ task->gr_is_chrooted = 0;
53032+ task->gr_chroot_dentry = NULL;
53033+#endif
53034+ return;
53035+}
53036+
53037+int
53038+gr_handle_chroot_unix(const pid_t pid)
53039+{
53040+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
53041+ struct task_struct *p;
53042+
53043+ if (unlikely(!grsec_enable_chroot_unix))
53044+ return 1;
53045+
53046+ if (likely(!proc_is_chrooted(current)))
53047+ return 1;
53048+
53049+ rcu_read_lock();
53050+ read_lock(&tasklist_lock);
53051+ p = find_task_by_vpid_unrestricted(pid);
53052+ if (unlikely(p && !have_same_root(current, p))) {
53053+ read_unlock(&tasklist_lock);
53054+ rcu_read_unlock();
53055+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
53056+ return 0;
53057+ }
53058+ read_unlock(&tasklist_lock);
53059+ rcu_read_unlock();
53060+#endif
53061+ return 1;
53062+}
53063+
53064+int
53065+gr_handle_chroot_nice(void)
53066+{
53067+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53068+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
53069+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
53070+ return -EPERM;
53071+ }
53072+#endif
53073+ return 0;
53074+}
53075+
53076+int
53077+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
53078+{
53079+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53080+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
53081+ && proc_is_chrooted(current)) {
53082+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
53083+ return -EACCES;
53084+ }
53085+#endif
53086+ return 0;
53087+}
53088+
53089+int
53090+gr_handle_chroot_rawio(const struct inode *inode)
53091+{
53092+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53093+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
53094+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
53095+ return 1;
53096+#endif
53097+ return 0;
53098+}
53099+
53100+int
53101+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
53102+{
53103+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53104+ struct task_struct *p;
53105+ int ret = 0;
53106+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
53107+ return ret;
53108+
53109+ read_lock(&tasklist_lock);
53110+ do_each_pid_task(pid, type, p) {
53111+ if (!have_same_root(current, p)) {
53112+ ret = 1;
53113+ goto out;
53114+ }
53115+ } while_each_pid_task(pid, type, p);
53116+out:
53117+ read_unlock(&tasklist_lock);
53118+ return ret;
53119+#endif
53120+ return 0;
53121+}
53122+
53123+int
53124+gr_pid_is_chrooted(struct task_struct *p)
53125+{
53126+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53127+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
53128+ return 0;
53129+
53130+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
53131+ !have_same_root(current, p)) {
53132+ return 1;
53133+ }
53134+#endif
53135+ return 0;
53136+}
53137+
53138+EXPORT_SYMBOL(gr_pid_is_chrooted);
53139+
53140+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
53141+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
53142+{
53143+ struct path path, currentroot;
53144+ int ret = 0;
53145+
53146+ path.dentry = (struct dentry *)u_dentry;
53147+ path.mnt = (struct vfsmount *)u_mnt;
53148+ get_fs_root(current->fs, &currentroot);
53149+ if (path_is_under(&path, &currentroot))
53150+ ret = 1;
53151+ path_put(&currentroot);
53152+
53153+ return ret;
53154+}
53155+#endif
53156+
53157+int
53158+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
53159+{
53160+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
53161+ if (!grsec_enable_chroot_fchdir)
53162+ return 1;
53163+
53164+ if (!proc_is_chrooted(current))
53165+ return 1;
53166+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
53167+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
53168+ return 0;
53169+ }
53170+#endif
53171+ return 1;
53172+}
53173+
53174+int
53175+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53176+ const time_t shm_createtime)
53177+{
53178+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
53179+ struct task_struct *p;
53180+ time_t starttime;
53181+
53182+ if (unlikely(!grsec_enable_chroot_shmat))
53183+ return 1;
53184+
53185+ if (likely(!proc_is_chrooted(current)))
53186+ return 1;
53187+
53188+ rcu_read_lock();
53189+ read_lock(&tasklist_lock);
53190+
53191+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
53192+ starttime = p->start_time.tv_sec;
53193+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
53194+ if (have_same_root(current, p)) {
53195+ goto allow;
53196+ } else {
53197+ read_unlock(&tasklist_lock);
53198+ rcu_read_unlock();
53199+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
53200+ return 0;
53201+ }
53202+ }
53203+ /* creator exited, pid reuse, fall through to next check */
53204+ }
53205+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
53206+ if (unlikely(!have_same_root(current, p))) {
53207+ read_unlock(&tasklist_lock);
53208+ rcu_read_unlock();
53209+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
53210+ return 0;
53211+ }
53212+ }
53213+
53214+allow:
53215+ read_unlock(&tasklist_lock);
53216+ rcu_read_unlock();
53217+#endif
53218+ return 1;
53219+}
53220+
53221+void
53222+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
53223+{
53224+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
53225+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
53226+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
53227+#endif
53228+ return;
53229+}
53230+
53231+int
53232+gr_handle_chroot_mknod(const struct dentry *dentry,
53233+ const struct vfsmount *mnt, const int mode)
53234+{
53235+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
53236+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
53237+ proc_is_chrooted(current)) {
53238+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
53239+ return -EPERM;
53240+ }
53241+#endif
53242+ return 0;
53243+}
53244+
53245+int
53246+gr_handle_chroot_mount(const struct dentry *dentry,
53247+ const struct vfsmount *mnt, const char *dev_name)
53248+{
53249+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
53250+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
53251+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
53252+ return -EPERM;
53253+ }
53254+#endif
53255+ return 0;
53256+}
53257+
53258+int
53259+gr_handle_chroot_pivot(void)
53260+{
53261+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
53262+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
53263+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
53264+ return -EPERM;
53265+ }
53266+#endif
53267+ return 0;
53268+}
53269+
53270+int
53271+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
53272+{
53273+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
53274+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
53275+ !gr_is_outside_chroot(dentry, mnt)) {
53276+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
53277+ return -EPERM;
53278+ }
53279+#endif
53280+ return 0;
53281+}
53282+
53283+extern const char *captab_log[];
53284+extern int captab_log_entries;
53285+
53286+int
53287+gr_chroot_is_capable(const int cap)
53288+{
53289+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53290+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
53291+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
53292+ if (cap_raised(chroot_caps, cap)) {
53293+ const struct cred *creds = current_cred();
53294+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
53295+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
53296+ }
53297+ return 0;
53298+ }
53299+ }
53300+#endif
53301+ return 1;
53302+}
53303+
53304+int
53305+gr_chroot_is_capable_nolog(const int cap)
53306+{
53307+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53308+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
53309+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
53310+ if (cap_raised(chroot_caps, cap)) {
53311+ return 0;
53312+ }
53313+ }
53314+#endif
53315+ return 1;
53316+}
53317+
53318+int
53319+gr_handle_chroot_sysctl(const int op)
53320+{
53321+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
53322+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
53323+ proc_is_chrooted(current))
53324+ return -EACCES;
53325+#endif
53326+ return 0;
53327+}
53328+
53329+void
53330+gr_handle_chroot_chdir(struct path *path)
53331+{
53332+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
53333+ if (grsec_enable_chroot_chdir)
53334+ set_fs_pwd(current->fs, path);
53335+#endif
53336+ return;
53337+}
53338+
53339+int
53340+gr_handle_chroot_chmod(const struct dentry *dentry,
53341+ const struct vfsmount *mnt, const int mode)
53342+{
53343+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
53344+ /* allow chmod +s on directories, but not files */
53345+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
53346+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
53347+ proc_is_chrooted(current)) {
53348+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
53349+ return -EPERM;
53350+ }
53351+#endif
53352+ return 0;
53353+}
53354diff -urNp linux-3.1.1/grsecurity/grsec_disabled.c linux-3.1.1/grsecurity/grsec_disabled.c
53355--- linux-3.1.1/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
53356+++ linux-3.1.1/grsecurity/grsec_disabled.c 2011-11-17 00:16:25.000000000 -0500
53357@@ -0,0 +1,439 @@
53358+#include <linux/kernel.h>
53359+#include <linux/module.h>
53360+#include <linux/sched.h>
53361+#include <linux/file.h>
53362+#include <linux/fs.h>
53363+#include <linux/kdev_t.h>
53364+#include <linux/net.h>
53365+#include <linux/in.h>
53366+#include <linux/ip.h>
53367+#include <linux/skbuff.h>
53368+#include <linux/sysctl.h>
53369+
53370+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
53371+void
53372+pax_set_initial_flags(struct linux_binprm *bprm)
53373+{
53374+ return;
53375+}
53376+#endif
53377+
53378+#ifdef CONFIG_SYSCTL
53379+__u32
53380+gr_handle_sysctl(const struct ctl_table * table, const int op)
53381+{
53382+ return 0;
53383+}
53384+#endif
53385+
53386+#ifdef CONFIG_TASKSTATS
53387+int gr_is_taskstats_denied(int pid)
53388+{
53389+ return 0;
53390+}
53391+#endif
53392+
53393+int
53394+gr_acl_is_enabled(void)
53395+{
53396+ return 0;
53397+}
53398+
53399+void
53400+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
53401+{
53402+ return;
53403+}
53404+
53405+int
53406+gr_handle_rawio(const struct inode *inode)
53407+{
53408+ return 0;
53409+}
53410+
53411+void
53412+gr_acl_handle_psacct(struct task_struct *task, const long code)
53413+{
53414+ return;
53415+}
53416+
53417+int
53418+gr_handle_ptrace(struct task_struct *task, const long request)
53419+{
53420+ return 0;
53421+}
53422+
53423+int
53424+gr_handle_proc_ptrace(struct task_struct *task)
53425+{
53426+ return 0;
53427+}
53428+
53429+void
53430+gr_learn_resource(const struct task_struct *task,
53431+ const int res, const unsigned long wanted, const int gt)
53432+{
53433+ return;
53434+}
53435+
53436+int
53437+gr_set_acls(const int type)
53438+{
53439+ return 0;
53440+}
53441+
53442+int
53443+gr_check_hidden_task(const struct task_struct *tsk)
53444+{
53445+ return 0;
53446+}
53447+
53448+int
53449+gr_check_protected_task(const struct task_struct *task)
53450+{
53451+ return 0;
53452+}
53453+
53454+int
53455+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
53456+{
53457+ return 0;
53458+}
53459+
53460+void
53461+gr_copy_label(struct task_struct *tsk)
53462+{
53463+ return;
53464+}
53465+
53466+void
53467+gr_set_pax_flags(struct task_struct *task)
53468+{
53469+ return;
53470+}
53471+
53472+int
53473+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
53474+ const int unsafe_share)
53475+{
53476+ return 0;
53477+}
53478+
53479+void
53480+gr_handle_delete(const ino_t ino, const dev_t dev)
53481+{
53482+ return;
53483+}
53484+
53485+void
53486+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
53487+{
53488+ return;
53489+}
53490+
53491+void
53492+gr_handle_crash(struct task_struct *task, const int sig)
53493+{
53494+ return;
53495+}
53496+
53497+int
53498+gr_check_crash_exec(const struct file *filp)
53499+{
53500+ return 0;
53501+}
53502+
53503+int
53504+gr_check_crash_uid(const uid_t uid)
53505+{
53506+ return 0;
53507+}
53508+
53509+void
53510+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53511+ struct dentry *old_dentry,
53512+ struct dentry *new_dentry,
53513+ struct vfsmount *mnt, const __u8 replace)
53514+{
53515+ return;
53516+}
53517+
53518+int
53519+gr_search_socket(const int family, const int type, const int protocol)
53520+{
53521+ return 1;
53522+}
53523+
53524+int
53525+gr_search_connectbind(const int mode, const struct socket *sock,
53526+ const struct sockaddr_in *addr)
53527+{
53528+ return 0;
53529+}
53530+
53531+void
53532+gr_handle_alertkill(struct task_struct *task)
53533+{
53534+ return;
53535+}
53536+
53537+__u32
53538+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
53539+{
53540+ return 1;
53541+}
53542+
53543+__u32
53544+gr_acl_handle_hidden_file(const struct dentry * dentry,
53545+ const struct vfsmount * mnt)
53546+{
53547+ return 1;
53548+}
53549+
53550+__u32
53551+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
53552+ int acc_mode)
53553+{
53554+ return 1;
53555+}
53556+
53557+__u32
53558+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
53559+{
53560+ return 1;
53561+}
53562+
53563+__u32
53564+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
53565+{
53566+ return 1;
53567+}
53568+
53569+int
53570+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
53571+ unsigned int *vm_flags)
53572+{
53573+ return 1;
53574+}
53575+
53576+__u32
53577+gr_acl_handle_truncate(const struct dentry * dentry,
53578+ const struct vfsmount * mnt)
53579+{
53580+ return 1;
53581+}
53582+
53583+__u32
53584+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
53585+{
53586+ return 1;
53587+}
53588+
53589+__u32
53590+gr_acl_handle_access(const struct dentry * dentry,
53591+ const struct vfsmount * mnt, const int fmode)
53592+{
53593+ return 1;
53594+}
53595+
53596+__u32
53597+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
53598+ mode_t mode)
53599+{
53600+ return 1;
53601+}
53602+
53603+__u32
53604+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
53605+ mode_t mode)
53606+{
53607+ return 1;
53608+}
53609+
53610+__u32
53611+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
53612+{
53613+ return 1;
53614+}
53615+
53616+__u32
53617+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
53618+{
53619+ return 1;
53620+}
53621+
53622+void
53623+grsecurity_init(void)
53624+{
53625+ return;
53626+}
53627+
53628+__u32
53629+gr_acl_handle_mknod(const struct dentry * new_dentry,
53630+ const struct dentry * parent_dentry,
53631+ const struct vfsmount * parent_mnt,
53632+ const int mode)
53633+{
53634+ return 1;
53635+}
53636+
53637+__u32
53638+gr_acl_handle_mkdir(const struct dentry * new_dentry,
53639+ const struct dentry * parent_dentry,
53640+ const struct vfsmount * parent_mnt)
53641+{
53642+ return 1;
53643+}
53644+
53645+__u32
53646+gr_acl_handle_symlink(const struct dentry * new_dentry,
53647+ const struct dentry * parent_dentry,
53648+ const struct vfsmount * parent_mnt, const char *from)
53649+{
53650+ return 1;
53651+}
53652+
53653+__u32
53654+gr_acl_handle_link(const struct dentry * new_dentry,
53655+ const struct dentry * parent_dentry,
53656+ const struct vfsmount * parent_mnt,
53657+ const struct dentry * old_dentry,
53658+ const struct vfsmount * old_mnt, const char *to)
53659+{
53660+ return 1;
53661+}
53662+
53663+int
53664+gr_acl_handle_rename(const struct dentry *new_dentry,
53665+ const struct dentry *parent_dentry,
53666+ const struct vfsmount *parent_mnt,
53667+ const struct dentry *old_dentry,
53668+ const struct inode *old_parent_inode,
53669+ const struct vfsmount *old_mnt, const char *newname)
53670+{
53671+ return 0;
53672+}
53673+
53674+int
53675+gr_acl_handle_filldir(const struct file *file, const char *name,
53676+ const int namelen, const ino_t ino)
53677+{
53678+ return 1;
53679+}
53680+
53681+int
53682+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53683+ const time_t shm_createtime, const uid_t cuid, const int shmid)
53684+{
53685+ return 1;
53686+}
53687+
53688+int
53689+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
53690+{
53691+ return 0;
53692+}
53693+
53694+int
53695+gr_search_accept(const struct socket *sock)
53696+{
53697+ return 0;
53698+}
53699+
53700+int
53701+gr_search_listen(const struct socket *sock)
53702+{
53703+ return 0;
53704+}
53705+
53706+int
53707+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
53708+{
53709+ return 0;
53710+}
53711+
53712+__u32
53713+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
53714+{
53715+ return 1;
53716+}
53717+
53718+__u32
53719+gr_acl_handle_creat(const struct dentry * dentry,
53720+ const struct dentry * p_dentry,
53721+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
53722+ const int imode)
53723+{
53724+ return 1;
53725+}
53726+
53727+void
53728+gr_acl_handle_exit(void)
53729+{
53730+ return;
53731+}
53732+
53733+int
53734+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
53735+{
53736+ return 1;
53737+}
53738+
53739+void
53740+gr_set_role_label(const uid_t uid, const gid_t gid)
53741+{
53742+ return;
53743+}
53744+
53745+int
53746+gr_acl_handle_procpidmem(const struct task_struct *task)
53747+{
53748+ return 0;
53749+}
53750+
53751+int
53752+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
53753+{
53754+ return 0;
53755+}
53756+
53757+int
53758+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
53759+{
53760+ return 0;
53761+}
53762+
53763+void
53764+gr_set_kernel_label(struct task_struct *task)
53765+{
53766+ return;
53767+}
53768+
53769+int
53770+gr_check_user_change(int real, int effective, int fs)
53771+{
53772+ return 0;
53773+}
53774+
53775+int
53776+gr_check_group_change(int real, int effective, int fs)
53777+{
53778+ return 0;
53779+}
53780+
53781+int gr_acl_enable_at_secure(void)
53782+{
53783+ return 0;
53784+}
53785+
53786+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
53787+{
53788+ return dentry->d_inode->i_sb->s_dev;
53789+}
53790+
53791+EXPORT_SYMBOL(gr_learn_resource);
53792+EXPORT_SYMBOL(gr_set_kernel_label);
53793+#ifdef CONFIG_SECURITY
53794+EXPORT_SYMBOL(gr_check_user_change);
53795+EXPORT_SYMBOL(gr_check_group_change);
53796+#endif
53797diff -urNp linux-3.1.1/grsecurity/grsec_exec.c linux-3.1.1/grsecurity/grsec_exec.c
53798--- linux-3.1.1/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
53799+++ linux-3.1.1/grsecurity/grsec_exec.c 2011-11-16 18:40:31.000000000 -0500
53800@@ -0,0 +1,146 @@
53801+#include <linux/kernel.h>
53802+#include <linux/sched.h>
53803+#include <linux/file.h>
53804+#include <linux/binfmts.h>
53805+#include <linux/fs.h>
53806+#include <linux/types.h>
53807+#include <linux/grdefs.h>
53808+#include <linux/grsecurity.h>
53809+#include <linux/grinternal.h>
53810+#include <linux/capability.h>
53811+#include <linux/module.h>
53812+
53813+#include <asm/uaccess.h>
53814+
53815+#ifdef CONFIG_GRKERNSEC_EXECLOG
53816+static char gr_exec_arg_buf[132];
53817+static DEFINE_MUTEX(gr_exec_arg_mutex);
53818+#endif
53819+
53820+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
53821+
53822+void
53823+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
53824+{
53825+#ifdef CONFIG_GRKERNSEC_EXECLOG
53826+ char *grarg = gr_exec_arg_buf;
53827+ unsigned int i, x, execlen = 0;
53828+ char c;
53829+
53830+ if (!((grsec_enable_execlog && grsec_enable_group &&
53831+ in_group_p(grsec_audit_gid))
53832+ || (grsec_enable_execlog && !grsec_enable_group)))
53833+ return;
53834+
53835+ mutex_lock(&gr_exec_arg_mutex);
53836+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
53837+
53838+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
53839+ const char __user *p;
53840+ unsigned int len;
53841+
53842+ p = get_user_arg_ptr(argv, i);
53843+ if (IS_ERR(p))
53844+ goto log;
53845+
53846+ len = strnlen_user(p, 128 - execlen);
53847+ if (len > 128 - execlen)
53848+ len = 128 - execlen;
53849+ else if (len > 0)
53850+ len--;
53851+ if (copy_from_user(grarg + execlen, p, len))
53852+ goto log;
53853+
53854+ /* rewrite unprintable characters */
53855+ for (x = 0; x < len; x++) {
53856+ c = *(grarg + execlen + x);
53857+ if (c < 32 || c > 126)
53858+ *(grarg + execlen + x) = ' ';
53859+ }
53860+
53861+ execlen += len;
53862+ *(grarg + execlen) = ' ';
53863+ *(grarg + execlen + 1) = '\0';
53864+ execlen++;
53865+ }
53866+
53867+ log:
53868+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
53869+ bprm->file->f_path.mnt, grarg);
53870+ mutex_unlock(&gr_exec_arg_mutex);
53871+#endif
53872+ return;
53873+}
53874+
53875+#ifdef CONFIG_GRKERNSEC
53876+extern int gr_acl_is_capable(const int cap);
53877+extern int gr_acl_is_capable_nolog(const int cap);
53878+extern int gr_chroot_is_capable(const int cap);
53879+extern int gr_chroot_is_capable_nolog(const int cap);
53880+#endif
53881+
53882+const char *captab_log[] = {
53883+ "CAP_CHOWN",
53884+ "CAP_DAC_OVERRIDE",
53885+ "CAP_DAC_READ_SEARCH",
53886+ "CAP_FOWNER",
53887+ "CAP_FSETID",
53888+ "CAP_KILL",
53889+ "CAP_SETGID",
53890+ "CAP_SETUID",
53891+ "CAP_SETPCAP",
53892+ "CAP_LINUX_IMMUTABLE",
53893+ "CAP_NET_BIND_SERVICE",
53894+ "CAP_NET_BROADCAST",
53895+ "CAP_NET_ADMIN",
53896+ "CAP_NET_RAW",
53897+ "CAP_IPC_LOCK",
53898+ "CAP_IPC_OWNER",
53899+ "CAP_SYS_MODULE",
53900+ "CAP_SYS_RAWIO",
53901+ "CAP_SYS_CHROOT",
53902+ "CAP_SYS_PTRACE",
53903+ "CAP_SYS_PACCT",
53904+ "CAP_SYS_ADMIN",
53905+ "CAP_SYS_BOOT",
53906+ "CAP_SYS_NICE",
53907+ "CAP_SYS_RESOURCE",
53908+ "CAP_SYS_TIME",
53909+ "CAP_SYS_TTY_CONFIG",
53910+ "CAP_MKNOD",
53911+ "CAP_LEASE",
53912+ "CAP_AUDIT_WRITE",
53913+ "CAP_AUDIT_CONTROL",
53914+ "CAP_SETFCAP",
53915+ "CAP_MAC_OVERRIDE",
53916+ "CAP_MAC_ADMIN",
53917+ "CAP_SYSLOG",
53918+ "CAP_WAKE_ALARM"
53919+};
53920+
53921+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
53922+
53923+int gr_is_capable(const int cap)
53924+{
53925+#ifdef CONFIG_GRKERNSEC
53926+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
53927+ return 1;
53928+ return 0;
53929+#else
53930+ return 1;
53931+#endif
53932+}
53933+
53934+int gr_is_capable_nolog(const int cap)
53935+{
53936+#ifdef CONFIG_GRKERNSEC
53937+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
53938+ return 1;
53939+ return 0;
53940+#else
53941+ return 1;
53942+#endif
53943+}
53944+
53945+EXPORT_SYMBOL(gr_is_capable);
53946+EXPORT_SYMBOL(gr_is_capable_nolog);
53947diff -urNp linux-3.1.1/grsecurity/grsec_fifo.c linux-3.1.1/grsecurity/grsec_fifo.c
53948--- linux-3.1.1/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
53949+++ linux-3.1.1/grsecurity/grsec_fifo.c 2011-11-16 18:40:31.000000000 -0500
53950@@ -0,0 +1,24 @@
53951+#include <linux/kernel.h>
53952+#include <linux/sched.h>
53953+#include <linux/fs.h>
53954+#include <linux/file.h>
53955+#include <linux/grinternal.h>
53956+
53957+int
53958+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
53959+ const struct dentry *dir, const int flag, const int acc_mode)
53960+{
53961+#ifdef CONFIG_GRKERNSEC_FIFO
53962+ const struct cred *cred = current_cred();
53963+
53964+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
53965+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
53966+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
53967+ (cred->fsuid != dentry->d_inode->i_uid)) {
53968+ if (!inode_permission(dentry->d_inode, acc_mode))
53969+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
53970+ return -EACCES;
53971+ }
53972+#endif
53973+ return 0;
53974+}
53975diff -urNp linux-3.1.1/grsecurity/grsec_fork.c linux-3.1.1/grsecurity/grsec_fork.c
53976--- linux-3.1.1/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
53977+++ linux-3.1.1/grsecurity/grsec_fork.c 2011-11-16 18:40:31.000000000 -0500
53978@@ -0,0 +1,23 @@
53979+#include <linux/kernel.h>
53980+#include <linux/sched.h>
53981+#include <linux/grsecurity.h>
53982+#include <linux/grinternal.h>
53983+#include <linux/errno.h>
53984+
53985+void
53986+gr_log_forkfail(const int retval)
53987+{
53988+#ifdef CONFIG_GRKERNSEC_FORKFAIL
53989+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
53990+ switch (retval) {
53991+ case -EAGAIN:
53992+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
53993+ break;
53994+ case -ENOMEM:
53995+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
53996+ break;
53997+ }
53998+ }
53999+#endif
54000+ return;
54001+}
54002diff -urNp linux-3.1.1/grsecurity/grsec_init.c linux-3.1.1/grsecurity/grsec_init.c
54003--- linux-3.1.1/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
54004+++ linux-3.1.1/grsecurity/grsec_init.c 2011-11-16 18:40:31.000000000 -0500
54005@@ -0,0 +1,269 @@
54006+#include <linux/kernel.h>
54007+#include <linux/sched.h>
54008+#include <linux/mm.h>
54009+#include <linux/gracl.h>
54010+#include <linux/slab.h>
54011+#include <linux/vmalloc.h>
54012+#include <linux/percpu.h>
54013+#include <linux/module.h>
54014+
54015+int grsec_enable_brute;
54016+int grsec_enable_link;
54017+int grsec_enable_dmesg;
54018+int grsec_enable_harden_ptrace;
54019+int grsec_enable_fifo;
54020+int grsec_enable_execlog;
54021+int grsec_enable_signal;
54022+int grsec_enable_forkfail;
54023+int grsec_enable_audit_ptrace;
54024+int grsec_enable_time;
54025+int grsec_enable_audit_textrel;
54026+int grsec_enable_group;
54027+int grsec_audit_gid;
54028+int grsec_enable_chdir;
54029+int grsec_enable_mount;
54030+int grsec_enable_rofs;
54031+int grsec_enable_chroot_findtask;
54032+int grsec_enable_chroot_mount;
54033+int grsec_enable_chroot_shmat;
54034+int grsec_enable_chroot_fchdir;
54035+int grsec_enable_chroot_double;
54036+int grsec_enable_chroot_pivot;
54037+int grsec_enable_chroot_chdir;
54038+int grsec_enable_chroot_chmod;
54039+int grsec_enable_chroot_mknod;
54040+int grsec_enable_chroot_nice;
54041+int grsec_enable_chroot_execlog;
54042+int grsec_enable_chroot_caps;
54043+int grsec_enable_chroot_sysctl;
54044+int grsec_enable_chroot_unix;
54045+int grsec_enable_tpe;
54046+int grsec_tpe_gid;
54047+int grsec_enable_blackhole;
54048+#ifdef CONFIG_IPV6_MODULE
54049+EXPORT_SYMBOL(grsec_enable_blackhole);
54050+#endif
54051+int grsec_lastack_retries;
54052+int grsec_enable_tpe_all;
54053+int grsec_enable_tpe_invert;
54054+int grsec_enable_socket_all;
54055+int grsec_socket_all_gid;
54056+int grsec_enable_socket_client;
54057+int grsec_socket_client_gid;
54058+int grsec_enable_socket_server;
54059+int grsec_socket_server_gid;
54060+int grsec_resource_logging;
54061+int grsec_disable_privio;
54062+int grsec_enable_log_rwxmaps;
54063+int grsec_lock;
54064+
54065+DEFINE_SPINLOCK(grsec_alert_lock);
54066+unsigned long grsec_alert_wtime = 0;
54067+unsigned long grsec_alert_fyet = 0;
54068+
54069+DEFINE_SPINLOCK(grsec_audit_lock);
54070+
54071+DEFINE_RWLOCK(grsec_exec_file_lock);
54072+
54073+char *gr_shared_page[4];
54074+
54075+char *gr_alert_log_fmt;
54076+char *gr_audit_log_fmt;
54077+char *gr_alert_log_buf;
54078+char *gr_audit_log_buf;
54079+
54080+extern struct gr_arg *gr_usermode;
54081+extern unsigned char *gr_system_salt;
54082+extern unsigned char *gr_system_sum;
54083+
54084+void __init
54085+grsecurity_init(void)
54086+{
54087+ int j;
54088+ /* create the per-cpu shared pages */
54089+
54090+#ifdef CONFIG_X86
54091+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
54092+#endif
54093+
54094+ for (j = 0; j < 4; j++) {
54095+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
54096+ if (gr_shared_page[j] == NULL) {
54097+ panic("Unable to allocate grsecurity shared page");
54098+ return;
54099+ }
54100+ }
54101+
54102+ /* allocate log buffers */
54103+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
54104+ if (!gr_alert_log_fmt) {
54105+ panic("Unable to allocate grsecurity alert log format buffer");
54106+ return;
54107+ }
54108+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
54109+ if (!gr_audit_log_fmt) {
54110+ panic("Unable to allocate grsecurity audit log format buffer");
54111+ return;
54112+ }
54113+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
54114+ if (!gr_alert_log_buf) {
54115+ panic("Unable to allocate grsecurity alert log buffer");
54116+ return;
54117+ }
54118+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
54119+ if (!gr_audit_log_buf) {
54120+ panic("Unable to allocate grsecurity audit log buffer");
54121+ return;
54122+ }
54123+
54124+ /* allocate memory for authentication structure */
54125+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
54126+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
54127+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
54128+
54129+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
54130+ panic("Unable to allocate grsecurity authentication structure");
54131+ return;
54132+ }
54133+
54134+
54135+#ifdef CONFIG_GRKERNSEC_IO
54136+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
54137+ grsec_disable_privio = 1;
54138+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
54139+ grsec_disable_privio = 1;
54140+#else
54141+ grsec_disable_privio = 0;
54142+#endif
54143+#endif
54144+
54145+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
54146+ /* for backward compatibility, tpe_invert always defaults to on if
54147+ enabled in the kernel
54148+ */
54149+ grsec_enable_tpe_invert = 1;
54150+#endif
54151+
54152+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
54153+#ifndef CONFIG_GRKERNSEC_SYSCTL
54154+ grsec_lock = 1;
54155+#endif
54156+
54157+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
54158+ grsec_enable_audit_textrel = 1;
54159+#endif
54160+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54161+ grsec_enable_log_rwxmaps = 1;
54162+#endif
54163+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
54164+ grsec_enable_group = 1;
54165+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
54166+#endif
54167+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
54168+ grsec_enable_chdir = 1;
54169+#endif
54170+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54171+ grsec_enable_harden_ptrace = 1;
54172+#endif
54173+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54174+ grsec_enable_mount = 1;
54175+#endif
54176+#ifdef CONFIG_GRKERNSEC_LINK
54177+ grsec_enable_link = 1;
54178+#endif
54179+#ifdef CONFIG_GRKERNSEC_BRUTE
54180+ grsec_enable_brute = 1;
54181+#endif
54182+#ifdef CONFIG_GRKERNSEC_DMESG
54183+ grsec_enable_dmesg = 1;
54184+#endif
54185+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54186+ grsec_enable_blackhole = 1;
54187+ grsec_lastack_retries = 4;
54188+#endif
54189+#ifdef CONFIG_GRKERNSEC_FIFO
54190+ grsec_enable_fifo = 1;
54191+#endif
54192+#ifdef CONFIG_GRKERNSEC_EXECLOG
54193+ grsec_enable_execlog = 1;
54194+#endif
54195+#ifdef CONFIG_GRKERNSEC_SIGNAL
54196+ grsec_enable_signal = 1;
54197+#endif
54198+#ifdef CONFIG_GRKERNSEC_FORKFAIL
54199+ grsec_enable_forkfail = 1;
54200+#endif
54201+#ifdef CONFIG_GRKERNSEC_TIME
54202+ grsec_enable_time = 1;
54203+#endif
54204+#ifdef CONFIG_GRKERNSEC_RESLOG
54205+ grsec_resource_logging = 1;
54206+#endif
54207+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54208+ grsec_enable_chroot_findtask = 1;
54209+#endif
54210+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
54211+ grsec_enable_chroot_unix = 1;
54212+#endif
54213+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
54214+ grsec_enable_chroot_mount = 1;
54215+#endif
54216+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
54217+ grsec_enable_chroot_fchdir = 1;
54218+#endif
54219+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
54220+ grsec_enable_chroot_shmat = 1;
54221+#endif
54222+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
54223+ grsec_enable_audit_ptrace = 1;
54224+#endif
54225+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
54226+ grsec_enable_chroot_double = 1;
54227+#endif
54228+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
54229+ grsec_enable_chroot_pivot = 1;
54230+#endif
54231+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
54232+ grsec_enable_chroot_chdir = 1;
54233+#endif
54234+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
54235+ grsec_enable_chroot_chmod = 1;
54236+#endif
54237+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
54238+ grsec_enable_chroot_mknod = 1;
54239+#endif
54240+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54241+ grsec_enable_chroot_nice = 1;
54242+#endif
54243+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
54244+ grsec_enable_chroot_execlog = 1;
54245+#endif
54246+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54247+ grsec_enable_chroot_caps = 1;
54248+#endif
54249+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
54250+ grsec_enable_chroot_sysctl = 1;
54251+#endif
54252+#ifdef CONFIG_GRKERNSEC_TPE
54253+ grsec_enable_tpe = 1;
54254+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
54255+#ifdef CONFIG_GRKERNSEC_TPE_ALL
54256+ grsec_enable_tpe_all = 1;
54257+#endif
54258+#endif
54259+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
54260+ grsec_enable_socket_all = 1;
54261+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
54262+#endif
54263+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
54264+ grsec_enable_socket_client = 1;
54265+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
54266+#endif
54267+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54268+ grsec_enable_socket_server = 1;
54269+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
54270+#endif
54271+#endif
54272+
54273+ return;
54274+}
54275diff -urNp linux-3.1.1/grsecurity/grsec_link.c linux-3.1.1/grsecurity/grsec_link.c
54276--- linux-3.1.1/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
54277+++ linux-3.1.1/grsecurity/grsec_link.c 2011-11-16 18:40:31.000000000 -0500
54278@@ -0,0 +1,43 @@
54279+#include <linux/kernel.h>
54280+#include <linux/sched.h>
54281+#include <linux/fs.h>
54282+#include <linux/file.h>
54283+#include <linux/grinternal.h>
54284+
54285+int
54286+gr_handle_follow_link(const struct inode *parent,
54287+ const struct inode *inode,
54288+ const struct dentry *dentry, const struct vfsmount *mnt)
54289+{
54290+#ifdef CONFIG_GRKERNSEC_LINK
54291+ const struct cred *cred = current_cred();
54292+
54293+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
54294+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
54295+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
54296+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
54297+ return -EACCES;
54298+ }
54299+#endif
54300+ return 0;
54301+}
54302+
54303+int
54304+gr_handle_hardlink(const struct dentry *dentry,
54305+ const struct vfsmount *mnt,
54306+ struct inode *inode, const int mode, const char *to)
54307+{
54308+#ifdef CONFIG_GRKERNSEC_LINK
54309+ const struct cred *cred = current_cred();
54310+
54311+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
54312+ (!S_ISREG(mode) || (mode & S_ISUID) ||
54313+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
54314+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
54315+ !capable(CAP_FOWNER) && cred->uid) {
54316+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
54317+ return -EPERM;
54318+ }
54319+#endif
54320+ return 0;
54321+}
54322diff -urNp linux-3.1.1/grsecurity/grsec_log.c linux-3.1.1/grsecurity/grsec_log.c
54323--- linux-3.1.1/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
54324+++ linux-3.1.1/grsecurity/grsec_log.c 2011-11-16 18:40:31.000000000 -0500
54325@@ -0,0 +1,322 @@
54326+#include <linux/kernel.h>
54327+#include <linux/sched.h>
54328+#include <linux/file.h>
54329+#include <linux/tty.h>
54330+#include <linux/fs.h>
54331+#include <linux/grinternal.h>
54332+
54333+#ifdef CONFIG_TREE_PREEMPT_RCU
54334+#define DISABLE_PREEMPT() preempt_disable()
54335+#define ENABLE_PREEMPT() preempt_enable()
54336+#else
54337+#define DISABLE_PREEMPT()
54338+#define ENABLE_PREEMPT()
54339+#endif
54340+
54341+#define BEGIN_LOCKS(x) \
54342+ DISABLE_PREEMPT(); \
54343+ rcu_read_lock(); \
54344+ read_lock(&tasklist_lock); \
54345+ read_lock(&grsec_exec_file_lock); \
54346+ if (x != GR_DO_AUDIT) \
54347+ spin_lock(&grsec_alert_lock); \
54348+ else \
54349+ spin_lock(&grsec_audit_lock)
54350+
54351+#define END_LOCKS(x) \
54352+ if (x != GR_DO_AUDIT) \
54353+ spin_unlock(&grsec_alert_lock); \
54354+ else \
54355+ spin_unlock(&grsec_audit_lock); \
54356+ read_unlock(&grsec_exec_file_lock); \
54357+ read_unlock(&tasklist_lock); \
54358+ rcu_read_unlock(); \
54359+ ENABLE_PREEMPT(); \
54360+ if (x == GR_DONT_AUDIT) \
54361+ gr_handle_alertkill(current)
54362+
54363+enum {
54364+ FLOODING,
54365+ NO_FLOODING
54366+};
54367+
54368+extern char *gr_alert_log_fmt;
54369+extern char *gr_audit_log_fmt;
54370+extern char *gr_alert_log_buf;
54371+extern char *gr_audit_log_buf;
54372+
54373+static int gr_log_start(int audit)
54374+{
54375+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
54376+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
54377+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54378+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
54379+ unsigned long curr_secs = get_seconds();
54380+
54381+ if (audit == GR_DO_AUDIT)
54382+ goto set_fmt;
54383+
54384+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
54385+ grsec_alert_wtime = curr_secs;
54386+ grsec_alert_fyet = 0;
54387+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
54388+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
54389+ grsec_alert_fyet++;
54390+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
54391+ grsec_alert_wtime = curr_secs;
54392+ grsec_alert_fyet++;
54393+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
54394+ return FLOODING;
54395+ }
54396+ else return FLOODING;
54397+
54398+set_fmt:
54399+#endif
54400+ memset(buf, 0, PAGE_SIZE);
54401+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
54402+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
54403+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
54404+ } else if (current->signal->curr_ip) {
54405+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
54406+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
54407+ } else if (gr_acl_is_enabled()) {
54408+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
54409+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
54410+ } else {
54411+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
54412+ strcpy(buf, fmt);
54413+ }
54414+
54415+ return NO_FLOODING;
54416+}
54417+
54418+static void gr_log_middle(int audit, const char *msg, va_list ap)
54419+ __attribute__ ((format (printf, 2, 0)));
54420+
54421+static void gr_log_middle(int audit, const char *msg, va_list ap)
54422+{
54423+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54424+ unsigned int len = strlen(buf);
54425+
54426+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
54427+
54428+ return;
54429+}
54430+
54431+static void gr_log_middle_varargs(int audit, const char *msg, ...)
54432+ __attribute__ ((format (printf, 2, 3)));
54433+
54434+static void gr_log_middle_varargs(int audit, const char *msg, ...)
54435+{
54436+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54437+ unsigned int len = strlen(buf);
54438+ va_list ap;
54439+
54440+ va_start(ap, msg);
54441+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
54442+ va_end(ap);
54443+
54444+ return;
54445+}
54446+
54447+static void gr_log_end(int audit, int append_default)
54448+{
54449+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54450+
54451+ if (append_default) {
54452+ unsigned int len = strlen(buf);
54453+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
54454+ }
54455+
54456+ printk("%s\n", buf);
54457+
54458+ return;
54459+}
54460+
54461+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
54462+{
54463+ int logtype;
54464+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
54465+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
54466+ void *voidptr = NULL;
54467+ int num1 = 0, num2 = 0;
54468+ unsigned long ulong1 = 0, ulong2 = 0;
54469+ struct dentry *dentry = NULL;
54470+ struct vfsmount *mnt = NULL;
54471+ struct file *file = NULL;
54472+ struct task_struct *task = NULL;
54473+ const struct cred *cred, *pcred;
54474+ va_list ap;
54475+
54476+ BEGIN_LOCKS(audit);
54477+ logtype = gr_log_start(audit);
54478+ if (logtype == FLOODING) {
54479+ END_LOCKS(audit);
54480+ return;
54481+ }
54482+ va_start(ap, argtypes);
54483+ switch (argtypes) {
54484+ case GR_TTYSNIFF:
54485+ task = va_arg(ap, struct task_struct *);
54486+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
54487+ break;
54488+ case GR_SYSCTL_HIDDEN:
54489+ str1 = va_arg(ap, char *);
54490+ gr_log_middle_varargs(audit, msg, result, str1);
54491+ break;
54492+ case GR_RBAC:
54493+ dentry = va_arg(ap, struct dentry *);
54494+ mnt = va_arg(ap, struct vfsmount *);
54495+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
54496+ break;
54497+ case GR_RBAC_STR:
54498+ dentry = va_arg(ap, struct dentry *);
54499+ mnt = va_arg(ap, struct vfsmount *);
54500+ str1 = va_arg(ap, char *);
54501+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
54502+ break;
54503+ case GR_STR_RBAC:
54504+ str1 = va_arg(ap, char *);
54505+ dentry = va_arg(ap, struct dentry *);
54506+ mnt = va_arg(ap, struct vfsmount *);
54507+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
54508+ break;
54509+ case GR_RBAC_MODE2:
54510+ dentry = va_arg(ap, struct dentry *);
54511+ mnt = va_arg(ap, struct vfsmount *);
54512+ str1 = va_arg(ap, char *);
54513+ str2 = va_arg(ap, char *);
54514+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
54515+ break;
54516+ case GR_RBAC_MODE3:
54517+ dentry = va_arg(ap, struct dentry *);
54518+ mnt = va_arg(ap, struct vfsmount *);
54519+ str1 = va_arg(ap, char *);
54520+ str2 = va_arg(ap, char *);
54521+ str3 = va_arg(ap, char *);
54522+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
54523+ break;
54524+ case GR_FILENAME:
54525+ dentry = va_arg(ap, struct dentry *);
54526+ mnt = va_arg(ap, struct vfsmount *);
54527+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
54528+ break;
54529+ case GR_STR_FILENAME:
54530+ str1 = va_arg(ap, char *);
54531+ dentry = va_arg(ap, struct dentry *);
54532+ mnt = va_arg(ap, struct vfsmount *);
54533+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
54534+ break;
54535+ case GR_FILENAME_STR:
54536+ dentry = va_arg(ap, struct dentry *);
54537+ mnt = va_arg(ap, struct vfsmount *);
54538+ str1 = va_arg(ap, char *);
54539+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
54540+ break;
54541+ case GR_FILENAME_TWO_INT:
54542+ dentry = va_arg(ap, struct dentry *);
54543+ mnt = va_arg(ap, struct vfsmount *);
54544+ num1 = va_arg(ap, int);
54545+ num2 = va_arg(ap, int);
54546+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
54547+ break;
54548+ case GR_FILENAME_TWO_INT_STR:
54549+ dentry = va_arg(ap, struct dentry *);
54550+ mnt = va_arg(ap, struct vfsmount *);
54551+ num1 = va_arg(ap, int);
54552+ num2 = va_arg(ap, int);
54553+ str1 = va_arg(ap, char *);
54554+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
54555+ break;
54556+ case GR_TEXTREL:
54557+ file = va_arg(ap, struct file *);
54558+ ulong1 = va_arg(ap, unsigned long);
54559+ ulong2 = va_arg(ap, unsigned long);
54560+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
54561+ break;
54562+ case GR_PTRACE:
54563+ task = va_arg(ap, struct task_struct *);
54564+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
54565+ break;
54566+ case GR_RESOURCE:
54567+ task = va_arg(ap, struct task_struct *);
54568+ cred = __task_cred(task);
54569+ pcred = __task_cred(task->real_parent);
54570+ ulong1 = va_arg(ap, unsigned long);
54571+ str1 = va_arg(ap, char *);
54572+ ulong2 = va_arg(ap, unsigned long);
54573+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54574+ break;
54575+ case GR_CAP:
54576+ task = va_arg(ap, struct task_struct *);
54577+ cred = __task_cred(task);
54578+ pcred = __task_cred(task->real_parent);
54579+ str1 = va_arg(ap, char *);
54580+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54581+ break;
54582+ case GR_SIG:
54583+ str1 = va_arg(ap, char *);
54584+ voidptr = va_arg(ap, void *);
54585+ gr_log_middle_varargs(audit, msg, str1, voidptr);
54586+ break;
54587+ case GR_SIG2:
54588+ task = va_arg(ap, struct task_struct *);
54589+ cred = __task_cred(task);
54590+ pcred = __task_cred(task->real_parent);
54591+ num1 = va_arg(ap, int);
54592+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54593+ break;
54594+ case GR_CRASH1:
54595+ task = va_arg(ap, struct task_struct *);
54596+ cred = __task_cred(task);
54597+ pcred = __task_cred(task->real_parent);
54598+ ulong1 = va_arg(ap, unsigned long);
54599+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
54600+ break;
54601+ case GR_CRASH2:
54602+ task = va_arg(ap, struct task_struct *);
54603+ cred = __task_cred(task);
54604+ pcred = __task_cred(task->real_parent);
54605+ ulong1 = va_arg(ap, unsigned long);
54606+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
54607+ break;
54608+ case GR_RWXMAP:
54609+ file = va_arg(ap, struct file *);
54610+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
54611+ break;
54612+ case GR_PSACCT:
54613+ {
54614+ unsigned int wday, cday;
54615+ __u8 whr, chr;
54616+ __u8 wmin, cmin;
54617+ __u8 wsec, csec;
54618+ char cur_tty[64] = { 0 };
54619+ char parent_tty[64] = { 0 };
54620+
54621+ task = va_arg(ap, struct task_struct *);
54622+ wday = va_arg(ap, unsigned int);
54623+ cday = va_arg(ap, unsigned int);
54624+ whr = va_arg(ap, int);
54625+ chr = va_arg(ap, int);
54626+ wmin = va_arg(ap, int);
54627+ cmin = va_arg(ap, int);
54628+ wsec = va_arg(ap, int);
54629+ csec = va_arg(ap, int);
54630+ ulong1 = va_arg(ap, unsigned long);
54631+ cred = __task_cred(task);
54632+ pcred = __task_cred(task->real_parent);
54633+
54634+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54635+ }
54636+ break;
54637+ default:
54638+ gr_log_middle(audit, msg, ap);
54639+ }
54640+ va_end(ap);
54641+ // these don't need DEFAULTSECARGS printed on the end
54642+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
54643+ gr_log_end(audit, 0);
54644+ else
54645+ gr_log_end(audit, 1);
54646+ END_LOCKS(audit);
54647+}
54648diff -urNp linux-3.1.1/grsecurity/grsec_mem.c linux-3.1.1/grsecurity/grsec_mem.c
54649--- linux-3.1.1/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
54650+++ linux-3.1.1/grsecurity/grsec_mem.c 2011-11-16 18:40:31.000000000 -0500
54651@@ -0,0 +1,33 @@
54652+#include <linux/kernel.h>
54653+#include <linux/sched.h>
54654+#include <linux/mm.h>
54655+#include <linux/mman.h>
54656+#include <linux/grinternal.h>
54657+
54658+void
54659+gr_handle_ioperm(void)
54660+{
54661+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
54662+ return;
54663+}
54664+
54665+void
54666+gr_handle_iopl(void)
54667+{
54668+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
54669+ return;
54670+}
54671+
54672+void
54673+gr_handle_mem_readwrite(u64 from, u64 to)
54674+{
54675+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
54676+ return;
54677+}
54678+
54679+void
54680+gr_handle_vm86(void)
54681+{
54682+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
54683+ return;
54684+}
54685diff -urNp linux-3.1.1/grsecurity/grsec_mount.c linux-3.1.1/grsecurity/grsec_mount.c
54686--- linux-3.1.1/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
54687+++ linux-3.1.1/grsecurity/grsec_mount.c 2011-11-16 18:40:31.000000000 -0500
54688@@ -0,0 +1,62 @@
54689+#include <linux/kernel.h>
54690+#include <linux/sched.h>
54691+#include <linux/mount.h>
54692+#include <linux/grsecurity.h>
54693+#include <linux/grinternal.h>
54694+
54695+void
54696+gr_log_remount(const char *devname, const int retval)
54697+{
54698+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54699+ if (grsec_enable_mount && (retval >= 0))
54700+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
54701+#endif
54702+ return;
54703+}
54704+
54705+void
54706+gr_log_unmount(const char *devname, const int retval)
54707+{
54708+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54709+ if (grsec_enable_mount && (retval >= 0))
54710+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
54711+#endif
54712+ return;
54713+}
54714+
54715+void
54716+gr_log_mount(const char *from, const char *to, const int retval)
54717+{
54718+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54719+ if (grsec_enable_mount && (retval >= 0))
54720+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
54721+#endif
54722+ return;
54723+}
54724+
54725+int
54726+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
54727+{
54728+#ifdef CONFIG_GRKERNSEC_ROFS
54729+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
54730+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
54731+ return -EPERM;
54732+ } else
54733+ return 0;
54734+#endif
54735+ return 0;
54736+}
54737+
54738+int
54739+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
54740+{
54741+#ifdef CONFIG_GRKERNSEC_ROFS
54742+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
54743+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
54744+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
54745+ return -EPERM;
54746+ } else
54747+ return 0;
54748+#endif
54749+ return 0;
54750+}
54751diff -urNp linux-3.1.1/grsecurity/grsec_pax.c linux-3.1.1/grsecurity/grsec_pax.c
54752--- linux-3.1.1/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
54753+++ linux-3.1.1/grsecurity/grsec_pax.c 2011-11-16 18:40:31.000000000 -0500
54754@@ -0,0 +1,36 @@
54755+#include <linux/kernel.h>
54756+#include <linux/sched.h>
54757+#include <linux/mm.h>
54758+#include <linux/file.h>
54759+#include <linux/grinternal.h>
54760+#include <linux/grsecurity.h>
54761+
54762+void
54763+gr_log_textrel(struct vm_area_struct * vma)
54764+{
54765+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
54766+ if (grsec_enable_audit_textrel)
54767+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
54768+#endif
54769+ return;
54770+}
54771+
54772+void
54773+gr_log_rwxmmap(struct file *file)
54774+{
54775+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54776+ if (grsec_enable_log_rwxmaps)
54777+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
54778+#endif
54779+ return;
54780+}
54781+
54782+void
54783+gr_log_rwxmprotect(struct file *file)
54784+{
54785+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54786+ if (grsec_enable_log_rwxmaps)
54787+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
54788+#endif
54789+ return;
54790+}
54791diff -urNp linux-3.1.1/grsecurity/grsec_ptrace.c linux-3.1.1/grsecurity/grsec_ptrace.c
54792--- linux-3.1.1/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
54793+++ linux-3.1.1/grsecurity/grsec_ptrace.c 2011-11-16 18:40:31.000000000 -0500
54794@@ -0,0 +1,14 @@
54795+#include <linux/kernel.h>
54796+#include <linux/sched.h>
54797+#include <linux/grinternal.h>
54798+#include <linux/grsecurity.h>
54799+
54800+void
54801+gr_audit_ptrace(struct task_struct *task)
54802+{
54803+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
54804+ if (grsec_enable_audit_ptrace)
54805+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
54806+#endif
54807+ return;
54808+}
54809diff -urNp linux-3.1.1/grsecurity/grsec_sig.c linux-3.1.1/grsecurity/grsec_sig.c
54810--- linux-3.1.1/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
54811+++ linux-3.1.1/grsecurity/grsec_sig.c 2011-11-16 18:40:31.000000000 -0500
54812@@ -0,0 +1,206 @@
54813+#include <linux/kernel.h>
54814+#include <linux/sched.h>
54815+#include <linux/delay.h>
54816+#include <linux/grsecurity.h>
54817+#include <linux/grinternal.h>
54818+#include <linux/hardirq.h>
54819+
54820+char *signames[] = {
54821+ [SIGSEGV] = "Segmentation fault",
54822+ [SIGILL] = "Illegal instruction",
54823+ [SIGABRT] = "Abort",
54824+ [SIGBUS] = "Invalid alignment/Bus error"
54825+};
54826+
54827+void
54828+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
54829+{
54830+#ifdef CONFIG_GRKERNSEC_SIGNAL
54831+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
54832+ (sig == SIGABRT) || (sig == SIGBUS))) {
54833+ if (t->pid == current->pid) {
54834+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
54835+ } else {
54836+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
54837+ }
54838+ }
54839+#endif
54840+ return;
54841+}
54842+
54843+int
54844+gr_handle_signal(const struct task_struct *p, const int sig)
54845+{
54846+#ifdef CONFIG_GRKERNSEC
54847+ if (current->pid > 1 && gr_check_protected_task(p)) {
54848+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
54849+ return -EPERM;
54850+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
54851+ return -EPERM;
54852+ }
54853+#endif
54854+ return 0;
54855+}
54856+
54857+#ifdef CONFIG_GRKERNSEC
54858+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
54859+
54860+int gr_fake_force_sig(int sig, struct task_struct *t)
54861+{
54862+ unsigned long int flags;
54863+ int ret, blocked, ignored;
54864+ struct k_sigaction *action;
54865+
54866+ spin_lock_irqsave(&t->sighand->siglock, flags);
54867+ action = &t->sighand->action[sig-1];
54868+ ignored = action->sa.sa_handler == SIG_IGN;
54869+ blocked = sigismember(&t->blocked, sig);
54870+ if (blocked || ignored) {
54871+ action->sa.sa_handler = SIG_DFL;
54872+ if (blocked) {
54873+ sigdelset(&t->blocked, sig);
54874+ recalc_sigpending_and_wake(t);
54875+ }
54876+ }
54877+ if (action->sa.sa_handler == SIG_DFL)
54878+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
54879+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
54880+
54881+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
54882+
54883+ return ret;
54884+}
54885+#endif
54886+
54887+#ifdef CONFIG_GRKERNSEC_BRUTE
54888+#define GR_USER_BAN_TIME (15 * 60)
54889+
54890+static int __get_dumpable(unsigned long mm_flags)
54891+{
54892+ int ret;
54893+
54894+ ret = mm_flags & MMF_DUMPABLE_MASK;
54895+ return (ret >= 2) ? 2 : ret;
54896+}
54897+#endif
54898+
54899+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
54900+{
54901+#ifdef CONFIG_GRKERNSEC_BRUTE
54902+ uid_t uid = 0;
54903+
54904+ if (!grsec_enable_brute)
54905+ return;
54906+
54907+ rcu_read_lock();
54908+ read_lock(&tasklist_lock);
54909+ read_lock(&grsec_exec_file_lock);
54910+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
54911+ p->real_parent->brute = 1;
54912+ else {
54913+ const struct cred *cred = __task_cred(p), *cred2;
54914+ struct task_struct *tsk, *tsk2;
54915+
54916+ if (!__get_dumpable(mm_flags) && cred->uid) {
54917+ struct user_struct *user;
54918+
54919+ uid = cred->uid;
54920+
54921+ /* this is put upon execution past expiration */
54922+ user = find_user(uid);
54923+ if (user == NULL)
54924+ goto unlock;
54925+ user->banned = 1;
54926+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
54927+ if (user->ban_expires == ~0UL)
54928+ user->ban_expires--;
54929+
54930+ do_each_thread(tsk2, tsk) {
54931+ cred2 = __task_cred(tsk);
54932+ if (tsk != p && cred2->uid == uid)
54933+ gr_fake_force_sig(SIGKILL, tsk);
54934+ } while_each_thread(tsk2, tsk);
54935+ }
54936+ }
54937+unlock:
54938+ read_unlock(&grsec_exec_file_lock);
54939+ read_unlock(&tasklist_lock);
54940+ rcu_read_unlock();
54941+
54942+ if (uid)
54943+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
54944+
54945+#endif
54946+ return;
54947+}
54948+
54949+void gr_handle_brute_check(void)
54950+{
54951+#ifdef CONFIG_GRKERNSEC_BRUTE
54952+ if (current->brute)
54953+ msleep(30 * 1000);
54954+#endif
54955+ return;
54956+}
54957+
54958+void gr_handle_kernel_exploit(void)
54959+{
54960+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
54961+ const struct cred *cred;
54962+ struct task_struct *tsk, *tsk2;
54963+ struct user_struct *user;
54964+ uid_t uid;
54965+
54966+ if (in_irq() || in_serving_softirq() || in_nmi())
54967+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
54968+
54969+ uid = current_uid();
54970+
54971+ if (uid == 0)
54972+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
54973+ else {
54974+ /* kill all the processes of this user, hold a reference
54975+ to their creds struct, and prevent them from creating
54976+ another process until system reset
54977+ */
54978+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
54979+ /* we intentionally leak this ref */
54980+ user = get_uid(current->cred->user);
54981+ if (user) {
54982+ user->banned = 1;
54983+ user->ban_expires = ~0UL;
54984+ }
54985+
54986+ read_lock(&tasklist_lock);
54987+ do_each_thread(tsk2, tsk) {
54988+ cred = __task_cred(tsk);
54989+ if (cred->uid == uid)
54990+ gr_fake_force_sig(SIGKILL, tsk);
54991+ } while_each_thread(tsk2, tsk);
54992+ read_unlock(&tasklist_lock);
54993+ }
54994+#endif
54995+}
54996+
54997+int __gr_process_user_ban(struct user_struct *user)
54998+{
54999+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
55000+ if (unlikely(user->banned)) {
55001+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
55002+ user->banned = 0;
55003+ user->ban_expires = 0;
55004+ free_uid(user);
55005+ } else
55006+ return -EPERM;
55007+ }
55008+#endif
55009+ return 0;
55010+}
55011+
55012+int gr_process_user_ban(void)
55013+{
55014+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
55015+ return __gr_process_user_ban(current->cred->user);
55016+#endif
55017+ return 0;
55018+}
55019diff -urNp linux-3.1.1/grsecurity/grsec_sock.c linux-3.1.1/grsecurity/grsec_sock.c
55020--- linux-3.1.1/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
55021+++ linux-3.1.1/grsecurity/grsec_sock.c 2011-11-16 18:40:31.000000000 -0500
55022@@ -0,0 +1,244 @@
55023+#include <linux/kernel.h>
55024+#include <linux/module.h>
55025+#include <linux/sched.h>
55026+#include <linux/file.h>
55027+#include <linux/net.h>
55028+#include <linux/in.h>
55029+#include <linux/ip.h>
55030+#include <net/sock.h>
55031+#include <net/inet_sock.h>
55032+#include <linux/grsecurity.h>
55033+#include <linux/grinternal.h>
55034+#include <linux/gracl.h>
55035+
55036+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
55037+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
55038+
55039+EXPORT_SYMBOL(gr_search_udp_recvmsg);
55040+EXPORT_SYMBOL(gr_search_udp_sendmsg);
55041+
55042+#ifdef CONFIG_UNIX_MODULE
55043+EXPORT_SYMBOL(gr_acl_handle_unix);
55044+EXPORT_SYMBOL(gr_acl_handle_mknod);
55045+EXPORT_SYMBOL(gr_handle_chroot_unix);
55046+EXPORT_SYMBOL(gr_handle_create);
55047+#endif
55048+
55049+#ifdef CONFIG_GRKERNSEC
55050+#define gr_conn_table_size 32749
55051+struct conn_table_entry {
55052+ struct conn_table_entry *next;
55053+ struct signal_struct *sig;
55054+};
55055+
55056+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
55057+DEFINE_SPINLOCK(gr_conn_table_lock);
55058+
55059+extern const char * gr_socktype_to_name(unsigned char type);
55060+extern const char * gr_proto_to_name(unsigned char proto);
55061+extern const char * gr_sockfamily_to_name(unsigned char family);
55062+
55063+static __inline__ int
55064+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
55065+{
55066+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
55067+}
55068+
55069+static __inline__ int
55070+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
55071+ __u16 sport, __u16 dport)
55072+{
55073+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
55074+ sig->gr_sport == sport && sig->gr_dport == dport))
55075+ return 1;
55076+ else
55077+ return 0;
55078+}
55079+
55080+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
55081+{
55082+ struct conn_table_entry **match;
55083+ unsigned int index;
55084+
55085+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
55086+ sig->gr_sport, sig->gr_dport,
55087+ gr_conn_table_size);
55088+
55089+ newent->sig = sig;
55090+
55091+ match = &gr_conn_table[index];
55092+ newent->next = *match;
55093+ *match = newent;
55094+
55095+ return;
55096+}
55097+
55098+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
55099+{
55100+ struct conn_table_entry *match, *last = NULL;
55101+ unsigned int index;
55102+
55103+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
55104+ sig->gr_sport, sig->gr_dport,
55105+ gr_conn_table_size);
55106+
55107+ match = gr_conn_table[index];
55108+ while (match && !conn_match(match->sig,
55109+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
55110+ sig->gr_dport)) {
55111+ last = match;
55112+ match = match->next;
55113+ }
55114+
55115+ if (match) {
55116+ if (last)
55117+ last->next = match->next;
55118+ else
55119+ gr_conn_table[index] = NULL;
55120+ kfree(match);
55121+ }
55122+
55123+ return;
55124+}
55125+
55126+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
55127+ __u16 sport, __u16 dport)
55128+{
55129+ struct conn_table_entry *match;
55130+ unsigned int index;
55131+
55132+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
55133+
55134+ match = gr_conn_table[index];
55135+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
55136+ match = match->next;
55137+
55138+ if (match)
55139+ return match->sig;
55140+ else
55141+ return NULL;
55142+}
55143+
55144+#endif
55145+
55146+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
55147+{
55148+#ifdef CONFIG_GRKERNSEC
55149+ struct signal_struct *sig = task->signal;
55150+ struct conn_table_entry *newent;
55151+
55152+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
55153+ if (newent == NULL)
55154+ return;
55155+ /* no bh lock needed since we are called with bh disabled */
55156+ spin_lock(&gr_conn_table_lock);
55157+ gr_del_task_from_ip_table_nolock(sig);
55158+ sig->gr_saddr = inet->inet_rcv_saddr;
55159+ sig->gr_daddr = inet->inet_daddr;
55160+ sig->gr_sport = inet->inet_sport;
55161+ sig->gr_dport = inet->inet_dport;
55162+ gr_add_to_task_ip_table_nolock(sig, newent);
55163+ spin_unlock(&gr_conn_table_lock);
55164+#endif
55165+ return;
55166+}
55167+
55168+void gr_del_task_from_ip_table(struct task_struct *task)
55169+{
55170+#ifdef CONFIG_GRKERNSEC
55171+ spin_lock_bh(&gr_conn_table_lock);
55172+ gr_del_task_from_ip_table_nolock(task->signal);
55173+ spin_unlock_bh(&gr_conn_table_lock);
55174+#endif
55175+ return;
55176+}
55177+
55178+void
55179+gr_attach_curr_ip(const struct sock *sk)
55180+{
55181+#ifdef CONFIG_GRKERNSEC
55182+ struct signal_struct *p, *set;
55183+ const struct inet_sock *inet = inet_sk(sk);
55184+
55185+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
55186+ return;
55187+
55188+ set = current->signal;
55189+
55190+ spin_lock_bh(&gr_conn_table_lock);
55191+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
55192+ inet->inet_dport, inet->inet_sport);
55193+ if (unlikely(p != NULL)) {
55194+ set->curr_ip = p->curr_ip;
55195+ set->used_accept = 1;
55196+ gr_del_task_from_ip_table_nolock(p);
55197+ spin_unlock_bh(&gr_conn_table_lock);
55198+ return;
55199+ }
55200+ spin_unlock_bh(&gr_conn_table_lock);
55201+
55202+ set->curr_ip = inet->inet_daddr;
55203+ set->used_accept = 1;
55204+#endif
55205+ return;
55206+}
55207+
55208+int
55209+gr_handle_sock_all(const int family, const int type, const int protocol)
55210+{
55211+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55212+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
55213+ (family != AF_UNIX)) {
55214+ if (family == AF_INET)
55215+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
55216+ else
55217+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
55218+ return -EACCES;
55219+ }
55220+#endif
55221+ return 0;
55222+}
55223+
55224+int
55225+gr_handle_sock_server(const struct sockaddr *sck)
55226+{
55227+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55228+ if (grsec_enable_socket_server &&
55229+ in_group_p(grsec_socket_server_gid) &&
55230+ sck && (sck->sa_family != AF_UNIX) &&
55231+ (sck->sa_family != AF_LOCAL)) {
55232+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
55233+ return -EACCES;
55234+ }
55235+#endif
55236+ return 0;
55237+}
55238+
55239+int
55240+gr_handle_sock_server_other(const struct sock *sck)
55241+{
55242+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55243+ if (grsec_enable_socket_server &&
55244+ in_group_p(grsec_socket_server_gid) &&
55245+ sck && (sck->sk_family != AF_UNIX) &&
55246+ (sck->sk_family != AF_LOCAL)) {
55247+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
55248+ return -EACCES;
55249+ }
55250+#endif
55251+ return 0;
55252+}
55253+
55254+int
55255+gr_handle_sock_client(const struct sockaddr *sck)
55256+{
55257+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55258+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
55259+ sck && (sck->sa_family != AF_UNIX) &&
55260+ (sck->sa_family != AF_LOCAL)) {
55261+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
55262+ return -EACCES;
55263+ }
55264+#endif
55265+ return 0;
55266+}
55267diff -urNp linux-3.1.1/grsecurity/grsec_sysctl.c linux-3.1.1/grsecurity/grsec_sysctl.c
55268--- linux-3.1.1/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
55269+++ linux-3.1.1/grsecurity/grsec_sysctl.c 2011-11-16 18:40:31.000000000 -0500
55270@@ -0,0 +1,433 @@
55271+#include <linux/kernel.h>
55272+#include <linux/sched.h>
55273+#include <linux/sysctl.h>
55274+#include <linux/grsecurity.h>
55275+#include <linux/grinternal.h>
55276+
55277+int
55278+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
55279+{
55280+#ifdef CONFIG_GRKERNSEC_SYSCTL
55281+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
55282+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
55283+ return -EACCES;
55284+ }
55285+#endif
55286+ return 0;
55287+}
55288+
55289+#ifdef CONFIG_GRKERNSEC_ROFS
55290+static int __maybe_unused one = 1;
55291+#endif
55292+
55293+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
55294+struct ctl_table grsecurity_table[] = {
55295+#ifdef CONFIG_GRKERNSEC_SYSCTL
55296+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
55297+#ifdef CONFIG_GRKERNSEC_IO
55298+ {
55299+ .procname = "disable_priv_io",
55300+ .data = &grsec_disable_privio,
55301+ .maxlen = sizeof(int),
55302+ .mode = 0600,
55303+ .proc_handler = &proc_dointvec,
55304+ },
55305+#endif
55306+#endif
55307+#ifdef CONFIG_GRKERNSEC_LINK
55308+ {
55309+ .procname = "linking_restrictions",
55310+ .data = &grsec_enable_link,
55311+ .maxlen = sizeof(int),
55312+ .mode = 0600,
55313+ .proc_handler = &proc_dointvec,
55314+ },
55315+#endif
55316+#ifdef CONFIG_GRKERNSEC_BRUTE
55317+ {
55318+ .procname = "deter_bruteforce",
55319+ .data = &grsec_enable_brute,
55320+ .maxlen = sizeof(int),
55321+ .mode = 0600,
55322+ .proc_handler = &proc_dointvec,
55323+ },
55324+#endif
55325+#ifdef CONFIG_GRKERNSEC_FIFO
55326+ {
55327+ .procname = "fifo_restrictions",
55328+ .data = &grsec_enable_fifo,
55329+ .maxlen = sizeof(int),
55330+ .mode = 0600,
55331+ .proc_handler = &proc_dointvec,
55332+ },
55333+#endif
55334+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
55335+ {
55336+ .procname = "ip_blackhole",
55337+ .data = &grsec_enable_blackhole,
55338+ .maxlen = sizeof(int),
55339+ .mode = 0600,
55340+ .proc_handler = &proc_dointvec,
55341+ },
55342+ {
55343+ .procname = "lastack_retries",
55344+ .data = &grsec_lastack_retries,
55345+ .maxlen = sizeof(int),
55346+ .mode = 0600,
55347+ .proc_handler = &proc_dointvec,
55348+ },
55349+#endif
55350+#ifdef CONFIG_GRKERNSEC_EXECLOG
55351+ {
55352+ .procname = "exec_logging",
55353+ .data = &grsec_enable_execlog,
55354+ .maxlen = sizeof(int),
55355+ .mode = 0600,
55356+ .proc_handler = &proc_dointvec,
55357+ },
55358+#endif
55359+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55360+ {
55361+ .procname = "rwxmap_logging",
55362+ .data = &grsec_enable_log_rwxmaps,
55363+ .maxlen = sizeof(int),
55364+ .mode = 0600,
55365+ .proc_handler = &proc_dointvec,
55366+ },
55367+#endif
55368+#ifdef CONFIG_GRKERNSEC_SIGNAL
55369+ {
55370+ .procname = "signal_logging",
55371+ .data = &grsec_enable_signal,
55372+ .maxlen = sizeof(int),
55373+ .mode = 0600,
55374+ .proc_handler = &proc_dointvec,
55375+ },
55376+#endif
55377+#ifdef CONFIG_GRKERNSEC_FORKFAIL
55378+ {
55379+ .procname = "forkfail_logging",
55380+ .data = &grsec_enable_forkfail,
55381+ .maxlen = sizeof(int),
55382+ .mode = 0600,
55383+ .proc_handler = &proc_dointvec,
55384+ },
55385+#endif
55386+#ifdef CONFIG_GRKERNSEC_TIME
55387+ {
55388+ .procname = "timechange_logging",
55389+ .data = &grsec_enable_time,
55390+ .maxlen = sizeof(int),
55391+ .mode = 0600,
55392+ .proc_handler = &proc_dointvec,
55393+ },
55394+#endif
55395+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55396+ {
55397+ .procname = "chroot_deny_shmat",
55398+ .data = &grsec_enable_chroot_shmat,
55399+ .maxlen = sizeof(int),
55400+ .mode = 0600,
55401+ .proc_handler = &proc_dointvec,
55402+ },
55403+#endif
55404+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55405+ {
55406+ .procname = "chroot_deny_unix",
55407+ .data = &grsec_enable_chroot_unix,
55408+ .maxlen = sizeof(int),
55409+ .mode = 0600,
55410+ .proc_handler = &proc_dointvec,
55411+ },
55412+#endif
55413+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
55414+ {
55415+ .procname = "chroot_deny_mount",
55416+ .data = &grsec_enable_chroot_mount,
55417+ .maxlen = sizeof(int),
55418+ .mode = 0600,
55419+ .proc_handler = &proc_dointvec,
55420+ },
55421+#endif
55422+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55423+ {
55424+ .procname = "chroot_deny_fchdir",
55425+ .data = &grsec_enable_chroot_fchdir,
55426+ .maxlen = sizeof(int),
55427+ .mode = 0600,
55428+ .proc_handler = &proc_dointvec,
55429+ },
55430+#endif
55431+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
55432+ {
55433+ .procname = "chroot_deny_chroot",
55434+ .data = &grsec_enable_chroot_double,
55435+ .maxlen = sizeof(int),
55436+ .mode = 0600,
55437+ .proc_handler = &proc_dointvec,
55438+ },
55439+#endif
55440+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
55441+ {
55442+ .procname = "chroot_deny_pivot",
55443+ .data = &grsec_enable_chroot_pivot,
55444+ .maxlen = sizeof(int),
55445+ .mode = 0600,
55446+ .proc_handler = &proc_dointvec,
55447+ },
55448+#endif
55449+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
55450+ {
55451+ .procname = "chroot_enforce_chdir",
55452+ .data = &grsec_enable_chroot_chdir,
55453+ .maxlen = sizeof(int),
55454+ .mode = 0600,
55455+ .proc_handler = &proc_dointvec,
55456+ },
55457+#endif
55458+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
55459+ {
55460+ .procname = "chroot_deny_chmod",
55461+ .data = &grsec_enable_chroot_chmod,
55462+ .maxlen = sizeof(int),
55463+ .mode = 0600,
55464+ .proc_handler = &proc_dointvec,
55465+ },
55466+#endif
55467+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55468+ {
55469+ .procname = "chroot_deny_mknod",
55470+ .data = &grsec_enable_chroot_mknod,
55471+ .maxlen = sizeof(int),
55472+ .mode = 0600,
55473+ .proc_handler = &proc_dointvec,
55474+ },
55475+#endif
55476+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55477+ {
55478+ .procname = "chroot_restrict_nice",
55479+ .data = &grsec_enable_chroot_nice,
55480+ .maxlen = sizeof(int),
55481+ .mode = 0600,
55482+ .proc_handler = &proc_dointvec,
55483+ },
55484+#endif
55485+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55486+ {
55487+ .procname = "chroot_execlog",
55488+ .data = &grsec_enable_chroot_execlog,
55489+ .maxlen = sizeof(int),
55490+ .mode = 0600,
55491+ .proc_handler = &proc_dointvec,
55492+ },
55493+#endif
55494+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55495+ {
55496+ .procname = "chroot_caps",
55497+ .data = &grsec_enable_chroot_caps,
55498+ .maxlen = sizeof(int),
55499+ .mode = 0600,
55500+ .proc_handler = &proc_dointvec,
55501+ },
55502+#endif
55503+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55504+ {
55505+ .procname = "chroot_deny_sysctl",
55506+ .data = &grsec_enable_chroot_sysctl,
55507+ .maxlen = sizeof(int),
55508+ .mode = 0600,
55509+ .proc_handler = &proc_dointvec,
55510+ },
55511+#endif
55512+#ifdef CONFIG_GRKERNSEC_TPE
55513+ {
55514+ .procname = "tpe",
55515+ .data = &grsec_enable_tpe,
55516+ .maxlen = sizeof(int),
55517+ .mode = 0600,
55518+ .proc_handler = &proc_dointvec,
55519+ },
55520+ {
55521+ .procname = "tpe_gid",
55522+ .data = &grsec_tpe_gid,
55523+ .maxlen = sizeof(int),
55524+ .mode = 0600,
55525+ .proc_handler = &proc_dointvec,
55526+ },
55527+#endif
55528+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55529+ {
55530+ .procname = "tpe_invert",
55531+ .data = &grsec_enable_tpe_invert,
55532+ .maxlen = sizeof(int),
55533+ .mode = 0600,
55534+ .proc_handler = &proc_dointvec,
55535+ },
55536+#endif
55537+#ifdef CONFIG_GRKERNSEC_TPE_ALL
55538+ {
55539+ .procname = "tpe_restrict_all",
55540+ .data = &grsec_enable_tpe_all,
55541+ .maxlen = sizeof(int),
55542+ .mode = 0600,
55543+ .proc_handler = &proc_dointvec,
55544+ },
55545+#endif
55546+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55547+ {
55548+ .procname = "socket_all",
55549+ .data = &grsec_enable_socket_all,
55550+ .maxlen = sizeof(int),
55551+ .mode = 0600,
55552+ .proc_handler = &proc_dointvec,
55553+ },
55554+ {
55555+ .procname = "socket_all_gid",
55556+ .data = &grsec_socket_all_gid,
55557+ .maxlen = sizeof(int),
55558+ .mode = 0600,
55559+ .proc_handler = &proc_dointvec,
55560+ },
55561+#endif
55562+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55563+ {
55564+ .procname = "socket_client",
55565+ .data = &grsec_enable_socket_client,
55566+ .maxlen = sizeof(int),
55567+ .mode = 0600,
55568+ .proc_handler = &proc_dointvec,
55569+ },
55570+ {
55571+ .procname = "socket_client_gid",
55572+ .data = &grsec_socket_client_gid,
55573+ .maxlen = sizeof(int),
55574+ .mode = 0600,
55575+ .proc_handler = &proc_dointvec,
55576+ },
55577+#endif
55578+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55579+ {
55580+ .procname = "socket_server",
55581+ .data = &grsec_enable_socket_server,
55582+ .maxlen = sizeof(int),
55583+ .mode = 0600,
55584+ .proc_handler = &proc_dointvec,
55585+ },
55586+ {
55587+ .procname = "socket_server_gid",
55588+ .data = &grsec_socket_server_gid,
55589+ .maxlen = sizeof(int),
55590+ .mode = 0600,
55591+ .proc_handler = &proc_dointvec,
55592+ },
55593+#endif
55594+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55595+ {
55596+ .procname = "audit_group",
55597+ .data = &grsec_enable_group,
55598+ .maxlen = sizeof(int),
55599+ .mode = 0600,
55600+ .proc_handler = &proc_dointvec,
55601+ },
55602+ {
55603+ .procname = "audit_gid",
55604+ .data = &grsec_audit_gid,
55605+ .maxlen = sizeof(int),
55606+ .mode = 0600,
55607+ .proc_handler = &proc_dointvec,
55608+ },
55609+#endif
55610+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55611+ {
55612+ .procname = "audit_chdir",
55613+ .data = &grsec_enable_chdir,
55614+ .maxlen = sizeof(int),
55615+ .mode = 0600,
55616+ .proc_handler = &proc_dointvec,
55617+ },
55618+#endif
55619+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55620+ {
55621+ .procname = "audit_mount",
55622+ .data = &grsec_enable_mount,
55623+ .maxlen = sizeof(int),
55624+ .mode = 0600,
55625+ .proc_handler = &proc_dointvec,
55626+ },
55627+#endif
55628+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55629+ {
55630+ .procname = "audit_textrel",
55631+ .data = &grsec_enable_audit_textrel,
55632+ .maxlen = sizeof(int),
55633+ .mode = 0600,
55634+ .proc_handler = &proc_dointvec,
55635+ },
55636+#endif
55637+#ifdef CONFIG_GRKERNSEC_DMESG
55638+ {
55639+ .procname = "dmesg",
55640+ .data = &grsec_enable_dmesg,
55641+ .maxlen = sizeof(int),
55642+ .mode = 0600,
55643+ .proc_handler = &proc_dointvec,
55644+ },
55645+#endif
55646+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55647+ {
55648+ .procname = "chroot_findtask",
55649+ .data = &grsec_enable_chroot_findtask,
55650+ .maxlen = sizeof(int),
55651+ .mode = 0600,
55652+ .proc_handler = &proc_dointvec,
55653+ },
55654+#endif
55655+#ifdef CONFIG_GRKERNSEC_RESLOG
55656+ {
55657+ .procname = "resource_logging",
55658+ .data = &grsec_resource_logging,
55659+ .maxlen = sizeof(int),
55660+ .mode = 0600,
55661+ .proc_handler = &proc_dointvec,
55662+ },
55663+#endif
55664+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55665+ {
55666+ .procname = "audit_ptrace",
55667+ .data = &grsec_enable_audit_ptrace,
55668+ .maxlen = sizeof(int),
55669+ .mode = 0600,
55670+ .proc_handler = &proc_dointvec,
55671+ },
55672+#endif
55673+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55674+ {
55675+ .procname = "harden_ptrace",
55676+ .data = &grsec_enable_harden_ptrace,
55677+ .maxlen = sizeof(int),
55678+ .mode = 0600,
55679+ .proc_handler = &proc_dointvec,
55680+ },
55681+#endif
55682+ {
55683+ .procname = "grsec_lock",
55684+ .data = &grsec_lock,
55685+ .maxlen = sizeof(int),
55686+ .mode = 0600,
55687+ .proc_handler = &proc_dointvec,
55688+ },
55689+#endif
55690+#ifdef CONFIG_GRKERNSEC_ROFS
55691+ {
55692+ .procname = "romount_protect",
55693+ .data = &grsec_enable_rofs,
55694+ .maxlen = sizeof(int),
55695+ .mode = 0600,
55696+ .proc_handler = &proc_dointvec_minmax,
55697+ .extra1 = &one,
55698+ .extra2 = &one,
55699+ },
55700+#endif
55701+ { }
55702+};
55703+#endif
55704diff -urNp linux-3.1.1/grsecurity/grsec_time.c linux-3.1.1/grsecurity/grsec_time.c
55705--- linux-3.1.1/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
55706+++ linux-3.1.1/grsecurity/grsec_time.c 2011-11-16 18:40:31.000000000 -0500
55707@@ -0,0 +1,16 @@
55708+#include <linux/kernel.h>
55709+#include <linux/sched.h>
55710+#include <linux/grinternal.h>
55711+#include <linux/module.h>
55712+
55713+void
55714+gr_log_timechange(void)
55715+{
55716+#ifdef CONFIG_GRKERNSEC_TIME
55717+ if (grsec_enable_time)
55718+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
55719+#endif
55720+ return;
55721+}
55722+
55723+EXPORT_SYMBOL(gr_log_timechange);
55724diff -urNp linux-3.1.1/grsecurity/grsec_tpe.c linux-3.1.1/grsecurity/grsec_tpe.c
55725--- linux-3.1.1/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
55726+++ linux-3.1.1/grsecurity/grsec_tpe.c 2011-11-16 18:40:31.000000000 -0500
55727@@ -0,0 +1,39 @@
55728+#include <linux/kernel.h>
55729+#include <linux/sched.h>
55730+#include <linux/file.h>
55731+#include <linux/fs.h>
55732+#include <linux/grinternal.h>
55733+
55734+extern int gr_acl_tpe_check(void);
55735+
55736+int
55737+gr_tpe_allow(const struct file *file)
55738+{
55739+#ifdef CONFIG_GRKERNSEC
55740+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
55741+ const struct cred *cred = current_cred();
55742+
55743+ if (cred->uid && ((grsec_enable_tpe &&
55744+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55745+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
55746+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
55747+#else
55748+ in_group_p(grsec_tpe_gid)
55749+#endif
55750+ ) || gr_acl_tpe_check()) &&
55751+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
55752+ (inode->i_mode & S_IWOTH))))) {
55753+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55754+ return 0;
55755+ }
55756+#ifdef CONFIG_GRKERNSEC_TPE_ALL
55757+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
55758+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
55759+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
55760+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55761+ return 0;
55762+ }
55763+#endif
55764+#endif
55765+ return 1;
55766+}
55767diff -urNp linux-3.1.1/grsecurity/grsum.c linux-3.1.1/grsecurity/grsum.c
55768--- linux-3.1.1/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
55769+++ linux-3.1.1/grsecurity/grsum.c 2011-11-16 18:40:31.000000000 -0500
55770@@ -0,0 +1,61 @@
55771+#include <linux/err.h>
55772+#include <linux/kernel.h>
55773+#include <linux/sched.h>
55774+#include <linux/mm.h>
55775+#include <linux/scatterlist.h>
55776+#include <linux/crypto.h>
55777+#include <linux/gracl.h>
55778+
55779+
55780+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
55781+#error "crypto and sha256 must be built into the kernel"
55782+#endif
55783+
55784+int
55785+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
55786+{
55787+ char *p;
55788+ struct crypto_hash *tfm;
55789+ struct hash_desc desc;
55790+ struct scatterlist sg;
55791+ unsigned char temp_sum[GR_SHA_LEN];
55792+ volatile int retval = 0;
55793+ volatile int dummy = 0;
55794+ unsigned int i;
55795+
55796+ sg_init_table(&sg, 1);
55797+
55798+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
55799+ if (IS_ERR(tfm)) {
55800+ /* should never happen, since sha256 should be built in */
55801+ return 1;
55802+ }
55803+
55804+ desc.tfm = tfm;
55805+ desc.flags = 0;
55806+
55807+ crypto_hash_init(&desc);
55808+
55809+ p = salt;
55810+ sg_set_buf(&sg, p, GR_SALT_LEN);
55811+ crypto_hash_update(&desc, &sg, sg.length);
55812+
55813+ p = entry->pw;
55814+ sg_set_buf(&sg, p, strlen(p));
55815+
55816+ crypto_hash_update(&desc, &sg, sg.length);
55817+
55818+ crypto_hash_final(&desc, temp_sum);
55819+
55820+ memset(entry->pw, 0, GR_PW_LEN);
55821+
55822+ for (i = 0; i < GR_SHA_LEN; i++)
55823+ if (sum[i] != temp_sum[i])
55824+ retval = 1;
55825+ else
55826+ dummy = 1; // waste a cycle
55827+
55828+ crypto_free_hash(tfm);
55829+
55830+ return retval;
55831+}
55832diff -urNp linux-3.1.1/grsecurity/Kconfig linux-3.1.1/grsecurity/Kconfig
55833--- linux-3.1.1/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
55834+++ linux-3.1.1/grsecurity/Kconfig 2011-11-16 18:40:31.000000000 -0500
55835@@ -0,0 +1,1037 @@
55836+#
55837+# grecurity configuration
55838+#
55839+
55840+menu "Grsecurity"
55841+
55842+config GRKERNSEC
55843+ bool "Grsecurity"
55844+ select CRYPTO
55845+ select CRYPTO_SHA256
55846+ help
55847+ If you say Y here, you will be able to configure many features
55848+ that will enhance the security of your system. It is highly
55849+ recommended that you say Y here and read through the help
55850+ for each option so that you fully understand the features and
55851+ can evaluate their usefulness for your machine.
55852+
55853+choice
55854+ prompt "Security Level"
55855+ depends on GRKERNSEC
55856+ default GRKERNSEC_CUSTOM
55857+
55858+config GRKERNSEC_LOW
55859+ bool "Low"
55860+ select GRKERNSEC_LINK
55861+ select GRKERNSEC_FIFO
55862+ select GRKERNSEC_RANDNET
55863+ select GRKERNSEC_DMESG
55864+ select GRKERNSEC_CHROOT
55865+ select GRKERNSEC_CHROOT_CHDIR
55866+
55867+ help
55868+ If you choose this option, several of the grsecurity options will
55869+ be enabled that will give you greater protection against a number
55870+ of attacks, while assuring that none of your software will have any
55871+ conflicts with the additional security measures. If you run a lot
55872+ of unusual software, or you are having problems with the higher
55873+ security levels, you should say Y here. With this option, the
55874+ following features are enabled:
55875+
55876+ - Linking restrictions
55877+ - FIFO restrictions
55878+ - Restricted dmesg
55879+ - Enforced chdir("/") on chroot
55880+ - Runtime module disabling
55881+
55882+config GRKERNSEC_MEDIUM
55883+ bool "Medium"
55884+ select PAX
55885+ select PAX_EI_PAX
55886+ select PAX_PT_PAX_FLAGS
55887+ select PAX_HAVE_ACL_FLAGS
55888+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55889+ select GRKERNSEC_CHROOT
55890+ select GRKERNSEC_CHROOT_SYSCTL
55891+ select GRKERNSEC_LINK
55892+ select GRKERNSEC_FIFO
55893+ select GRKERNSEC_DMESG
55894+ select GRKERNSEC_RANDNET
55895+ select GRKERNSEC_FORKFAIL
55896+ select GRKERNSEC_TIME
55897+ select GRKERNSEC_SIGNAL
55898+ select GRKERNSEC_CHROOT
55899+ select GRKERNSEC_CHROOT_UNIX
55900+ select GRKERNSEC_CHROOT_MOUNT
55901+ select GRKERNSEC_CHROOT_PIVOT
55902+ select GRKERNSEC_CHROOT_DOUBLE
55903+ select GRKERNSEC_CHROOT_CHDIR
55904+ select GRKERNSEC_CHROOT_MKNOD
55905+ select GRKERNSEC_PROC
55906+ select GRKERNSEC_PROC_USERGROUP
55907+ select PAX_RANDUSTACK
55908+ select PAX_ASLR
55909+ select PAX_RANDMMAP
55910+ select PAX_REFCOUNT if (X86 || SPARC64)
55911+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55912+
55913+ help
55914+ If you say Y here, several features in addition to those included
55915+ in the low additional security level will be enabled. These
55916+ features provide even more security to your system, though in rare
55917+ cases they may be incompatible with very old or poorly written
55918+ software. If you enable this option, make sure that your auth
55919+ service (identd) is running as gid 1001. With this option,
55920+ the following features (in addition to those provided in the
55921+ low additional security level) will be enabled:
55922+
55923+ - Failed fork logging
55924+ - Time change logging
55925+ - Signal logging
55926+ - Deny mounts in chroot
55927+ - Deny double chrooting
55928+ - Deny sysctl writes in chroot
55929+ - Deny mknod in chroot
55930+ - Deny access to abstract AF_UNIX sockets out of chroot
55931+ - Deny pivot_root in chroot
55932+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
55933+ - /proc restrictions with special GID set to 10 (usually wheel)
55934+ - Address Space Layout Randomization (ASLR)
55935+ - Prevent exploitation of most refcount overflows
55936+ - Bounds checking of copying between the kernel and userland
55937+
55938+config GRKERNSEC_HIGH
55939+ bool "High"
55940+ select GRKERNSEC_LINK
55941+ select GRKERNSEC_FIFO
55942+ select GRKERNSEC_DMESG
55943+ select GRKERNSEC_FORKFAIL
55944+ select GRKERNSEC_TIME
55945+ select GRKERNSEC_SIGNAL
55946+ select GRKERNSEC_CHROOT
55947+ select GRKERNSEC_CHROOT_SHMAT
55948+ select GRKERNSEC_CHROOT_UNIX
55949+ select GRKERNSEC_CHROOT_MOUNT
55950+ select GRKERNSEC_CHROOT_FCHDIR
55951+ select GRKERNSEC_CHROOT_PIVOT
55952+ select GRKERNSEC_CHROOT_DOUBLE
55953+ select GRKERNSEC_CHROOT_CHDIR
55954+ select GRKERNSEC_CHROOT_MKNOD
55955+ select GRKERNSEC_CHROOT_CAPS
55956+ select GRKERNSEC_CHROOT_SYSCTL
55957+ select GRKERNSEC_CHROOT_FINDTASK
55958+ select GRKERNSEC_SYSFS_RESTRICT
55959+ select GRKERNSEC_PROC
55960+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55961+ select GRKERNSEC_HIDESYM
55962+ select GRKERNSEC_BRUTE
55963+ select GRKERNSEC_PROC_USERGROUP
55964+ select GRKERNSEC_KMEM
55965+ select GRKERNSEC_RESLOG
55966+ select GRKERNSEC_RANDNET
55967+ select GRKERNSEC_PROC_ADD
55968+ select GRKERNSEC_CHROOT_CHMOD
55969+ select GRKERNSEC_CHROOT_NICE
55970+ select GRKERNSEC_AUDIT_MOUNT
55971+ select GRKERNSEC_MODHARDEN if (MODULES)
55972+ select GRKERNSEC_HARDEN_PTRACE
55973+ select GRKERNSEC_VM86 if (X86_32)
55974+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
55975+ select PAX
55976+ select PAX_RANDUSTACK
55977+ select PAX_ASLR
55978+ select PAX_RANDMMAP
55979+ select PAX_NOEXEC
55980+ select PAX_MPROTECT
55981+ select PAX_EI_PAX
55982+ select PAX_PT_PAX_FLAGS
55983+ select PAX_HAVE_ACL_FLAGS
55984+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
55985+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
55986+ select PAX_RANDKSTACK if (X86_TSC && X86)
55987+ select PAX_SEGMEXEC if (X86_32)
55988+ select PAX_PAGEEXEC
55989+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
55990+ select PAX_EMUTRAMP if (PARISC)
55991+ select PAX_EMUSIGRT if (PARISC)
55992+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
55993+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
55994+ select PAX_REFCOUNT if (X86 || SPARC64)
55995+ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
55996+ help
55997+ If you say Y here, many of the features of grsecurity will be
55998+ enabled, which will protect you against many kinds of attacks
55999+ against your system. The heightened security comes at a cost
56000+ of an increased chance of incompatibilities with rare software
56001+ on your machine. Since this security level enables PaX, you should
56002+ view <http://pax.grsecurity.net> and read about the PaX
56003+ project. While you are there, download chpax and run it on
56004+ binaries that cause problems with PaX. Also remember that
56005+ since the /proc restrictions are enabled, you must run your
56006+ identd as gid 1001. This security level enables the following
56007+ features in addition to those listed in the low and medium
56008+ security levels:
56009+
56010+ - Additional /proc restrictions
56011+ - Chmod restrictions in chroot
56012+ - No signals, ptrace, or viewing of processes outside of chroot
56013+ - Capability restrictions in chroot
56014+ - Deny fchdir out of chroot
56015+ - Priority restrictions in chroot
56016+ - Segmentation-based implementation of PaX
56017+ - Mprotect restrictions
56018+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
56019+ - Kernel stack randomization
56020+ - Mount/unmount/remount logging
56021+ - Kernel symbol hiding
56022+ - Hardening of module auto-loading
56023+ - Ptrace restrictions
56024+ - Restricted vm86 mode
56025+ - Restricted sysfs/debugfs
56026+ - Active kernel exploit response
56027+
56028+config GRKERNSEC_CUSTOM
56029+ bool "Custom"
56030+ help
56031+ If you say Y here, you will be able to configure every grsecurity
56032+ option, which allows you to enable many more features that aren't
56033+ covered in the basic security levels. These additional features
56034+ include TPE, socket restrictions, and the sysctl system for
56035+ grsecurity. It is advised that you read through the help for
56036+ each option to determine its usefulness in your situation.
56037+
56038+endchoice
56039+
56040+menu "Address Space Protection"
56041+depends on GRKERNSEC
56042+
56043+config GRKERNSEC_KMEM
56044+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
56045+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
56046+ help
56047+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
56048+ be written to or read from to modify or leak the contents of the running
56049+ kernel. /dev/port will also not be allowed to be opened. If you have module
56050+ support disabled, enabling this will close up four ways that are
56051+ currently used to insert malicious code into the running kernel.
56052+ Even with all these features enabled, we still highly recommend that
56053+ you use the RBAC system, as it is still possible for an attacker to
56054+ modify the running kernel through privileged I/O granted by ioperm/iopl.
56055+ If you are not using XFree86, you may be able to stop this additional
56056+ case by enabling the 'Disable privileged I/O' option. Though nothing
56057+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
56058+ but only to video memory, which is the only writing we allow in this
56059+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
56060+ not be allowed to mprotect it with PROT_WRITE later.
56061+ It is highly recommended that you say Y here if you meet all the
56062+ conditions above.
56063+
56064+config GRKERNSEC_VM86
56065+ bool "Restrict VM86 mode"
56066+ depends on X86_32
56067+
56068+ help
56069+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
56070+ make use of a special execution mode on 32bit x86 processors called
56071+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
56072+ video cards and will still work with this option enabled. The purpose
56073+ of the option is to prevent exploitation of emulation errors in
56074+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
56075+ Nearly all users should be able to enable this option.
56076+
56077+config GRKERNSEC_IO
56078+ bool "Disable privileged I/O"
56079+ depends on X86
56080+ select RTC_CLASS
56081+ select RTC_INTF_DEV
56082+ select RTC_DRV_CMOS
56083+
56084+ help
56085+ If you say Y here, all ioperm and iopl calls will return an error.
56086+ Ioperm and iopl can be used to modify the running kernel.
56087+ Unfortunately, some programs need this access to operate properly,
56088+ the most notable of which are XFree86 and hwclock. hwclock can be
56089+ remedied by having RTC support in the kernel, so real-time
56090+ clock support is enabled if this option is enabled, to ensure
56091+ that hwclock operates correctly. XFree86 still will not
56092+ operate correctly with this option enabled, so DO NOT CHOOSE Y
56093+ IF YOU USE XFree86. If you use XFree86 and you still want to
56094+ protect your kernel against modification, use the RBAC system.
56095+
56096+config GRKERNSEC_PROC_MEMMAP
56097+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
56098+ default y if (PAX_NOEXEC || PAX_ASLR)
56099+ depends on PAX_NOEXEC || PAX_ASLR
56100+ help
56101+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
56102+ give no information about the addresses of its mappings if
56103+ PaX features that rely on random addresses are enabled on the task.
56104+ If you use PaX it is greatly recommended that you say Y here as it
56105+ closes up a hole that makes the full ASLR useless for suid
56106+ binaries.
56107+
56108+config GRKERNSEC_BRUTE
56109+ bool "Deter exploit bruteforcing"
56110+ help
56111+ If you say Y here, attempts to bruteforce exploits against forking
56112+ daemons such as apache or sshd, as well as against suid/sgid binaries
56113+ will be deterred. When a child of a forking daemon is killed by PaX
56114+ or crashes due to an illegal instruction or other suspicious signal,
56115+ the parent process will be delayed 30 seconds upon every subsequent
56116+ fork until the administrator is able to assess the situation and
56117+ restart the daemon.
56118+ In the suid/sgid case, the attempt is logged, the user has all their
56119+ processes terminated, and they are prevented from executing any further
56120+ processes for 15 minutes.
56121+ It is recommended that you also enable signal logging in the auditing
56122+ section so that logs are generated when a process triggers a suspicious
56123+ signal.
56124+ If the sysctl option is enabled, a sysctl option with name
56125+ "deter_bruteforce" is created.
56126+
56127+
56128+config GRKERNSEC_MODHARDEN
56129+ bool "Harden module auto-loading"
56130+ depends on MODULES
56131+ help
56132+ If you say Y here, module auto-loading in response to use of some
56133+ feature implemented by an unloaded module will be restricted to
56134+ root users. Enabling this option helps defend against attacks
56135+ by unprivileged users who abuse the auto-loading behavior to
56136+ cause a vulnerable module to load that is then exploited.
56137+
56138+ If this option prevents a legitimate use of auto-loading for a
56139+ non-root user, the administrator can execute modprobe manually
56140+ with the exact name of the module mentioned in the alert log.
56141+ Alternatively, the administrator can add the module to the list
56142+ of modules loaded at boot by modifying init scripts.
56143+
56144+ Modification of init scripts will most likely be needed on
56145+ Ubuntu servers with encrypted home directory support enabled,
56146+ as the first non-root user logging in will cause the ecb(aes),
56147+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
56148+
56149+config GRKERNSEC_HIDESYM
56150+ bool "Hide kernel symbols"
56151+ help
56152+ If you say Y here, getting information on loaded modules, and
56153+ displaying all kernel symbols through a syscall will be restricted
56154+ to users with CAP_SYS_MODULE. For software compatibility reasons,
56155+ /proc/kallsyms will be restricted to the root user. The RBAC
56156+ system can hide that entry even from root.
56157+
56158+ This option also prevents leaking of kernel addresses through
56159+ several /proc entries.
56160+
56161+ Note that this option is only effective provided the following
56162+ conditions are met:
56163+ 1) The kernel using grsecurity is not precompiled by some distribution
56164+ 2) You have also enabled GRKERNSEC_DMESG
56165+ 3) You are using the RBAC system and hiding other files such as your
56166+ kernel image and System.map. Alternatively, enabling this option
56167+ causes the permissions on /boot, /lib/modules, and the kernel
56168+ source directory to change at compile time to prevent
56169+ reading by non-root users.
56170+ If the above conditions are met, this option will aid in providing a
56171+ useful protection against local kernel exploitation of overflows
56172+ and arbitrary read/write vulnerabilities.
56173+
56174+config GRKERNSEC_KERN_LOCKOUT
56175+ bool "Active kernel exploit response"
56176+ depends on X86 || ARM || PPC || SPARC
56177+ help
56178+ If you say Y here, when a PaX alert is triggered due to suspicious
56179+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
56180+ or an OOPs occurs due to bad memory accesses, instead of just
56181+ terminating the offending process (and potentially allowing
56182+ a subsequent exploit from the same user), we will take one of two
56183+ actions:
56184+ If the user was root, we will panic the system
56185+ If the user was non-root, we will log the attempt, terminate
56186+ all processes owned by the user, then prevent them from creating
56187+ any new processes until the system is restarted
56188+ This deters repeated kernel exploitation/bruteforcing attempts
56189+ and is useful for later forensics.
56190+
56191+endmenu
56192+menu "Role Based Access Control Options"
56193+depends on GRKERNSEC
56194+
56195+config GRKERNSEC_RBAC_DEBUG
56196+ bool
56197+
56198+config GRKERNSEC_NO_RBAC
56199+ bool "Disable RBAC system"
56200+ help
56201+ If you say Y here, the /dev/grsec device will be removed from the kernel,
56202+ preventing the RBAC system from being enabled. You should only say Y
56203+ here if you have no intention of using the RBAC system, so as to prevent
56204+ an attacker with root access from misusing the RBAC system to hide files
56205+ and processes when loadable module support and /dev/[k]mem have been
56206+ locked down.
56207+
56208+config GRKERNSEC_ACL_HIDEKERN
56209+ bool "Hide kernel processes"
56210+ help
56211+ If you say Y here, all kernel threads will be hidden to all
56212+ processes but those whose subject has the "view hidden processes"
56213+ flag.
56214+
56215+config GRKERNSEC_ACL_MAXTRIES
56216+ int "Maximum tries before password lockout"
56217+ default 3
56218+ help
56219+ This option enforces the maximum number of times a user can attempt
56220+ to authorize themselves with the grsecurity RBAC system before being
56221+ denied the ability to attempt authorization again for a specified time.
56222+ The lower the number, the harder it will be to brute-force a password.
56223+
56224+config GRKERNSEC_ACL_TIMEOUT
56225+ int "Time to wait after max password tries, in seconds"
56226+ default 30
56227+ help
56228+ This option specifies the time the user must wait after attempting to
56229+ authorize to the RBAC system with the maximum number of invalid
56230+ passwords. The higher the number, the harder it will be to brute-force
56231+ a password.
56232+
56233+endmenu
56234+menu "Filesystem Protections"
56235+depends on GRKERNSEC
56236+
56237+config GRKERNSEC_PROC
56238+ bool "Proc restrictions"
56239+ help
56240+ If you say Y here, the permissions of the /proc filesystem
56241+ will be altered to enhance system security and privacy. You MUST
56242+ choose either a user only restriction or a user and group restriction.
56243+ Depending upon the option you choose, you can either restrict users to
56244+ see only the processes they themselves run, or choose a group that can
56245+ view all processes and files normally restricted to root if you choose
56246+ the "restrict to user only" option. NOTE: If you're running identd as
56247+ a non-root user, you will have to run it as the group you specify here.
56248+
56249+config GRKERNSEC_PROC_USER
56250+ bool "Restrict /proc to user only"
56251+ depends on GRKERNSEC_PROC
56252+ help
56253+ If you say Y here, non-root users will only be able to view their own
56254+ processes, and restricts them from viewing network-related information,
56255+ and viewing kernel symbol and module information.
56256+
56257+config GRKERNSEC_PROC_USERGROUP
56258+ bool "Allow special group"
56259+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
56260+ help
56261+ If you say Y here, you will be able to select a group that will be
56262+ able to view all processes and network-related information. If you've
56263+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
56264+ remain hidden. This option is useful if you want to run identd as
56265+ a non-root user.
56266+
56267+config GRKERNSEC_PROC_GID
56268+ int "GID for special group"
56269+ depends on GRKERNSEC_PROC_USERGROUP
56270+ default 1001
56271+
56272+config GRKERNSEC_PROC_ADD
56273+ bool "Additional restrictions"
56274+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
56275+ help
56276+ If you say Y here, additional restrictions will be placed on
56277+ /proc that keep normal users from viewing device information and
56278+ slabinfo information that could be useful for exploits.
56279+
56280+config GRKERNSEC_LINK
56281+ bool "Linking restrictions"
56282+ help
56283+ If you say Y here, /tmp race exploits will be prevented, since users
56284+ will no longer be able to follow symlinks owned by other users in
56285+ world-writable +t directories (e.g. /tmp), unless the owner of the
56286+	  symlink is the owner of the directory. Users will also not be
56287+ able to hardlink to files they do not own. If the sysctl option is
56288+ enabled, a sysctl option with name "linking_restrictions" is created.
56289+
56290+config GRKERNSEC_FIFO
56291+ bool "FIFO restrictions"
56292+ help
56293+ If you say Y here, users will not be able to write to FIFOs they don't
56294+ own in world-writable +t directories (e.g. /tmp), unless the owner of
56295+ the FIFO is the same owner of the directory it's held in. If the sysctl
56296+ option is enabled, a sysctl option with name "fifo_restrictions" is
56297+ created.
56298+
56299+config GRKERNSEC_SYSFS_RESTRICT
56300+ bool "Sysfs/debugfs restriction"
56301+ depends on SYSFS
56302+ help
56303+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
56304+ any filesystem normally mounted under it (e.g. debugfs) will only
56305+ be accessible by root. These filesystems generally provide access
56306+ to hardware and debug information that isn't appropriate for unprivileged
56307+ users of the system. Sysfs and debugfs have also become a large source
56308+ of new vulnerabilities, ranging from infoleaks to local compromise.
56309+ There has been very little oversight with an eye toward security involved
56310+ in adding new exporters of information to these filesystems, so their
56311+ use is discouraged.
56312+ This option is equivalent to a chmod 0700 of the mount paths.
56313+
56314+config GRKERNSEC_ROFS
56315+ bool "Runtime read-only mount protection"
56316+ help
56317+ If you say Y here, a sysctl option with name "romount_protect" will
56318+ be created. By setting this option to 1 at runtime, filesystems
56319+ will be protected in the following ways:
56320+ * No new writable mounts will be allowed
56321+ * Existing read-only mounts won't be able to be remounted read/write
56322+ * Write operations will be denied on all block devices
56323+ This option acts independently of grsec_lock: once it is set to 1,
56324+ it cannot be turned off. Therefore, please be mindful of the resulting
56325+ behavior if this option is enabled in an init script on a read-only
56326+ filesystem. This feature is mainly intended for secure embedded systems.
56327+
56328+config GRKERNSEC_CHROOT
56329+ bool "Chroot jail restrictions"
56330+ help
56331+ If you say Y here, you will be able to choose several options that will
56332+ make breaking out of a chrooted jail much more difficult. If you
56333+ encounter no software incompatibilities with the following options, it
56334+ is recommended that you enable each one.
56335+
56336+config GRKERNSEC_CHROOT_MOUNT
56337+ bool "Deny mounts"
56338+ depends on GRKERNSEC_CHROOT
56339+ help
56340+ If you say Y here, processes inside a chroot will not be able to
56341+ mount or remount filesystems. If the sysctl option is enabled, a
56342+ sysctl option with name "chroot_deny_mount" is created.
56343+
56344+config GRKERNSEC_CHROOT_DOUBLE
56345+ bool "Deny double-chroots"
56346+ depends on GRKERNSEC_CHROOT
56347+ help
56348+ If you say Y here, processes inside a chroot will not be able to chroot
56349+ again outside the chroot. This is a widely used method of breaking
56350+ out of a chroot jail and should not be allowed. If the sysctl
56351+ option is enabled, a sysctl option with name
56352+ "chroot_deny_chroot" is created.
56353+
56354+config GRKERNSEC_CHROOT_PIVOT
56355+ bool "Deny pivot_root in chroot"
56356+ depends on GRKERNSEC_CHROOT
56357+ help
56358+ If you say Y here, processes inside a chroot will not be able to use
56359+ a function called pivot_root() that was introduced in Linux 2.3.41. It
56360+	  works similarly to chroot in that it changes the root filesystem. This
56361+ function could be misused in a chrooted process to attempt to break out
56362+ of the chroot, and therefore should not be allowed. If the sysctl
56363+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
56364+ created.
56365+
56366+config GRKERNSEC_CHROOT_CHDIR
56367+ bool "Enforce chdir(\"/\") on all chroots"
56368+ depends on GRKERNSEC_CHROOT
56369+ help
56370+ If you say Y here, the current working directory of all newly-chrooted
56371+	  applications will be set to the root directory of the chroot.
56372+ The man page on chroot(2) states:
56373+ Note that this call does not change the current working
56374+ directory, so that `.' can be outside the tree rooted at
56375+ `/'. In particular, the super-user can escape from a
56376+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
56377+
56378+ It is recommended that you say Y here, since it's not known to break
56379+ any software. If the sysctl option is enabled, a sysctl option with
56380+ name "chroot_enforce_chdir" is created.
56381+
56382+config GRKERNSEC_CHROOT_CHMOD
56383+ bool "Deny (f)chmod +s"
56384+ depends on GRKERNSEC_CHROOT
56385+ help
56386+ If you say Y here, processes inside a chroot will not be able to chmod
56387+ or fchmod files to make them have suid or sgid bits. This protects
56388+ against another published method of breaking a chroot. If the sysctl
56389+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
56390+ created.
56391+
56392+config GRKERNSEC_CHROOT_FCHDIR
56393+ bool "Deny fchdir out of chroot"
56394+ depends on GRKERNSEC_CHROOT
56395+ help
56396+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
56397+ to a file descriptor of the chrooting process that points to a directory
56398+ outside the filesystem will be stopped. If the sysctl option
56399+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
56400+
56401+config GRKERNSEC_CHROOT_MKNOD
56402+ bool "Deny mknod"
56403+ depends on GRKERNSEC_CHROOT
56404+ help
56405+ If you say Y here, processes inside a chroot will not be allowed to
56406+ mknod. The problem with using mknod inside a chroot is that it
56407+ would allow an attacker to create a device entry that is the same
56408+ as one on the physical root of your system, which could range from
56409+ anything from the console device to a device for your harddrive (which
56410+ they could then use to wipe the drive or steal data). It is recommended
56411+ that you say Y here, unless you run into software incompatibilities.
56412+ If the sysctl option is enabled, a sysctl option with name
56413+ "chroot_deny_mknod" is created.
56414+
56415+config GRKERNSEC_CHROOT_SHMAT
56416+ bool "Deny shmat() out of chroot"
56417+ depends on GRKERNSEC_CHROOT
56418+ help
56419+ If you say Y here, processes inside a chroot will not be able to attach
56420+ to shared memory segments that were created outside of the chroot jail.
56421+ It is recommended that you say Y here. If the sysctl option is enabled,
56422+ a sysctl option with name "chroot_deny_shmat" is created.
56423+
56424+config GRKERNSEC_CHROOT_UNIX
56425+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
56426+ depends on GRKERNSEC_CHROOT
56427+ help
56428+ If you say Y here, processes inside a chroot will not be able to
56429+ connect to abstract (meaning not belonging to a filesystem) Unix
56430+ domain sockets that were bound outside of a chroot. It is recommended
56431+ that you say Y here. If the sysctl option is enabled, a sysctl option
56432+ with name "chroot_deny_unix" is created.
56433+
56434+config GRKERNSEC_CHROOT_FINDTASK
56435+ bool "Protect outside processes"
56436+ depends on GRKERNSEC_CHROOT
56437+ help
56438+ If you say Y here, processes inside a chroot will not be able to
56439+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
56440+ getsid, or view any process outside of the chroot. If the sysctl
56441+ option is enabled, a sysctl option with name "chroot_findtask" is
56442+ created.
56443+
56444+config GRKERNSEC_CHROOT_NICE
56445+ bool "Restrict priority changes"
56446+ depends on GRKERNSEC_CHROOT
56447+ help
56448+ If you say Y here, processes inside a chroot will not be able to raise
56449+ the priority of processes in the chroot, or alter the priority of
56450+ processes outside the chroot. This provides more security than simply
56451+ removing CAP_SYS_NICE from the process' capability set. If the
56452+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
56453+ is created.
56454+
56455+config GRKERNSEC_CHROOT_SYSCTL
56456+ bool "Deny sysctl writes"
56457+ depends on GRKERNSEC_CHROOT
56458+ help
56459+ If you say Y here, an attacker in a chroot will not be able to
56460+ write to sysctl entries, either by sysctl(2) or through a /proc
56461+ interface. It is strongly recommended that you say Y here. If the
56462+ sysctl option is enabled, a sysctl option with name
56463+ "chroot_deny_sysctl" is created.
56464+
56465+config GRKERNSEC_CHROOT_CAPS
56466+ bool "Capability restrictions"
56467+ depends on GRKERNSEC_CHROOT
56468+ help
56469+ If you say Y here, the capabilities on all processes within a
56470+ chroot jail will be lowered to stop module insertion, raw i/o,
56471+ system and net admin tasks, rebooting the system, modifying immutable
56472+ files, modifying IPC owned by another, and changing the system time.
56473+ This is left an option because it can break some apps. Disable this
56474+ if your chrooted apps are having problems performing those kinds of
56475+ tasks. If the sysctl option is enabled, a sysctl option with
56476+ name "chroot_caps" is created.
56477+
56478+endmenu
56479+menu "Kernel Auditing"
56480+depends on GRKERNSEC
56481+
56482+config GRKERNSEC_AUDIT_GROUP
56483+ bool "Single group for auditing"
56484+ help
56485+ If you say Y here, the exec, chdir, and (un)mount logging features
56486+ will only operate on a group you specify. This option is recommended
56487+ if you only want to watch certain users instead of having a large
56488+ amount of logs from the entire system. If the sysctl option is enabled,
56489+ a sysctl option with name "audit_group" is created.
56490+
56491+config GRKERNSEC_AUDIT_GID
56492+ int "GID for auditing"
56493+ depends on GRKERNSEC_AUDIT_GROUP
56494+ default 1007
56495+
56496+config GRKERNSEC_EXECLOG
56497+ bool "Exec logging"
56498+ help
56499+ If you say Y here, all execve() calls will be logged (since the
56500+ other exec*() calls are frontends to execve(), all execution
56501+ will be logged). Useful for shell-servers that like to keep track
56502+ of their users. If the sysctl option is enabled, a sysctl option with
56503+ name "exec_logging" is created.
56504+ WARNING: This option when enabled will produce a LOT of logs, especially
56505+ on an active system.
56506+
56507+config GRKERNSEC_RESLOG
56508+ bool "Resource logging"
56509+ help
56510+ If you say Y here, all attempts to overstep resource limits will
56511+ be logged with the resource name, the requested size, and the current
56512+ limit. It is highly recommended that you say Y here. If the sysctl
56513+ option is enabled, a sysctl option with name "resource_logging" is
56514+ created. If the RBAC system is enabled, the sysctl value is ignored.
56515+
56516+config GRKERNSEC_CHROOT_EXECLOG
56517+ bool "Log execs within chroot"
56518+ help
56519+ If you say Y here, all executions inside a chroot jail will be logged
56520+ to syslog. This can cause a large amount of logs if certain
56521+ applications (eg. djb's daemontools) are installed on the system, and
56522+ is therefore left as an option. If the sysctl option is enabled, a
56523+ sysctl option with name "chroot_execlog" is created.
56524+
56525+config GRKERNSEC_AUDIT_PTRACE
56526+ bool "Ptrace logging"
56527+ help
56528+ If you say Y here, all attempts to attach to a process via ptrace
56529+ will be logged. If the sysctl option is enabled, a sysctl option
56530+ with name "audit_ptrace" is created.
56531+
56532+config GRKERNSEC_AUDIT_CHDIR
56533+ bool "Chdir logging"
56534+ help
56535+ If you say Y here, all chdir() calls will be logged. If the sysctl
56536+ option is enabled, a sysctl option with name "audit_chdir" is created.
56537+
56538+config GRKERNSEC_AUDIT_MOUNT
56539+ bool "(Un)Mount logging"
56540+ help
56541+ If you say Y here, all mounts and unmounts will be logged. If the
56542+ sysctl option is enabled, a sysctl option with name "audit_mount" is
56543+ created.
56544+
56545+config GRKERNSEC_SIGNAL
56546+ bool "Signal logging"
56547+ help
56548+ If you say Y here, certain important signals will be logged, such as
56549+ SIGSEGV, which will as a result inform you of when a error in a program
56550+ occurred, which in some cases could mean a possible exploit attempt.
56551+ If the sysctl option is enabled, a sysctl option with name
56552+ "signal_logging" is created.
56553+
56554+config GRKERNSEC_FORKFAIL
56555+ bool "Fork failure logging"
56556+ help
56557+ If you say Y here, all failed fork() attempts will be logged.
56558+ This could suggest a fork bomb, or someone attempting to overstep
56559+ their process limit. If the sysctl option is enabled, a sysctl option
56560+ with name "forkfail_logging" is created.
56561+
56562+config GRKERNSEC_TIME
56563+ bool "Time change logging"
56564+ help
56565+ If you say Y here, any changes of the system clock will be logged.
56566+ If the sysctl option is enabled, a sysctl option with name
56567+ "timechange_logging" is created.
56568+
56569+config GRKERNSEC_PROC_IPADDR
56570+ bool "/proc/<pid>/ipaddr support"
56571+ help
56572+ If you say Y here, a new entry will be added to each /proc/<pid>
56573+ directory that contains the IP address of the person using the task.
56574+ The IP is carried across local TCP and AF_UNIX stream sockets.
56575+ This information can be useful for IDS/IPSes to perform remote response
56576+ to a local attack. The entry is readable by only the owner of the
56577+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
56578+ the RBAC system), and thus does not create privacy concerns.
56579+
56580+config GRKERNSEC_RWXMAP_LOG
56581+ bool 'Denied RWX mmap/mprotect logging'
56582+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
56583+ help
56584+ If you say Y here, calls to mmap() and mprotect() with explicit
56585+ usage of PROT_WRITE and PROT_EXEC together will be logged when
56586+ denied by the PAX_MPROTECT feature. If the sysctl option is
56587+ enabled, a sysctl option with name "rwxmap_logging" is created.
56588+
56589+config GRKERNSEC_AUDIT_TEXTREL
56590+ bool 'ELF text relocations logging (READ HELP)'
56591+ depends on PAX_MPROTECT
56592+ help
56593+ If you say Y here, text relocations will be logged with the filename
56594+ of the offending library or binary. The purpose of the feature is
56595+ to help Linux distribution developers get rid of libraries and
56596+ binaries that need text relocations which hinder the future progress
56597+ of PaX. Only Linux distribution developers should say Y here, and
56598+ never on a production machine, as this option creates an information
56599+ leak that could aid an attacker in defeating the randomization of
56600+ a single memory region. If the sysctl option is enabled, a sysctl
56601+ option with name "audit_textrel" is created.
56602+
56603+endmenu
56604+
56605+menu "Executable Protections"
56606+depends on GRKERNSEC
56607+
56608+config GRKERNSEC_DMESG
56609+ bool "Dmesg(8) restriction"
56610+ help
56611+ If you say Y here, non-root users will not be able to use dmesg(8)
56612+ to view up to the last 4kb of messages in the kernel's log buffer.
56613+ The kernel's log buffer often contains kernel addresses and other
56614+ identifying information useful to an attacker in fingerprinting a
56615+ system for a targeted exploit.
56616+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
56617+ created.
56618+
56619+config GRKERNSEC_HARDEN_PTRACE
56620+ bool "Deter ptrace-based process snooping"
56621+ help
56622+ If you say Y here, TTY sniffers and other malicious monitoring
56623+ programs implemented through ptrace will be defeated. If you
56624+ have been using the RBAC system, this option has already been
56625+ enabled for several years for all users, with the ability to make
56626+ fine-grained exceptions.
56627+
56628+ This option only affects the ability of non-root users to ptrace
56629+ processes that are not a descendent of the ptracing process.
56630+ This means that strace ./binary and gdb ./binary will still work,
56631+ but attaching to arbitrary processes will not. If the sysctl
56632+ option is enabled, a sysctl option with name "harden_ptrace" is
56633+ created.
56634+
56635+config GRKERNSEC_TPE
56636+ bool "Trusted Path Execution (TPE)"
56637+ help
56638+ If you say Y here, you will be able to choose a gid to add to the
56639+ supplementary groups of users you want to mark as "untrusted."
56640+ These users will not be able to execute any files that are not in
56641+ root-owned directories writable only by root. If the sysctl option
56642+ is enabled, a sysctl option with name "tpe" is created.
56643+
56644+config GRKERNSEC_TPE_ALL
56645+ bool "Partially restrict all non-root users"
56646+ depends on GRKERNSEC_TPE
56647+ help
56648+ If you say Y here, all non-root users will be covered under
56649+ a weaker TPE restriction. This is separate from, and in addition to,
56650+ the main TPE options that you have selected elsewhere. Thus, if a
56651+ "trusted" GID is chosen, this restriction applies to even that GID.
56652+ Under this restriction, all non-root users will only be allowed to
56653+ execute files in directories they own that are not group or
56654+ world-writable, or in directories owned by root and writable only by
56655+ root. If the sysctl option is enabled, a sysctl option with name
56656+ "tpe_restrict_all" is created.
56657+
56658+config GRKERNSEC_TPE_INVERT
56659+ bool "Invert GID option"
56660+ depends on GRKERNSEC_TPE
56661+ help
56662+ If you say Y here, the group you specify in the TPE configuration will
56663+ decide what group TPE restrictions will be *disabled* for. This
56664+ option is useful if you want TPE restrictions to be applied to most
56665+ users on the system. If the sysctl option is enabled, a sysctl option
56666+ with name "tpe_invert" is created. Unlike other sysctl options, this
56667+ entry will default to on for backward-compatibility.
56668+
56669+config GRKERNSEC_TPE_GID
56670+ int "GID for untrusted users"
56671+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
56672+ default 1005
56673+ help
56674+ Setting this GID determines what group TPE restrictions will be
56675+ *enabled* for. If the sysctl option is enabled, a sysctl option
56676+ with name "tpe_gid" is created.
56677+
56678+config GRKERNSEC_TPE_GID
56679+ int "GID for trusted users"
56680+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
56681+ default 1005
56682+ help
56683+ Setting this GID determines what group TPE restrictions will be
56684+ *disabled* for. If the sysctl option is enabled, a sysctl option
56685+ with name "tpe_gid" is created.
56686+
56687+endmenu
56688+menu "Network Protections"
56689+depends on GRKERNSEC
56690+
56691+config GRKERNSEC_RANDNET
56692+ bool "Larger entropy pools"
56693+ help
56694+ If you say Y here, the entropy pools used for many features of Linux
56695+ and grsecurity will be doubled in size. Since several grsecurity
56696+ features use additional randomness, it is recommended that you say Y
56697+ here. Saying Y here has a similar effect as modifying
56698+ /proc/sys/kernel/random/poolsize.
56699+
56700+config GRKERNSEC_BLACKHOLE
56701+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
56702+ depends on NET
56703+ help
56704+ If you say Y here, neither TCP resets nor ICMP
56705+ destination-unreachable packets will be sent in response to packets
56706+ sent to ports for which no associated listening process exists.
56707+ This feature supports both IPV4 and IPV6 and exempts the
56708+ loopback interface from blackholing. Enabling this feature
56709+ makes a host more resilient to DoS attacks and reduces network
56710+ visibility against scanners.
56711+
56712+ The blackhole feature as-implemented is equivalent to the FreeBSD
56713+ blackhole feature, as it prevents RST responses to all packets, not
56714+ just SYNs. Under most application behavior this causes no
56715+ problems, but applications (like haproxy) may not close certain
56716+ connections in a way that cleanly terminates them on the remote
56717+ end, leaving the remote host in LAST_ACK state. Because of this
56718+ side-effect and to prevent intentional LAST_ACK DoSes, this
56719+ feature also adds automatic mitigation against such attacks.
56720+ The mitigation drastically reduces the amount of time a socket
56721+ can spend in LAST_ACK state. If you're using haproxy and not
56722+ all servers it connects to have this option enabled, consider
56723+ disabling this feature on the haproxy host.
56724+
56725+ If the sysctl option is enabled, two sysctl options with names
56726+ "ip_blackhole" and "lastack_retries" will be created.
56727+ While "ip_blackhole" takes the standard zero/non-zero on/off
56728+ toggle, "lastack_retries" uses the same kinds of values as
56729+ "tcp_retries1" and "tcp_retries2". The default value of 4
56730+ prevents a socket from lasting more than 45 seconds in LAST_ACK
56731+ state.
56732+
56733+config GRKERNSEC_SOCKET
56734+ bool "Socket restrictions"
56735+ depends on NET
56736+ help
56737+ If you say Y here, you will be able to choose from several options.
56738+ If you assign a GID on your system and add it to the supplementary
56739+ groups of users you want to restrict socket access to, this patch
56740+ will perform up to three things, based on the option(s) you choose.
56741+
56742+config GRKERNSEC_SOCKET_ALL
56743+ bool "Deny any sockets to group"
56744+ depends on GRKERNSEC_SOCKET
56745+ help
56746+ If you say Y here, you will be able to choose a GID of whose users will
56747+ be unable to connect to other hosts from your machine or run server
56748+ applications from your machine. If the sysctl option is enabled, a
56749+ sysctl option with name "socket_all" is created.
56750+
56751+config GRKERNSEC_SOCKET_ALL_GID
56752+ int "GID to deny all sockets for"
56753+ depends on GRKERNSEC_SOCKET_ALL
56754+ default 1004
56755+ help
56756+ Here you can choose the GID to disable socket access for. Remember to
56757+ add the users you want socket access disabled for to the GID
56758+ specified here. If the sysctl option is enabled, a sysctl option
56759+ with name "socket_all_gid" is created.
56760+
56761+config GRKERNSEC_SOCKET_CLIENT
56762+ bool "Deny client sockets to group"
56763+ depends on GRKERNSEC_SOCKET
56764+ help
56765+ If you say Y here, you will be able to choose a GID of whose users will
56766+ be unable to connect to other hosts from your machine, but will be
56767+ able to run servers. If this option is enabled, all users in the group
56768+ you specify will have to use passive mode when initiating ftp transfers
56769+ from the shell on your machine. If the sysctl option is enabled, a
56770+ sysctl option with name "socket_client" is created.
56771+
56772+config GRKERNSEC_SOCKET_CLIENT_GID
56773+ int "GID to deny client sockets for"
56774+ depends on GRKERNSEC_SOCKET_CLIENT
56775+ default 1003
56776+ help
56777+ Here you can choose the GID to disable client socket access for.
56778+ Remember to add the users you want client socket access disabled for to
56779+ the GID specified here. If the sysctl option is enabled, a sysctl
56780+ option with name "socket_client_gid" is created.
56781+
56782+config GRKERNSEC_SOCKET_SERVER
56783+ bool "Deny server sockets to group"
56784+ depends on GRKERNSEC_SOCKET
56785+ help
56786+ If you say Y here, you will be able to choose a GID of whose users will
56787+ be unable to run server applications from your machine. If the sysctl
56788+ option is enabled, a sysctl option with name "socket_server" is created.
56789+
56790+config GRKERNSEC_SOCKET_SERVER_GID
56791+ int "GID to deny server sockets for"
56792+ depends on GRKERNSEC_SOCKET_SERVER
56793+ default 1002
56794+ help
56795+ Here you can choose the GID to disable server socket access for.
56796+ Remember to add the users you want server socket access disabled for to
56797+ the GID specified here. If the sysctl option is enabled, a sysctl
56798+ option with name "socket_server_gid" is created.
56799+
56800+endmenu
56801+menu "Sysctl support"
56802+depends on GRKERNSEC && SYSCTL
56803+
56804+config GRKERNSEC_SYSCTL
56805+ bool "Sysctl support"
56806+ help
56807+ If you say Y here, you will be able to change the options that
56808+ grsecurity runs with at bootup, without having to recompile your
56809+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
56810+ to enable (1) or disable (0) various features. All the sysctl entries
56811+ are mutable until the "grsec_lock" entry is set to a non-zero value.
56812+ All features enabled in the kernel configuration are disabled at boot
56813+ if you do not say Y to the "Turn on features by default" option.
56814+ All options should be set at startup, and the grsec_lock entry should
56815+ be set to a non-zero value after all the options are set.
56816+ *THIS IS EXTREMELY IMPORTANT*
56817+
56818+config GRKERNSEC_SYSCTL_DISTRO
56819+ bool "Extra sysctl support for distro makers (READ HELP)"
56820+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
56821+ help
56822+ If you say Y here, additional sysctl options will be created
56823+ for features that affect processes running as root. Therefore,
56824+ it is critical when using this option that the grsec_lock entry be
56825+ enabled after boot. Only distros with prebuilt kernel packages
56826+ with this option enabled that can ensure grsec_lock is enabled
56827+ after boot should use this option.
56828+ *Failure to set grsec_lock after boot makes all grsec features
56829+ this option covers useless*
56830+
56831+ Currently this option creates the following sysctl entries:
56832+ "Disable Privileged I/O": "disable_priv_io"
56833+
56834+config GRKERNSEC_SYSCTL_ON
56835+ bool "Turn on features by default"
56836+ depends on GRKERNSEC_SYSCTL
56837+ help
56838+ If you say Y here, instead of having all features enabled in the
56839+ kernel configuration disabled at boot time, the features will be
56840+ enabled at boot time. It is recommended you say Y here unless
56841+ there is some reason you would want all sysctl-tunable features to
56842+ be disabled by default. As mentioned elsewhere, it is important
56843+ to enable the grsec_lock entry once you have finished modifying
56844+ the sysctl entries.
56845+
56846+endmenu
56847+menu "Logging Options"
56848+depends on GRKERNSEC
56849+
56850+config GRKERNSEC_FLOODTIME
56851+ int "Seconds in between log messages (minimum)"
56852+ default 10
56853+ help
56854+ This option allows you to enforce the number of seconds between
56855+ grsecurity log messages. The default should be suitable for most
56856+ people, however, if you choose to change it, choose a value small enough
56857+ to allow informative logs to be produced, but large enough to
56858+ prevent flooding.
56859+
56860+config GRKERNSEC_FLOODBURST
56861+ int "Number of messages in a burst (maximum)"
56862+ default 6
56863+ help
56864+ This option allows you to choose the maximum number of messages allowed
56865+ within the flood time interval you chose in a separate option. The
56866+ default should be suitable for most people, however if you find that
56867+ many of your logs are being interpreted as flooding, you may want to
56868+ raise this value.
56869+
56870+endmenu
56871+
56872+endmenu
56873diff -urNp linux-3.1.1/grsecurity/Makefile linux-3.1.1/grsecurity/Makefile
56874--- linux-3.1.1/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
56875+++ linux-3.1.1/grsecurity/Makefile 2011-11-16 18:40:31.000000000 -0500
56876@@ -0,0 +1,36 @@
56877+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
56878+# during 2001-2009 it has been completely redesigned by Brad Spengler
56879+# into an RBAC system
56880+#
56881+# All code in this directory and various hooks inserted throughout the kernel
56882+# are copyright Brad Spengler - Open Source Security, Inc., and released
56883+# under the GPL v2 or higher
56884+
56885+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
56886+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
56887+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
56888+
56889+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
56890+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
56891+ gracl_learn.o grsec_log.o
56892+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
56893+
56894+ifdef CONFIG_NET
56895+obj-y += grsec_sock.o
56896+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
56897+endif
56898+
56899+ifndef CONFIG_GRKERNSEC
56900+obj-y += grsec_disabled.o
56901+endif
56902+
56903+ifdef CONFIG_GRKERNSEC_HIDESYM
56904+extra-y := grsec_hidesym.o
56905+$(obj)/grsec_hidesym.o:
56906+ @-chmod -f 500 /boot
56907+ @-chmod -f 500 /lib/modules
56908+ @-chmod -f 500 /lib64/modules
56909+ @-chmod -f 500 /lib32/modules
56910+ @-chmod -f 700 .
56911+ @echo ' grsec: protected kernel image paths'
56912+endif
56913diff -urNp linux-3.1.1/include/acpi/acpi_bus.h linux-3.1.1/include/acpi/acpi_bus.h
56914--- linux-3.1.1/include/acpi/acpi_bus.h 2011-11-11 15:19:27.000000000 -0500
56915+++ linux-3.1.1/include/acpi/acpi_bus.h 2011-11-16 18:39:08.000000000 -0500
56916@@ -107,7 +107,7 @@ struct acpi_device_ops {
56917 acpi_op_bind bind;
56918 acpi_op_unbind unbind;
56919 acpi_op_notify notify;
56920-};
56921+} __no_const;
56922
56923 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
56924
56925diff -urNp linux-3.1.1/include/asm-generic/atomic-long.h linux-3.1.1/include/asm-generic/atomic-long.h
56926--- linux-3.1.1/include/asm-generic/atomic-long.h 2011-11-11 15:19:27.000000000 -0500
56927+++ linux-3.1.1/include/asm-generic/atomic-long.h 2011-11-16 18:39:08.000000000 -0500
56928@@ -22,6 +22,12 @@
56929
56930 typedef atomic64_t atomic_long_t;
56931
56932+#ifdef CONFIG_PAX_REFCOUNT
56933+typedef atomic64_unchecked_t atomic_long_unchecked_t;
56934+#else
56935+typedef atomic64_t atomic_long_unchecked_t;
56936+#endif
56937+
56938 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
56939
56940 static inline long atomic_long_read(atomic_long_t *l)
56941@@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
56942 return (long)atomic64_read(v);
56943 }
56944
56945+#ifdef CONFIG_PAX_REFCOUNT
56946+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56947+{
56948+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56949+
56950+ return (long)atomic64_read_unchecked(v);
56951+}
56952+#endif
56953+
56954 static inline void atomic_long_set(atomic_long_t *l, long i)
56955 {
56956 atomic64_t *v = (atomic64_t *)l;
56957@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
56958 atomic64_set(v, i);
56959 }
56960
56961+#ifdef CONFIG_PAX_REFCOUNT
56962+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56963+{
56964+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56965+
56966+ atomic64_set_unchecked(v, i);
56967+}
56968+#endif
56969+
56970 static inline void atomic_long_inc(atomic_long_t *l)
56971 {
56972 atomic64_t *v = (atomic64_t *)l;
56973@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
56974 atomic64_inc(v);
56975 }
56976
56977+#ifdef CONFIG_PAX_REFCOUNT
56978+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56979+{
56980+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56981+
56982+ atomic64_inc_unchecked(v);
56983+}
56984+#endif
56985+
56986 static inline void atomic_long_dec(atomic_long_t *l)
56987 {
56988 atomic64_t *v = (atomic64_t *)l;
56989@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
56990 atomic64_dec(v);
56991 }
56992
56993+#ifdef CONFIG_PAX_REFCOUNT
56994+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
56995+{
56996+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56997+
56998+ atomic64_dec_unchecked(v);
56999+}
57000+#endif
57001+
57002 static inline void atomic_long_add(long i, atomic_long_t *l)
57003 {
57004 atomic64_t *v = (atomic64_t *)l;
57005@@ -59,6 +101,15 @@ static inline void atomic_long_add(long
57006 atomic64_add(i, v);
57007 }
57008
57009+#ifdef CONFIG_PAX_REFCOUNT
57010+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57011+{
57012+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57013+
57014+ atomic64_add_unchecked(i, v);
57015+}
57016+#endif
57017+
57018 static inline void atomic_long_sub(long i, atomic_long_t *l)
57019 {
57020 atomic64_t *v = (atomic64_t *)l;
57021@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
57022 atomic64_sub(i, v);
57023 }
57024
57025+#ifdef CONFIG_PAX_REFCOUNT
57026+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57027+{
57028+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57029+
57030+ atomic64_sub_unchecked(i, v);
57031+}
57032+#endif
57033+
57034 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57035 {
57036 atomic64_t *v = (atomic64_t *)l;
57037@@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
57038 return (long)atomic64_inc_return(v);
57039 }
57040
57041+#ifdef CONFIG_PAX_REFCOUNT
57042+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57043+{
57044+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57045+
57046+ return (long)atomic64_inc_return_unchecked(v);
57047+}
57048+#endif
57049+
57050 static inline long atomic_long_dec_return(atomic_long_t *l)
57051 {
57052 atomic64_t *v = (atomic64_t *)l;
57053@@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
57054
57055 typedef atomic_t atomic_long_t;
57056
57057+#ifdef CONFIG_PAX_REFCOUNT
57058+typedef atomic_unchecked_t atomic_long_unchecked_t;
57059+#else
57060+typedef atomic_t atomic_long_unchecked_t;
57061+#endif
57062+
57063 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
57064 static inline long atomic_long_read(atomic_long_t *l)
57065 {
57066@@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
57067 return (long)atomic_read(v);
57068 }
57069
57070+#ifdef CONFIG_PAX_REFCOUNT
57071+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57072+{
57073+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57074+
57075+ return (long)atomic_read_unchecked(v);
57076+}
57077+#endif
57078+
57079 static inline void atomic_long_set(atomic_long_t *l, long i)
57080 {
57081 atomic_t *v = (atomic_t *)l;
57082@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
57083 atomic_set(v, i);
57084 }
57085
57086+#ifdef CONFIG_PAX_REFCOUNT
57087+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57088+{
57089+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57090+
57091+ atomic_set_unchecked(v, i);
57092+}
57093+#endif
57094+
57095 static inline void atomic_long_inc(atomic_long_t *l)
57096 {
57097 atomic_t *v = (atomic_t *)l;
57098@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
57099 atomic_inc(v);
57100 }
57101
57102+#ifdef CONFIG_PAX_REFCOUNT
57103+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57104+{
57105+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57106+
57107+ atomic_inc_unchecked(v);
57108+}
57109+#endif
57110+
57111 static inline void atomic_long_dec(atomic_long_t *l)
57112 {
57113 atomic_t *v = (atomic_t *)l;
57114@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
57115 atomic_dec(v);
57116 }
57117
57118+#ifdef CONFIG_PAX_REFCOUNT
57119+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57120+{
57121+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57122+
57123+ atomic_dec_unchecked(v);
57124+}
57125+#endif
57126+
57127 static inline void atomic_long_add(long i, atomic_long_t *l)
57128 {
57129 atomic_t *v = (atomic_t *)l;
57130@@ -176,6 +287,15 @@ static inline void atomic_long_add(long
57131 atomic_add(i, v);
57132 }
57133
57134+#ifdef CONFIG_PAX_REFCOUNT
57135+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57136+{
57137+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57138+
57139+ atomic_add_unchecked(i, v);
57140+}
57141+#endif
57142+
57143 static inline void atomic_long_sub(long i, atomic_long_t *l)
57144 {
57145 atomic_t *v = (atomic_t *)l;
57146@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
57147 atomic_sub(i, v);
57148 }
57149
57150+#ifdef CONFIG_PAX_REFCOUNT
57151+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57152+{
57153+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57154+
57155+ atomic_sub_unchecked(i, v);
57156+}
57157+#endif
57158+
57159 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57160 {
57161 atomic_t *v = (atomic_t *)l;
57162@@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
57163 return (long)atomic_inc_return(v);
57164 }
57165
57166+#ifdef CONFIG_PAX_REFCOUNT
57167+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57168+{
57169+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57170+
57171+ return (long)atomic_inc_return_unchecked(v);
57172+}
57173+#endif
57174+
57175 static inline long atomic_long_dec_return(atomic_long_t *l)
57176 {
57177 atomic_t *v = (atomic_t *)l;
57178@@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
57179
57180 #endif /* BITS_PER_LONG == 64 */
57181
57182+#ifdef CONFIG_PAX_REFCOUNT
57183+static inline void pax_refcount_needs_these_functions(void)
57184+{
57185+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
57186+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
57187+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
57188+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
57189+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
57190+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
57191+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
57192+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
57193+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
57194+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
57195+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
57196+
57197+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
57198+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
57199+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
57200+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
57201+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
57202+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
57203+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
57204+}
57205+#else
57206+#define atomic_read_unchecked(v) atomic_read(v)
57207+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
57208+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
57209+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
57210+#define atomic_inc_unchecked(v) atomic_inc(v)
57211+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
57212+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
57213+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
57214+#define atomic_dec_unchecked(v) atomic_dec(v)
57215+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
57216+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
57217+
57218+#define atomic_long_read_unchecked(v) atomic_long_read(v)
57219+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
57220+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
57221+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
57222+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
57223+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
57224+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
57225+#endif
57226+
57227 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
57228diff -urNp linux-3.1.1/include/asm-generic/cache.h linux-3.1.1/include/asm-generic/cache.h
57229--- linux-3.1.1/include/asm-generic/cache.h 2011-11-11 15:19:27.000000000 -0500
57230+++ linux-3.1.1/include/asm-generic/cache.h 2011-11-16 18:39:08.000000000 -0500
57231@@ -6,7 +6,7 @@
57232 * cache lines need to provide their own cache.h.
57233 */
57234
57235-#define L1_CACHE_SHIFT 5
57236-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
57237+#define L1_CACHE_SHIFT 5UL
57238+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
57239
57240 #endif /* __ASM_GENERIC_CACHE_H */
57241diff -urNp linux-3.1.1/include/asm-generic/int-l64.h linux-3.1.1/include/asm-generic/int-l64.h
57242--- linux-3.1.1/include/asm-generic/int-l64.h 2011-11-11 15:19:27.000000000 -0500
57243+++ linux-3.1.1/include/asm-generic/int-l64.h 2011-11-16 18:39:08.000000000 -0500
57244@@ -46,6 +46,8 @@ typedef unsigned int u32;
57245 typedef signed long s64;
57246 typedef unsigned long u64;
57247
57248+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
57249+
57250 #define S8_C(x) x
57251 #define U8_C(x) x ## U
57252 #define S16_C(x) x
57253diff -urNp linux-3.1.1/include/asm-generic/int-ll64.h linux-3.1.1/include/asm-generic/int-ll64.h
57254--- linux-3.1.1/include/asm-generic/int-ll64.h 2011-11-11 15:19:27.000000000 -0500
57255+++ linux-3.1.1/include/asm-generic/int-ll64.h 2011-11-16 18:39:08.000000000 -0500
57256@@ -51,6 +51,8 @@ typedef unsigned int u32;
57257 typedef signed long long s64;
57258 typedef unsigned long long u64;
57259
57260+typedef unsigned long long intoverflow_t;
57261+
57262 #define S8_C(x) x
57263 #define U8_C(x) x ## U
57264 #define S16_C(x) x
57265diff -urNp linux-3.1.1/include/asm-generic/kmap_types.h linux-3.1.1/include/asm-generic/kmap_types.h
57266--- linux-3.1.1/include/asm-generic/kmap_types.h 2011-11-11 15:19:27.000000000 -0500
57267+++ linux-3.1.1/include/asm-generic/kmap_types.h 2011-11-16 18:39:08.000000000 -0500
57268@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
57269 KMAP_D(17) KM_NMI,
57270 KMAP_D(18) KM_NMI_PTE,
57271 KMAP_D(19) KM_KDB,
57272+KMAP_D(20) KM_CLEARPAGE,
57273 /*
57274 * Remember to update debug_kmap_atomic() when adding new kmap types!
57275 */
57276-KMAP_D(20) KM_TYPE_NR
57277+KMAP_D(21) KM_TYPE_NR
57278 };
57279
57280 #undef KMAP_D
57281diff -urNp linux-3.1.1/include/asm-generic/pgtable.h linux-3.1.1/include/asm-generic/pgtable.h
57282--- linux-3.1.1/include/asm-generic/pgtable.h 2011-11-11 15:19:27.000000000 -0500
57283+++ linux-3.1.1/include/asm-generic/pgtable.h 2011-11-16 18:39:08.000000000 -0500
57284@@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
57285 #endif /* __HAVE_ARCH_PMD_WRITE */
57286 #endif
57287
57288+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
57289+static inline unsigned long pax_open_kernel(void) { return 0; }
57290+#endif
57291+
57292+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
57293+static inline unsigned long pax_close_kernel(void) { return 0; }
57294+#endif
57295+
57296 #endif /* !__ASSEMBLY__ */
57297
57298 #endif /* _ASM_GENERIC_PGTABLE_H */
57299diff -urNp linux-3.1.1/include/asm-generic/pgtable-nopmd.h linux-3.1.1/include/asm-generic/pgtable-nopmd.h
57300--- linux-3.1.1/include/asm-generic/pgtable-nopmd.h 2011-11-11 15:19:27.000000000 -0500
57301+++ linux-3.1.1/include/asm-generic/pgtable-nopmd.h 2011-11-16 18:39:08.000000000 -0500
57302@@ -1,14 +1,19 @@
57303 #ifndef _PGTABLE_NOPMD_H
57304 #define _PGTABLE_NOPMD_H
57305
57306-#ifndef __ASSEMBLY__
57307-
57308 #include <asm-generic/pgtable-nopud.h>
57309
57310-struct mm_struct;
57311-
57312 #define __PAGETABLE_PMD_FOLDED
57313
57314+#define PMD_SHIFT PUD_SHIFT
57315+#define PTRS_PER_PMD 1
57316+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
57317+#define PMD_MASK (~(PMD_SIZE-1))
57318+
57319+#ifndef __ASSEMBLY__
57320+
57321+struct mm_struct;
57322+
57323 /*
57324 * Having the pmd type consist of a pud gets the size right, and allows
57325 * us to conceptually access the pud entry that this pmd is folded into
57326@@ -16,11 +21,6 @@ struct mm_struct;
57327 */
57328 typedef struct { pud_t pud; } pmd_t;
57329
57330-#define PMD_SHIFT PUD_SHIFT
57331-#define PTRS_PER_PMD 1
57332-#define PMD_SIZE (1UL << PMD_SHIFT)
57333-#define PMD_MASK (~(PMD_SIZE-1))
57334-
57335 /*
57336 * The "pud_xxx()" functions here are trivial for a folded two-level
57337 * setup: the pmd is never bad, and a pmd always exists (as it's folded
57338diff -urNp linux-3.1.1/include/asm-generic/pgtable-nopud.h linux-3.1.1/include/asm-generic/pgtable-nopud.h
57339--- linux-3.1.1/include/asm-generic/pgtable-nopud.h 2011-11-11 15:19:27.000000000 -0500
57340+++ linux-3.1.1/include/asm-generic/pgtable-nopud.h 2011-11-16 18:39:08.000000000 -0500
57341@@ -1,10 +1,15 @@
57342 #ifndef _PGTABLE_NOPUD_H
57343 #define _PGTABLE_NOPUD_H
57344
57345-#ifndef __ASSEMBLY__
57346-
57347 #define __PAGETABLE_PUD_FOLDED
57348
57349+#define PUD_SHIFT PGDIR_SHIFT
57350+#define PTRS_PER_PUD 1
57351+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
57352+#define PUD_MASK (~(PUD_SIZE-1))
57353+
57354+#ifndef __ASSEMBLY__
57355+
57356 /*
57357 * Having the pud type consist of a pgd gets the size right, and allows
57358 * us to conceptually access the pgd entry that this pud is folded into
57359@@ -12,11 +17,6 @@
57360 */
57361 typedef struct { pgd_t pgd; } pud_t;
57362
57363-#define PUD_SHIFT PGDIR_SHIFT
57364-#define PTRS_PER_PUD 1
57365-#define PUD_SIZE (1UL << PUD_SHIFT)
57366-#define PUD_MASK (~(PUD_SIZE-1))
57367-
57368 /*
57369 * The "pgd_xxx()" functions here are trivial for a folded two-level
57370 * setup: the pud is never bad, and a pud always exists (as it's folded
57371diff -urNp linux-3.1.1/include/asm-generic/vmlinux.lds.h linux-3.1.1/include/asm-generic/vmlinux.lds.h
57372--- linux-3.1.1/include/asm-generic/vmlinux.lds.h 2011-11-11 15:19:27.000000000 -0500
57373+++ linux-3.1.1/include/asm-generic/vmlinux.lds.h 2011-11-16 18:39:08.000000000 -0500
57374@@ -217,6 +217,7 @@
57375 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
57376 VMLINUX_SYMBOL(__start_rodata) = .; \
57377 *(.rodata) *(.rodata.*) \
57378+ *(.data..read_only) \
57379 *(__vermagic) /* Kernel version magic */ \
57380 . = ALIGN(8); \
57381 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
57382@@ -723,17 +724,18 @@
57383 * section in the linker script will go there too. @phdr should have
57384 * a leading colon.
57385 *
57386- * Note that this macros defines __per_cpu_load as an absolute symbol.
57387+ * Note that this macros defines per_cpu_load as an absolute symbol.
57388 * If there is no need to put the percpu section at a predetermined
57389 * address, use PERCPU_SECTION.
57390 */
57391 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
57392- VMLINUX_SYMBOL(__per_cpu_load) = .; \
57393- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
57394+ per_cpu_load = .; \
57395+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
57396 - LOAD_OFFSET) { \
57397+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
57398 PERCPU_INPUT(cacheline) \
57399 } phdr \
57400- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
57401+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
57402
57403 /**
57404 * PERCPU_SECTION - define output section for percpu area, simple version
57405diff -urNp linux-3.1.1/include/drm/drm_crtc_helper.h linux-3.1.1/include/drm/drm_crtc_helper.h
57406--- linux-3.1.1/include/drm/drm_crtc_helper.h 2011-11-11 15:19:27.000000000 -0500
57407+++ linux-3.1.1/include/drm/drm_crtc_helper.h 2011-11-16 18:39:08.000000000 -0500
57408@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
57409
57410 /* disable crtc when not in use - more explicit than dpms off */
57411 void (*disable)(struct drm_crtc *crtc);
57412-};
57413+} __no_const;
57414
57415 struct drm_encoder_helper_funcs {
57416 void (*dpms)(struct drm_encoder *encoder, int mode);
57417@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
57418 struct drm_connector *connector);
57419 /* disable encoder when not in use - more explicit than dpms off */
57420 void (*disable)(struct drm_encoder *encoder);
57421-};
57422+} __no_const;
57423
57424 struct drm_connector_helper_funcs {
57425 int (*get_modes)(struct drm_connector *connector);
57426diff -urNp linux-3.1.1/include/drm/drmP.h linux-3.1.1/include/drm/drmP.h
57427--- linux-3.1.1/include/drm/drmP.h 2011-11-11 15:19:27.000000000 -0500
57428+++ linux-3.1.1/include/drm/drmP.h 2011-11-16 18:39:08.000000000 -0500
57429@@ -73,6 +73,7 @@
57430 #include <linux/workqueue.h>
57431 #include <linux/poll.h>
57432 #include <asm/pgalloc.h>
57433+#include <asm/local.h>
57434 #include "drm.h"
57435
57436 #include <linux/idr.h>
57437@@ -1035,7 +1036,7 @@ struct drm_device {
57438
57439 /** \name Usage Counters */
57440 /*@{ */
57441- int open_count; /**< Outstanding files open */
57442+ local_t open_count; /**< Outstanding files open */
57443 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
57444 atomic_t vma_count; /**< Outstanding vma areas open */
57445 int buf_use; /**< Buffers in use -- cannot alloc */
57446@@ -1046,7 +1047,7 @@ struct drm_device {
57447 /*@{ */
57448 unsigned long counters;
57449 enum drm_stat_type types[15];
57450- atomic_t counts[15];
57451+ atomic_unchecked_t counts[15];
57452 /*@} */
57453
57454 struct list_head filelist;
57455diff -urNp linux-3.1.1/include/drm/ttm/ttm_memory.h linux-3.1.1/include/drm/ttm/ttm_memory.h
57456--- linux-3.1.1/include/drm/ttm/ttm_memory.h 2011-11-11 15:19:27.000000000 -0500
57457+++ linux-3.1.1/include/drm/ttm/ttm_memory.h 2011-11-16 18:39:08.000000000 -0500
57458@@ -47,7 +47,7 @@
57459
57460 struct ttm_mem_shrink {
57461 int (*do_shrink) (struct ttm_mem_shrink *);
57462-};
57463+} __no_const;
57464
57465 /**
57466 * struct ttm_mem_global - Global memory accounting structure.
57467diff -urNp linux-3.1.1/include/linux/a.out.h linux-3.1.1/include/linux/a.out.h
57468--- linux-3.1.1/include/linux/a.out.h 2011-11-11 15:19:27.000000000 -0500
57469+++ linux-3.1.1/include/linux/a.out.h 2011-11-16 18:39:08.000000000 -0500
57470@@ -39,6 +39,14 @@ enum machine_type {
57471 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57472 };
57473
57474+/* Constants for the N_FLAGS field */
57475+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57476+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57477+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57478+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57479+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57480+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57481+
57482 #if !defined (N_MAGIC)
57483 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57484 #endif
57485diff -urNp linux-3.1.1/include/linux/atmdev.h linux-3.1.1/include/linux/atmdev.h
57486--- linux-3.1.1/include/linux/atmdev.h 2011-11-11 15:19:27.000000000 -0500
57487+++ linux-3.1.1/include/linux/atmdev.h 2011-11-16 18:39:08.000000000 -0500
57488@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57489 #endif
57490
57491 struct k_atm_aal_stats {
57492-#define __HANDLE_ITEM(i) atomic_t i
57493+#define __HANDLE_ITEM(i) atomic_unchecked_t i
57494 __AAL_STAT_ITEMS
57495 #undef __HANDLE_ITEM
57496 };
57497diff -urNp linux-3.1.1/include/linux/binfmts.h linux-3.1.1/include/linux/binfmts.h
57498--- linux-3.1.1/include/linux/binfmts.h 2011-11-11 15:19:27.000000000 -0500
57499+++ linux-3.1.1/include/linux/binfmts.h 2011-11-16 18:39:08.000000000 -0500
57500@@ -88,6 +88,7 @@ struct linux_binfmt {
57501 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57502 int (*load_shlib)(struct file *);
57503 int (*core_dump)(struct coredump_params *cprm);
57504+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57505 unsigned long min_coredump; /* minimal dump size */
57506 };
57507
57508diff -urNp linux-3.1.1/include/linux/blkdev.h linux-3.1.1/include/linux/blkdev.h
57509--- linux-3.1.1/include/linux/blkdev.h 2011-11-11 15:19:27.000000000 -0500
57510+++ linux-3.1.1/include/linux/blkdev.h 2011-11-16 18:39:08.000000000 -0500
57511@@ -1321,7 +1321,7 @@ struct block_device_operations {
57512 /* this callback is with swap_lock and sometimes page table lock held */
57513 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
57514 struct module *owner;
57515-};
57516+} __do_const;
57517
57518 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57519 unsigned long);
57520diff -urNp linux-3.1.1/include/linux/blktrace_api.h linux-3.1.1/include/linux/blktrace_api.h
57521--- linux-3.1.1/include/linux/blktrace_api.h 2011-11-11 15:19:27.000000000 -0500
57522+++ linux-3.1.1/include/linux/blktrace_api.h 2011-11-16 18:39:08.000000000 -0500
57523@@ -162,7 +162,7 @@ struct blk_trace {
57524 struct dentry *dir;
57525 struct dentry *dropped_file;
57526 struct dentry *msg_file;
57527- atomic_t dropped;
57528+ atomic_unchecked_t dropped;
57529 };
57530
57531 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57532diff -urNp linux-3.1.1/include/linux/byteorder/little_endian.h linux-3.1.1/include/linux/byteorder/little_endian.h
57533--- linux-3.1.1/include/linux/byteorder/little_endian.h 2011-11-11 15:19:27.000000000 -0500
57534+++ linux-3.1.1/include/linux/byteorder/little_endian.h 2011-11-16 18:39:08.000000000 -0500
57535@@ -42,51 +42,51 @@
57536
57537 static inline __le64 __cpu_to_le64p(const __u64 *p)
57538 {
57539- return (__force __le64)*p;
57540+ return (__force const __le64)*p;
57541 }
57542 static inline __u64 __le64_to_cpup(const __le64 *p)
57543 {
57544- return (__force __u64)*p;
57545+ return (__force const __u64)*p;
57546 }
57547 static inline __le32 __cpu_to_le32p(const __u32 *p)
57548 {
57549- return (__force __le32)*p;
57550+ return (__force const __le32)*p;
57551 }
57552 static inline __u32 __le32_to_cpup(const __le32 *p)
57553 {
57554- return (__force __u32)*p;
57555+ return (__force const __u32)*p;
57556 }
57557 static inline __le16 __cpu_to_le16p(const __u16 *p)
57558 {
57559- return (__force __le16)*p;
57560+ return (__force const __le16)*p;
57561 }
57562 static inline __u16 __le16_to_cpup(const __le16 *p)
57563 {
57564- return (__force __u16)*p;
57565+ return (__force const __u16)*p;
57566 }
57567 static inline __be64 __cpu_to_be64p(const __u64 *p)
57568 {
57569- return (__force __be64)__swab64p(p);
57570+ return (__force const __be64)__swab64p(p);
57571 }
57572 static inline __u64 __be64_to_cpup(const __be64 *p)
57573 {
57574- return __swab64p((__u64 *)p);
57575+ return __swab64p((const __u64 *)p);
57576 }
57577 static inline __be32 __cpu_to_be32p(const __u32 *p)
57578 {
57579- return (__force __be32)__swab32p(p);
57580+ return (__force const __be32)__swab32p(p);
57581 }
57582 static inline __u32 __be32_to_cpup(const __be32 *p)
57583 {
57584- return __swab32p((__u32 *)p);
57585+ return __swab32p((const __u32 *)p);
57586 }
57587 static inline __be16 __cpu_to_be16p(const __u16 *p)
57588 {
57589- return (__force __be16)__swab16p(p);
57590+ return (__force const __be16)__swab16p(p);
57591 }
57592 static inline __u16 __be16_to_cpup(const __be16 *p)
57593 {
57594- return __swab16p((__u16 *)p);
57595+ return __swab16p((const __u16 *)p);
57596 }
57597 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
57598 #define __le64_to_cpus(x) do { (void)(x); } while (0)
57599diff -urNp linux-3.1.1/include/linux/cache.h linux-3.1.1/include/linux/cache.h
57600--- linux-3.1.1/include/linux/cache.h 2011-11-11 15:19:27.000000000 -0500
57601+++ linux-3.1.1/include/linux/cache.h 2011-11-16 18:39:08.000000000 -0500
57602@@ -16,6 +16,10 @@
57603 #define __read_mostly
57604 #endif
57605
57606+#ifndef __read_only
57607+#define __read_only __read_mostly
57608+#endif
57609+
57610 #ifndef ____cacheline_aligned
57611 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
57612 #endif
57613diff -urNp linux-3.1.1/include/linux/capability.h linux-3.1.1/include/linux/capability.h
57614--- linux-3.1.1/include/linux/capability.h 2011-11-11 15:19:27.000000000 -0500
57615+++ linux-3.1.1/include/linux/capability.h 2011-11-16 18:40:31.000000000 -0500
57616@@ -547,6 +547,9 @@ extern bool capable(int cap);
57617 extern bool ns_capable(struct user_namespace *ns, int cap);
57618 extern bool task_ns_capable(struct task_struct *t, int cap);
57619 extern bool nsown_capable(int cap);
57620+extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
57621+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
57622+extern bool capable_nolog(int cap);
57623
57624 /* audit system wants to get cap info from files as well */
57625 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
57626diff -urNp linux-3.1.1/include/linux/cleancache.h linux-3.1.1/include/linux/cleancache.h
57627--- linux-3.1.1/include/linux/cleancache.h 2011-11-11 15:19:27.000000000 -0500
57628+++ linux-3.1.1/include/linux/cleancache.h 2011-11-16 18:39:08.000000000 -0500
57629@@ -31,7 +31,7 @@ struct cleancache_ops {
57630 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
57631 void (*flush_inode)(int, struct cleancache_filekey);
57632 void (*flush_fs)(int);
57633-};
57634+} __no_const;
57635
57636 extern struct cleancache_ops
57637 cleancache_register_ops(struct cleancache_ops *ops);
57638diff -urNp linux-3.1.1/include/linux/compiler-gcc4.h linux-3.1.1/include/linux/compiler-gcc4.h
57639--- linux-3.1.1/include/linux/compiler-gcc4.h 2011-11-11 15:19:27.000000000 -0500
57640+++ linux-3.1.1/include/linux/compiler-gcc4.h 2011-11-16 18:39:08.000000000 -0500
57641@@ -31,6 +31,12 @@
57642
57643
57644 #if __GNUC_MINOR__ >= 5
57645+
57646+#ifdef CONSTIFY_PLUGIN
57647+#define __no_const __attribute__((no_const))
57648+#define __do_const __attribute__((do_const))
57649+#endif
57650+
57651 /*
57652 * Mark a position in code as unreachable. This can be used to
57653 * suppress control flow warnings after asm blocks that transfer
57654@@ -46,6 +52,11 @@
57655 #define __noclone __attribute__((__noclone__))
57656
57657 #endif
57658+
57659+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
57660+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
57661+#define __bos0(ptr) __bos((ptr), 0)
57662+#define __bos1(ptr) __bos((ptr), 1)
57663 #endif
57664
57665 #if __GNUC_MINOR__ > 0
57666diff -urNp linux-3.1.1/include/linux/compiler.h linux-3.1.1/include/linux/compiler.h
57667--- linux-3.1.1/include/linux/compiler.h 2011-11-11 15:19:27.000000000 -0500
57668+++ linux-3.1.1/include/linux/compiler.h 2011-11-16 18:39:08.000000000 -0500
57669@@ -5,31 +5,62 @@
57670
57671 #ifdef __CHECKER__
57672 # define __user __attribute__((noderef, address_space(1)))
57673+# define __force_user __force __user
57674 # define __kernel __attribute__((address_space(0)))
57675+# define __force_kernel __force __kernel
57676 # define __safe __attribute__((safe))
57677 # define __force __attribute__((force))
57678 # define __nocast __attribute__((nocast))
57679 # define __iomem __attribute__((noderef, address_space(2)))
57680+# define __force_iomem __force __iomem
57681 # define __acquires(x) __attribute__((context(x,0,1)))
57682 # define __releases(x) __attribute__((context(x,1,0)))
57683 # define __acquire(x) __context__(x,1)
57684 # define __release(x) __context__(x,-1)
57685 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
57686 # define __percpu __attribute__((noderef, address_space(3)))
57687+# define __force_percpu __force __percpu
57688 #ifdef CONFIG_SPARSE_RCU_POINTER
57689 # define __rcu __attribute__((noderef, address_space(4)))
57690+# define __force_rcu __force __rcu
57691 #else
57692 # define __rcu
57693+# define __force_rcu
57694 #endif
57695 extern void __chk_user_ptr(const volatile void __user *);
57696 extern void __chk_io_ptr(const volatile void __iomem *);
57697+#elif defined(CHECKER_PLUGIN)
57698+//# define __user
57699+//# define __force_user
57700+//# define __kernel
57701+//# define __force_kernel
57702+# define __safe
57703+# define __force
57704+# define __nocast
57705+# define __iomem
57706+# define __force_iomem
57707+# define __chk_user_ptr(x) (void)0
57708+# define __chk_io_ptr(x) (void)0
57709+# define __builtin_warning(x, y...) (1)
57710+# define __acquires(x)
57711+# define __releases(x)
57712+# define __acquire(x) (void)0
57713+# define __release(x) (void)0
57714+# define __cond_lock(x,c) (c)
57715+# define __percpu
57716+# define __force_percpu
57717+# define __rcu
57718+# define __force_rcu
57719 #else
57720 # define __user
57721+# define __force_user
57722 # define __kernel
57723+# define __force_kernel
57724 # define __safe
57725 # define __force
57726 # define __nocast
57727 # define __iomem
57728+# define __force_iomem
57729 # define __chk_user_ptr(x) (void)0
57730 # define __chk_io_ptr(x) (void)0
57731 # define __builtin_warning(x, y...) (1)
57732@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile
57733 # define __release(x) (void)0
57734 # define __cond_lock(x,c) (c)
57735 # define __percpu
57736+# define __force_percpu
57737 # define __rcu
57738+# define __force_rcu
57739 #endif
57740
57741 #ifdef __KERNEL__
57742@@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_
57743 # define __attribute_const__ /* unimplemented */
57744 #endif
57745
57746+#ifndef __no_const
57747+# define __no_const
57748+#endif
57749+
57750+#ifndef __do_const
57751+# define __do_const
57752+#endif
57753+
57754 /*
57755 * Tell gcc if a function is cold. The compiler will assume any path
57756 * directly leading to the call is unlikely.
57757@@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_
57758 #define __cold
57759 #endif
57760
57761+#ifndef __alloc_size
57762+#define __alloc_size(...)
57763+#endif
57764+
57765+#ifndef __bos
57766+#define __bos(ptr, arg)
57767+#endif
57768+
57769+#ifndef __bos0
57770+#define __bos0(ptr)
57771+#endif
57772+
57773+#ifndef __bos1
57774+#define __bos1(ptr)
57775+#endif
57776+
57777 /* Simple shorthand for a section definition */
57778 #ifndef __section
57779 # define __section(S) __attribute__ ((__section__(#S)))
57780@@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_
57781 * use is to mediate communication between process-level code and irq/NMI
57782 * handlers, all running on the same CPU.
57783 */
57784-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
57785+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
57786+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
57787
57788 #endif /* __LINUX_COMPILER_H */
57789diff -urNp linux-3.1.1/include/linux/cpuset.h linux-3.1.1/include/linux/cpuset.h
57790--- linux-3.1.1/include/linux/cpuset.h 2011-11-11 15:19:27.000000000 -0500
57791+++ linux-3.1.1/include/linux/cpuset.h 2011-11-16 18:39:08.000000000 -0500
57792@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
57793 * nodemask.
57794 */
57795 smp_mb();
57796- --ACCESS_ONCE(current->mems_allowed_change_disable);
57797+ --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
57798 }
57799
57800 static inline void set_mems_allowed(nodemask_t nodemask)
57801diff -urNp linux-3.1.1/include/linux/crypto.h linux-3.1.1/include/linux/crypto.h
57802--- linux-3.1.1/include/linux/crypto.h 2011-11-11 15:19:27.000000000 -0500
57803+++ linux-3.1.1/include/linux/crypto.h 2011-11-16 18:39:08.000000000 -0500
57804@@ -361,7 +361,7 @@ struct cipher_tfm {
57805 const u8 *key, unsigned int keylen);
57806 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57807 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57808-};
57809+} __no_const;
57810
57811 struct hash_tfm {
57812 int (*init)(struct hash_desc *desc);
57813@@ -382,13 +382,13 @@ struct compress_tfm {
57814 int (*cot_decompress)(struct crypto_tfm *tfm,
57815 const u8 *src, unsigned int slen,
57816 u8 *dst, unsigned int *dlen);
57817-};
57818+} __no_const;
57819
57820 struct rng_tfm {
57821 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
57822 unsigned int dlen);
57823 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
57824-};
57825+} __no_const;
57826
57827 #define crt_ablkcipher crt_u.ablkcipher
57828 #define crt_aead crt_u.aead
57829diff -urNp linux-3.1.1/include/linux/decompress/mm.h linux-3.1.1/include/linux/decompress/mm.h
57830--- linux-3.1.1/include/linux/decompress/mm.h 2011-11-11 15:19:27.000000000 -0500
57831+++ linux-3.1.1/include/linux/decompress/mm.h 2011-11-16 18:39:08.000000000 -0500
57832@@ -77,7 +77,7 @@ static void free(void *where)
57833 * warnings when not needed (indeed large_malloc / large_free are not
57834 * needed by inflate */
57835
57836-#define malloc(a) kmalloc(a, GFP_KERNEL)
57837+#define malloc(a) kmalloc((a), GFP_KERNEL)
57838 #define free(a) kfree(a)
57839
57840 #define large_malloc(a) vmalloc(a)
57841diff -urNp linux-3.1.1/include/linux/dma-mapping.h linux-3.1.1/include/linux/dma-mapping.h
57842--- linux-3.1.1/include/linux/dma-mapping.h 2011-11-11 15:19:27.000000000 -0500
57843+++ linux-3.1.1/include/linux/dma-mapping.h 2011-11-16 18:39:08.000000000 -0500
57844@@ -42,7 +42,7 @@ struct dma_map_ops {
57845 int (*dma_supported)(struct device *dev, u64 mask);
57846 int (*set_dma_mask)(struct device *dev, u64 mask);
57847 int is_phys;
57848-};
57849+} __do_const;
57850
57851 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
57852
57853diff -urNp linux-3.1.1/include/linux/efi.h linux-3.1.1/include/linux/efi.h
57854--- linux-3.1.1/include/linux/efi.h 2011-11-11 15:19:27.000000000 -0500
57855+++ linux-3.1.1/include/linux/efi.h 2011-11-16 18:39:08.000000000 -0500
57856@@ -446,7 +446,7 @@ struct efivar_operations {
57857 efi_get_variable_t *get_variable;
57858 efi_get_next_variable_t *get_next_variable;
57859 efi_set_variable_t *set_variable;
57860-};
57861+} __no_const;
57862
57863 struct efivars {
57864 /*
57865diff -urNp linux-3.1.1/include/linux/elf.h linux-3.1.1/include/linux/elf.h
57866--- linux-3.1.1/include/linux/elf.h 2011-11-11 15:19:27.000000000 -0500
57867+++ linux-3.1.1/include/linux/elf.h 2011-11-16 18:39:08.000000000 -0500
57868@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
57869 #define PT_GNU_EH_FRAME 0x6474e550
57870
57871 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
57872+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
57873+
57874+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
57875+
57876+/* Constants for the e_flags field */
57877+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57878+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
57879+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
57880+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
57881+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57882+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57883
57884 /*
57885 * Extended Numbering
57886@@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
57887 #define DT_DEBUG 21
57888 #define DT_TEXTREL 22
57889 #define DT_JMPREL 23
57890+#define DT_FLAGS 30
57891+ #define DF_TEXTREL 0x00000004
57892 #define DT_ENCODING 32
57893 #define OLD_DT_LOOS 0x60000000
57894 #define DT_LOOS 0x6000000d
57895@@ -252,6 +265,19 @@ typedef struct elf64_hdr {
57896 #define PF_W 0x2
57897 #define PF_X 0x1
57898
57899+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
57900+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
57901+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
57902+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
57903+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
57904+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
57905+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
57906+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
57907+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
57908+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
57909+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
57910+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
57911+
57912 typedef struct elf32_phdr{
57913 Elf32_Word p_type;
57914 Elf32_Off p_offset;
57915@@ -344,6 +370,8 @@ typedef struct elf64_shdr {
57916 #define EI_OSABI 7
57917 #define EI_PAD 8
57918
57919+#define EI_PAX 14
57920+
57921 #define ELFMAG0 0x7f /* EI_MAG */
57922 #define ELFMAG1 'E'
57923 #define ELFMAG2 'L'
57924@@ -422,6 +450,7 @@ extern Elf32_Dyn _DYNAMIC [];
57925 #define elf_note elf32_note
57926 #define elf_addr_t Elf32_Off
57927 #define Elf_Half Elf32_Half
57928+#define elf_dyn Elf32_Dyn
57929
57930 #else
57931
57932@@ -432,6 +461,7 @@ extern Elf64_Dyn _DYNAMIC [];
57933 #define elf_note elf64_note
57934 #define elf_addr_t Elf64_Off
57935 #define Elf_Half Elf64_Half
57936+#define elf_dyn Elf64_Dyn
57937
57938 #endif
57939
57940diff -urNp linux-3.1.1/include/linux/firewire.h linux-3.1.1/include/linux/firewire.h
57941--- linux-3.1.1/include/linux/firewire.h 2011-11-11 15:19:27.000000000 -0500
57942+++ linux-3.1.1/include/linux/firewire.h 2011-11-16 18:39:08.000000000 -0500
57943@@ -428,7 +428,7 @@ struct fw_iso_context {
57944 union {
57945 fw_iso_callback_t sc;
57946 fw_iso_mc_callback_t mc;
57947- } callback;
57948+ } __no_const callback;
57949 void *callback_data;
57950 };
57951
57952diff -urNp linux-3.1.1/include/linux/fscache-cache.h linux-3.1.1/include/linux/fscache-cache.h
57953--- linux-3.1.1/include/linux/fscache-cache.h 2011-11-11 15:19:27.000000000 -0500
57954+++ linux-3.1.1/include/linux/fscache-cache.h 2011-11-16 18:39:08.000000000 -0500
57955@@ -102,7 +102,7 @@ struct fscache_operation {
57956 fscache_operation_release_t release;
57957 };
57958
57959-extern atomic_t fscache_op_debug_id;
57960+extern atomic_unchecked_t fscache_op_debug_id;
57961 extern void fscache_op_work_func(struct work_struct *work);
57962
57963 extern void fscache_enqueue_operation(struct fscache_operation *);
57964@@ -122,7 +122,7 @@ static inline void fscache_operation_ini
57965 {
57966 INIT_WORK(&op->work, fscache_op_work_func);
57967 atomic_set(&op->usage, 1);
57968- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
57969+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
57970 op->processor = processor;
57971 op->release = release;
57972 INIT_LIST_HEAD(&op->pend_link);
57973diff -urNp linux-3.1.1/include/linux/fs.h linux-3.1.1/include/linux/fs.h
57974--- linux-3.1.1/include/linux/fs.h 2011-11-11 15:19:27.000000000 -0500
57975+++ linux-3.1.1/include/linux/fs.h 2011-11-16 23:39:39.000000000 -0500
57976@@ -1588,7 +1588,8 @@ struct file_operations {
57977 int (*setlease)(struct file *, long, struct file_lock **);
57978 long (*fallocate)(struct file *file, int mode, loff_t offset,
57979 loff_t len);
57980-};
57981+} __do_const;
57982+typedef struct file_operations __no_const file_operations_no_const;
57983
57984 struct inode_operations {
57985 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
57986diff -urNp linux-3.1.1/include/linux/fsnotify.h linux-3.1.1/include/linux/fsnotify.h
57987--- linux-3.1.1/include/linux/fsnotify.h 2011-11-11 15:19:27.000000000 -0500
57988+++ linux-3.1.1/include/linux/fsnotify.h 2011-11-16 18:39:08.000000000 -0500
57989@@ -314,7 +314,7 @@ static inline void fsnotify_change(struc
57990 */
57991 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
57992 {
57993- return kstrdup(name, GFP_KERNEL);
57994+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
57995 }
57996
57997 /*
57998diff -urNp linux-3.1.1/include/linux/fs_struct.h linux-3.1.1/include/linux/fs_struct.h
57999--- linux-3.1.1/include/linux/fs_struct.h 2011-11-11 15:19:27.000000000 -0500
58000+++ linux-3.1.1/include/linux/fs_struct.h 2011-11-16 18:39:08.000000000 -0500
58001@@ -6,7 +6,7 @@
58002 #include <linux/seqlock.h>
58003
58004 struct fs_struct {
58005- int users;
58006+ atomic_t users;
58007 spinlock_t lock;
58008 seqcount_t seq;
58009 int umask;
58010diff -urNp linux-3.1.1/include/linux/ftrace_event.h linux-3.1.1/include/linux/ftrace_event.h
58011--- linux-3.1.1/include/linux/ftrace_event.h 2011-11-11 15:19:27.000000000 -0500
58012+++ linux-3.1.1/include/linux/ftrace_event.h 2011-11-16 18:39:08.000000000 -0500
58013@@ -97,7 +97,7 @@ struct trace_event_functions {
58014 trace_print_func raw;
58015 trace_print_func hex;
58016 trace_print_func binary;
58017-};
58018+} __no_const;
58019
58020 struct trace_event {
58021 struct hlist_node node;
58022@@ -252,7 +252,7 @@ extern int trace_define_field(struct ftr
58023 extern int trace_add_event_call(struct ftrace_event_call *call);
58024 extern void trace_remove_event_call(struct ftrace_event_call *call);
58025
58026-#define is_signed_type(type) (((type)(-1)) < 0)
58027+#define is_signed_type(type) (((type)(-1)) < (type)1)
58028
58029 int trace_set_clr_event(const char *system, const char *event, int set);
58030
58031diff -urNp linux-3.1.1/include/linux/genhd.h linux-3.1.1/include/linux/genhd.h
58032--- linux-3.1.1/include/linux/genhd.h 2011-11-11 15:19:27.000000000 -0500
58033+++ linux-3.1.1/include/linux/genhd.h 2011-11-16 18:39:08.000000000 -0500
58034@@ -184,7 +184,7 @@ struct gendisk {
58035 struct kobject *slave_dir;
58036
58037 struct timer_rand_state *random;
58038- atomic_t sync_io; /* RAID */
58039+ atomic_unchecked_t sync_io; /* RAID */
58040 struct disk_events *ev;
58041 #ifdef CONFIG_BLK_DEV_INTEGRITY
58042 struct blk_integrity *integrity;
58043diff -urNp linux-3.1.1/include/linux/gracl.h linux-3.1.1/include/linux/gracl.h
58044--- linux-3.1.1/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
58045+++ linux-3.1.1/include/linux/gracl.h 2011-11-16 18:40:31.000000000 -0500
58046@@ -0,0 +1,317 @@
58047+#ifndef GR_ACL_H
58048+#define GR_ACL_H
58049+
58050+#include <linux/grdefs.h>
58051+#include <linux/resource.h>
58052+#include <linux/capability.h>
58053+#include <linux/dcache.h>
58054+#include <asm/resource.h>
58055+
58056+/* Major status information */
58057+
58058+#define GR_VERSION "grsecurity 2.2.2"
58059+#define GRSECURITY_VERSION 0x2202
58060+
58061+enum {
58062+ GR_SHUTDOWN = 0,
58063+ GR_ENABLE = 1,
58064+ GR_SPROLE = 2,
58065+ GR_RELOAD = 3,
58066+ GR_SEGVMOD = 4,
58067+ GR_STATUS = 5,
58068+ GR_UNSPROLE = 6,
58069+ GR_PASSSET = 7,
58070+ GR_SPROLEPAM = 8,
58071+};
58072+
58073+/* Password setup definitions
58074+ * kernel/grhash.c */
58075+enum {
58076+ GR_PW_LEN = 128,
58077+ GR_SALT_LEN = 16,
58078+ GR_SHA_LEN = 32,
58079+};
58080+
58081+enum {
58082+ GR_SPROLE_LEN = 64,
58083+};
58084+
58085+enum {
58086+ GR_NO_GLOB = 0,
58087+ GR_REG_GLOB,
58088+ GR_CREATE_GLOB
58089+};
58090+
58091+#define GR_NLIMITS 32
58092+
58093+/* Begin Data Structures */
58094+
58095+struct sprole_pw {
58096+ unsigned char *rolename;
58097+ unsigned char salt[GR_SALT_LEN];
58098+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
58099+};
58100+
58101+struct name_entry {
58102+ __u32 key;
58103+ ino_t inode;
58104+ dev_t device;
58105+ char *name;
58106+ __u16 len;
58107+ __u8 deleted;
58108+ struct name_entry *prev;
58109+ struct name_entry *next;
58110+};
58111+
58112+struct inodev_entry {
58113+ struct name_entry *nentry;
58114+ struct inodev_entry *prev;
58115+ struct inodev_entry *next;
58116+};
58117+
58118+struct acl_role_db {
58119+ struct acl_role_label **r_hash;
58120+ __u32 r_size;
58121+};
58122+
58123+struct inodev_db {
58124+ struct inodev_entry **i_hash;
58125+ __u32 i_size;
58126+};
58127+
58128+struct name_db {
58129+ struct name_entry **n_hash;
58130+ __u32 n_size;
58131+};
58132+
58133+struct crash_uid {
58134+ uid_t uid;
58135+ unsigned long expires;
58136+};
58137+
58138+struct gr_hash_struct {
58139+ void **table;
58140+ void **nametable;
58141+ void *first;
58142+ __u32 table_size;
58143+ __u32 used_size;
58144+ int type;
58145+};
58146+
58147+/* Userspace Grsecurity ACL data structures */
58148+
58149+struct acl_subject_label {
58150+ char *filename;
58151+ ino_t inode;
58152+ dev_t device;
58153+ __u32 mode;
58154+ kernel_cap_t cap_mask;
58155+ kernel_cap_t cap_lower;
58156+ kernel_cap_t cap_invert_audit;
58157+
58158+ struct rlimit res[GR_NLIMITS];
58159+ __u32 resmask;
58160+
58161+ __u8 user_trans_type;
58162+ __u8 group_trans_type;
58163+ uid_t *user_transitions;
58164+ gid_t *group_transitions;
58165+ __u16 user_trans_num;
58166+ __u16 group_trans_num;
58167+
58168+ __u32 sock_families[2];
58169+ __u32 ip_proto[8];
58170+ __u32 ip_type;
58171+ struct acl_ip_label **ips;
58172+ __u32 ip_num;
58173+ __u32 inaddr_any_override;
58174+
58175+ __u32 crashes;
58176+ unsigned long expires;
58177+
58178+ struct acl_subject_label *parent_subject;
58179+ struct gr_hash_struct *hash;
58180+ struct acl_subject_label *prev;
58181+ struct acl_subject_label *next;
58182+
58183+ struct acl_object_label **obj_hash;
58184+ __u32 obj_hash_size;
58185+ __u16 pax_flags;
58186+};
58187+
58188+struct role_allowed_ip {
58189+ __u32 addr;
58190+ __u32 netmask;
58191+
58192+ struct role_allowed_ip *prev;
58193+ struct role_allowed_ip *next;
58194+};
58195+
58196+struct role_transition {
58197+ char *rolename;
58198+
58199+ struct role_transition *prev;
58200+ struct role_transition *next;
58201+};
58202+
58203+struct acl_role_label {
58204+ char *rolename;
58205+ uid_t uidgid;
58206+ __u16 roletype;
58207+
58208+ __u16 auth_attempts;
58209+ unsigned long expires;
58210+
58211+ struct acl_subject_label *root_label;
58212+ struct gr_hash_struct *hash;
58213+
58214+ struct acl_role_label *prev;
58215+ struct acl_role_label *next;
58216+
58217+ struct role_transition *transitions;
58218+ struct role_allowed_ip *allowed_ips;
58219+ uid_t *domain_children;
58220+ __u16 domain_child_num;
58221+
58222+ struct acl_subject_label **subj_hash;
58223+ __u32 subj_hash_size;
58224+};
58225+
58226+struct user_acl_role_db {
58227+ struct acl_role_label **r_table;
58228+ __u32 num_pointers; /* Number of allocations to track */
58229+ __u32 num_roles; /* Number of roles */
58230+ __u32 num_domain_children; /* Number of domain children */
58231+ __u32 num_subjects; /* Number of subjects */
58232+ __u32 num_objects; /* Number of objects */
58233+};
58234+
58235+struct acl_object_label {
58236+ char *filename;
58237+ ino_t inode;
58238+ dev_t device;
58239+ __u32 mode;
58240+
58241+ struct acl_subject_label *nested;
58242+ struct acl_object_label *globbed;
58243+
58244+ /* next two structures not used */
58245+
58246+ struct acl_object_label *prev;
58247+ struct acl_object_label *next;
58248+};
58249+
58250+struct acl_ip_label {
58251+ char *iface;
58252+ __u32 addr;
58253+ __u32 netmask;
58254+ __u16 low, high;
58255+ __u8 mode;
58256+ __u32 type;
58257+ __u32 proto[8];
58258+
58259+ /* next two structures not used */
58260+
58261+ struct acl_ip_label *prev;
58262+ struct acl_ip_label *next;
58263+};
58264+
58265+struct gr_arg {
58266+ struct user_acl_role_db role_db;
58267+ unsigned char pw[GR_PW_LEN];
58268+ unsigned char salt[GR_SALT_LEN];
58269+ unsigned char sum[GR_SHA_LEN];
58270+ unsigned char sp_role[GR_SPROLE_LEN];
58271+ struct sprole_pw *sprole_pws;
58272+ dev_t segv_device;
58273+ ino_t segv_inode;
58274+ uid_t segv_uid;
58275+ __u16 num_sprole_pws;
58276+ __u16 mode;
58277+};
58278+
58279+struct gr_arg_wrapper {
58280+ struct gr_arg *arg;
58281+ __u32 version;
58282+ __u32 size;
58283+};
58284+
58285+struct subject_map {
58286+ struct acl_subject_label *user;
58287+ struct acl_subject_label *kernel;
58288+ struct subject_map *prev;
58289+ struct subject_map *next;
58290+};
58291+
58292+struct acl_subj_map_db {
58293+ struct subject_map **s_hash;
58294+ __u32 s_size;
58295+};
58296+
58297+/* End Data Structures Section */
58298+
58299+/* Hash functions generated by empirical testing by Brad Spengler
58300+ Makes good use of the low bits of the inode. Generally 0-1 times
58301+ in loop for successful match. 0-3 for unsuccessful match.
58302+ Shift/add algorithm with modulus of table size and an XOR*/
58303+
58304+static __inline__ unsigned int
58305+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
58306+{
58307+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
58308+}
58309+
58310+ static __inline__ unsigned int
58311+shash(const struct acl_subject_label *userp, const unsigned int sz)
58312+{
58313+ return ((const unsigned long)userp % sz);
58314+}
58315+
58316+static __inline__ unsigned int
58317+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
58318+{
58319+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
58320+}
58321+
58322+static __inline__ unsigned int
58323+nhash(const char *name, const __u16 len, const unsigned int sz)
58324+{
58325+ return full_name_hash((const unsigned char *)name, len) % sz;
58326+}
58327+
58328+#define FOR_EACH_ROLE_START(role) \
58329+ role = role_list; \
58330+ while (role) {
58331+
58332+#define FOR_EACH_ROLE_END(role) \
58333+ role = role->prev; \
58334+ }
58335+
58336+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
58337+ subj = NULL; \
58338+ iter = 0; \
58339+ while (iter < role->subj_hash_size) { \
58340+ if (subj == NULL) \
58341+ subj = role->subj_hash[iter]; \
58342+ if (subj == NULL) { \
58343+ iter++; \
58344+ continue; \
58345+ }
58346+
58347+#define FOR_EACH_SUBJECT_END(subj,iter) \
58348+ subj = subj->next; \
58349+ if (subj == NULL) \
58350+ iter++; \
58351+ }
58352+
58353+
58354+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
58355+ subj = role->hash->first; \
58356+ while (subj != NULL) {
58357+
58358+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
58359+ subj = subj->next; \
58360+ }
58361+
58362+#endif
58363+
58364diff -urNp linux-3.1.1/include/linux/gralloc.h linux-3.1.1/include/linux/gralloc.h
58365--- linux-3.1.1/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
58366+++ linux-3.1.1/include/linux/gralloc.h 2011-11-16 18:40:31.000000000 -0500
58367@@ -0,0 +1,9 @@
58368+#ifndef __GRALLOC_H
58369+#define __GRALLOC_H
58370+
58371+void acl_free_all(void);
58372+int acl_alloc_stack_init(unsigned long size);
58373+void *acl_alloc(unsigned long len);
58374+void *acl_alloc_num(unsigned long num, unsigned long len);
58375+
58376+#endif
58377diff -urNp linux-3.1.1/include/linux/grdefs.h linux-3.1.1/include/linux/grdefs.h
58378--- linux-3.1.1/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
58379+++ linux-3.1.1/include/linux/grdefs.h 2011-11-16 18:40:31.000000000 -0500
58380@@ -0,0 +1,140 @@
58381+#ifndef GRDEFS_H
58382+#define GRDEFS_H
58383+
58384+/* Begin grsecurity status declarations */
58385+
58386+enum {
58387+ GR_READY = 0x01,
58388+ GR_STATUS_INIT = 0x00 // disabled state
58389+};
58390+
58391+/* Begin ACL declarations */
58392+
58393+/* Role flags */
58394+
58395+enum {
58396+ GR_ROLE_USER = 0x0001,
58397+ GR_ROLE_GROUP = 0x0002,
58398+ GR_ROLE_DEFAULT = 0x0004,
58399+ GR_ROLE_SPECIAL = 0x0008,
58400+ GR_ROLE_AUTH = 0x0010,
58401+ GR_ROLE_NOPW = 0x0020,
58402+ GR_ROLE_GOD = 0x0040,
58403+ GR_ROLE_LEARN = 0x0080,
58404+ GR_ROLE_TPE = 0x0100,
58405+ GR_ROLE_DOMAIN = 0x0200,
58406+ GR_ROLE_PAM = 0x0400,
58407+ GR_ROLE_PERSIST = 0x0800
58408+};
58409+
58410+/* ACL Subject and Object mode flags */
58411+enum {
58412+ GR_DELETED = 0x80000000
58413+};
58414+
58415+/* ACL Object-only mode flags */
58416+enum {
58417+ GR_READ = 0x00000001,
58418+ GR_APPEND = 0x00000002,
58419+ GR_WRITE = 0x00000004,
58420+ GR_EXEC = 0x00000008,
58421+ GR_FIND = 0x00000010,
58422+ GR_INHERIT = 0x00000020,
58423+ GR_SETID = 0x00000040,
58424+ GR_CREATE = 0x00000080,
58425+ GR_DELETE = 0x00000100,
58426+ GR_LINK = 0x00000200,
58427+ GR_AUDIT_READ = 0x00000400,
58428+ GR_AUDIT_APPEND = 0x00000800,
58429+ GR_AUDIT_WRITE = 0x00001000,
58430+ GR_AUDIT_EXEC = 0x00002000,
58431+ GR_AUDIT_FIND = 0x00004000,
58432+ GR_AUDIT_INHERIT= 0x00008000,
58433+ GR_AUDIT_SETID = 0x00010000,
58434+ GR_AUDIT_CREATE = 0x00020000,
58435+ GR_AUDIT_DELETE = 0x00040000,
58436+ GR_AUDIT_LINK = 0x00080000,
58437+ GR_PTRACERD = 0x00100000,
58438+ GR_NOPTRACE = 0x00200000,
58439+ GR_SUPPRESS = 0x00400000,
58440+ GR_NOLEARN = 0x00800000,
58441+ GR_INIT_TRANSFER= 0x01000000
58442+};
58443+
58444+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
58445+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
58446+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
58447+
58448+/* ACL subject-only mode flags */
58449+enum {
58450+ GR_KILL = 0x00000001,
58451+ GR_VIEW = 0x00000002,
58452+ GR_PROTECTED = 0x00000004,
58453+ GR_LEARN = 0x00000008,
58454+ GR_OVERRIDE = 0x00000010,
58455+ /* just a placeholder, this mode is only used in userspace */
58456+ GR_DUMMY = 0x00000020,
58457+ GR_PROTSHM = 0x00000040,
58458+ GR_KILLPROC = 0x00000080,
58459+ GR_KILLIPPROC = 0x00000100,
58460+ /* just a placeholder, this mode is only used in userspace */
58461+ GR_NOTROJAN = 0x00000200,
58462+ GR_PROTPROCFD = 0x00000400,
58463+ GR_PROCACCT = 0x00000800,
58464+ GR_RELAXPTRACE = 0x00001000,
58465+ GR_NESTED = 0x00002000,
58466+ GR_INHERITLEARN = 0x00004000,
58467+ GR_PROCFIND = 0x00008000,
58468+ GR_POVERRIDE = 0x00010000,
58469+ GR_KERNELAUTH = 0x00020000,
58470+ GR_ATSECURE = 0x00040000,
58471+ GR_SHMEXEC = 0x00080000
58472+};
58473+
58474+enum {
58475+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58476+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58477+ GR_PAX_ENABLE_MPROTECT = 0x0004,
58478+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
58479+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
58480+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
58481+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
58482+ GR_PAX_DISABLE_MPROTECT = 0x0400,
58483+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
58484+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
58485+};
58486+
58487+enum {
58488+ GR_ID_USER = 0x01,
58489+ GR_ID_GROUP = 0x02,
58490+};
58491+
58492+enum {
58493+ GR_ID_ALLOW = 0x01,
58494+ GR_ID_DENY = 0x02,
58495+};
58496+
58497+#define GR_CRASH_RES 31
58498+#define GR_UIDTABLE_MAX 500
58499+
58500+/* begin resource learning section */
58501+enum {
58502+ GR_RLIM_CPU_BUMP = 60,
58503+ GR_RLIM_FSIZE_BUMP = 50000,
58504+ GR_RLIM_DATA_BUMP = 10000,
58505+ GR_RLIM_STACK_BUMP = 1000,
58506+ GR_RLIM_CORE_BUMP = 10000,
58507+ GR_RLIM_RSS_BUMP = 500000,
58508+ GR_RLIM_NPROC_BUMP = 1,
58509+ GR_RLIM_NOFILE_BUMP = 5,
58510+ GR_RLIM_MEMLOCK_BUMP = 50000,
58511+ GR_RLIM_AS_BUMP = 500000,
58512+ GR_RLIM_LOCKS_BUMP = 2,
58513+ GR_RLIM_SIGPENDING_BUMP = 5,
58514+ GR_RLIM_MSGQUEUE_BUMP = 10000,
58515+ GR_RLIM_NICE_BUMP = 1,
58516+ GR_RLIM_RTPRIO_BUMP = 1,
58517+ GR_RLIM_RTTIME_BUMP = 1000000
58518+};
58519+
58520+#endif
58521diff -urNp linux-3.1.1/include/linux/grinternal.h linux-3.1.1/include/linux/grinternal.h
58522--- linux-3.1.1/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
58523+++ linux-3.1.1/include/linux/grinternal.h 2011-11-16 18:40:31.000000000 -0500
58524@@ -0,0 +1,220 @@
58525+#ifndef __GRINTERNAL_H
58526+#define __GRINTERNAL_H
58527+
58528+#ifdef CONFIG_GRKERNSEC
58529+
58530+#include <linux/fs.h>
58531+#include <linux/mnt_namespace.h>
58532+#include <linux/nsproxy.h>
58533+#include <linux/gracl.h>
58534+#include <linux/grdefs.h>
58535+#include <linux/grmsg.h>
58536+
58537+void gr_add_learn_entry(const char *fmt, ...)
58538+ __attribute__ ((format (printf, 1, 2)));
58539+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
58540+ const struct vfsmount *mnt);
58541+__u32 gr_check_create(const struct dentry *new_dentry,
58542+ const struct dentry *parent,
58543+ const struct vfsmount *mnt, const __u32 mode);
58544+int gr_check_protected_task(const struct task_struct *task);
58545+__u32 to_gr_audit(const __u32 reqmode);
58546+int gr_set_acls(const int type);
58547+int gr_apply_subject_to_task(struct task_struct *task);
58548+int gr_acl_is_enabled(void);
58549+char gr_roletype_to_char(void);
58550+
58551+void gr_handle_alertkill(struct task_struct *task);
58552+char *gr_to_filename(const struct dentry *dentry,
58553+ const struct vfsmount *mnt);
58554+char *gr_to_filename1(const struct dentry *dentry,
58555+ const struct vfsmount *mnt);
58556+char *gr_to_filename2(const struct dentry *dentry,
58557+ const struct vfsmount *mnt);
58558+char *gr_to_filename3(const struct dentry *dentry,
58559+ const struct vfsmount *mnt);
58560+
58561+extern int grsec_enable_harden_ptrace;
58562+extern int grsec_enable_link;
58563+extern int grsec_enable_fifo;
58564+extern int grsec_enable_execve;
58565+extern int grsec_enable_shm;
58566+extern int grsec_enable_execlog;
58567+extern int grsec_enable_signal;
58568+extern int grsec_enable_audit_ptrace;
58569+extern int grsec_enable_forkfail;
58570+extern int grsec_enable_time;
58571+extern int grsec_enable_rofs;
58572+extern int grsec_enable_chroot_shmat;
58573+extern int grsec_enable_chroot_mount;
58574+extern int grsec_enable_chroot_double;
58575+extern int grsec_enable_chroot_pivot;
58576+extern int grsec_enable_chroot_chdir;
58577+extern int grsec_enable_chroot_chmod;
58578+extern int grsec_enable_chroot_mknod;
58579+extern int grsec_enable_chroot_fchdir;
58580+extern int grsec_enable_chroot_nice;
58581+extern int grsec_enable_chroot_execlog;
58582+extern int grsec_enable_chroot_caps;
58583+extern int grsec_enable_chroot_sysctl;
58584+extern int grsec_enable_chroot_unix;
58585+extern int grsec_enable_tpe;
58586+extern int grsec_tpe_gid;
58587+extern int grsec_enable_tpe_all;
58588+extern int grsec_enable_tpe_invert;
58589+extern int grsec_enable_socket_all;
58590+extern int grsec_socket_all_gid;
58591+extern int grsec_enable_socket_client;
58592+extern int grsec_socket_client_gid;
58593+extern int grsec_enable_socket_server;
58594+extern int grsec_socket_server_gid;
58595+extern int grsec_audit_gid;
58596+extern int grsec_enable_group;
58597+extern int grsec_enable_audit_textrel;
58598+extern int grsec_enable_log_rwxmaps;
58599+extern int grsec_enable_mount;
58600+extern int grsec_enable_chdir;
58601+extern int grsec_resource_logging;
58602+extern int grsec_enable_blackhole;
58603+extern int grsec_lastack_retries;
58604+extern int grsec_enable_brute;
58605+extern int grsec_lock;
58606+
58607+extern spinlock_t grsec_alert_lock;
58608+extern unsigned long grsec_alert_wtime;
58609+extern unsigned long grsec_alert_fyet;
58610+
58611+extern spinlock_t grsec_audit_lock;
58612+
58613+extern rwlock_t grsec_exec_file_lock;
58614+
58615+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
58616+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
58617+ (tsk)->exec_file->f_vfsmnt) : "/")
58618+
58619+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
58620+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
58621+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58622+
58623+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
58624+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
58625+ (tsk)->exec_file->f_vfsmnt) : "/")
58626+
58627+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
58628+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
58629+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58630+
58631+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
58632+
58633+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
58634+
58635+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
58636+ (task)->pid, (cred)->uid, \
58637+ (cred)->euid, (cred)->gid, (cred)->egid, \
58638+ gr_parent_task_fullpath(task), \
58639+ (task)->real_parent->comm, (task)->real_parent->pid, \
58640+ (pcred)->uid, (pcred)->euid, \
58641+ (pcred)->gid, (pcred)->egid
58642+
58643+#define GR_CHROOT_CAPS {{ \
58644+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
58645+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
58646+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
58647+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
58648+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
58649+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
58650+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
58651+
58652+#define security_learn(normal_msg,args...) \
58653+({ \
58654+ read_lock(&grsec_exec_file_lock); \
58655+ gr_add_learn_entry(normal_msg "\n", ## args); \
58656+ read_unlock(&grsec_exec_file_lock); \
58657+})
58658+
58659+enum {
58660+ GR_DO_AUDIT,
58661+ GR_DONT_AUDIT,
58662+ /* used for non-audit messages that we shouldn't kill the task on */
58663+ GR_DONT_AUDIT_GOOD
58664+};
58665+
58666+enum {
58667+ GR_TTYSNIFF,
58668+ GR_RBAC,
58669+ GR_RBAC_STR,
58670+ GR_STR_RBAC,
58671+ GR_RBAC_MODE2,
58672+ GR_RBAC_MODE3,
58673+ GR_FILENAME,
58674+ GR_SYSCTL_HIDDEN,
58675+ GR_NOARGS,
58676+ GR_ONE_INT,
58677+ GR_ONE_INT_TWO_STR,
58678+ GR_ONE_STR,
58679+ GR_STR_INT,
58680+ GR_TWO_STR_INT,
58681+ GR_TWO_INT,
58682+ GR_TWO_U64,
58683+ GR_THREE_INT,
58684+ GR_FIVE_INT_TWO_STR,
58685+ GR_TWO_STR,
58686+ GR_THREE_STR,
58687+ GR_FOUR_STR,
58688+ GR_STR_FILENAME,
58689+ GR_FILENAME_STR,
58690+ GR_FILENAME_TWO_INT,
58691+ GR_FILENAME_TWO_INT_STR,
58692+ GR_TEXTREL,
58693+ GR_PTRACE,
58694+ GR_RESOURCE,
58695+ GR_CAP,
58696+ GR_SIG,
58697+ GR_SIG2,
58698+ GR_CRASH1,
58699+ GR_CRASH2,
58700+ GR_PSACCT,
58701+ GR_RWXMAP
58702+};
58703+
58704+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
58705+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
58706+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
58707+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
58708+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
58709+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
58710+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
58711+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
58712+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
58713+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
58714+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
58715+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
58716+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
58717+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
58718+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
58719+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
58720+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
58721+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
58722+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
58723+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
58724+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
58725+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
58726+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
58727+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
58728+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
58729+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
58730+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
58731+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
58732+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
58733+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
58734+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
58735+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
58736+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
58737+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
58738+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
58739+
58740+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
58741+
58742+#endif
58743+
58744+#endif
58745diff -urNp linux-3.1.1/include/linux/grmsg.h linux-3.1.1/include/linux/grmsg.h
58746--- linux-3.1.1/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
58747+++ linux-3.1.1/include/linux/grmsg.h 2011-11-16 18:40:31.000000000 -0500
58748@@ -0,0 +1,108 @@
58749+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
58750+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
58751+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
58752+#define GR_STOPMOD_MSG "denied modification of module state by "
58753+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
58754+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
58755+#define GR_IOPERM_MSG "denied use of ioperm() by "
58756+#define GR_IOPL_MSG "denied use of iopl() by "
58757+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
58758+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
58759+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
58760+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
58761+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
58762+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
58763+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
58764+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
58765+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
58766+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
58767+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
58768+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
58769+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
58770+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
58771+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
58772+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
58773+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
58774+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
58775+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
58776+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
58777+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
58778+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
58779+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
58780+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
58781+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
58782+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
58783+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
58784+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
58785+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
58786+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
58787+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
58788+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
58789+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
58790+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
58791+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
58792+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
58793+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
58794+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
58795+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
58796+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
58797+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
58798+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
58799+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
58800+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
58801+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
58802+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
58803+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
58804+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
58805+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
58806+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
58807+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
58808+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
58809+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
58810+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
58811+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
58812+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
58813+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
58814+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
58815+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
58816+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
58817+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
58818+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
58819+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
58820+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
58821+#define GR_FAILFORK_MSG "failed fork with errno %s by "
58822+#define GR_NICE_CHROOT_MSG "denied priority change by "
58823+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
58824+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
58825+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
58826+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
58827+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
58828+#define GR_TIME_MSG "time set by "
58829+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
58830+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
58831+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
58832+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
58833+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
58834+#define GR_BIND_MSG "denied bind() by "
58835+#define GR_CONNECT_MSG "denied connect() by "
58836+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
58837+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
58838+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
58839+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
58840+#define GR_CAP_ACL_MSG "use of %s denied for "
58841+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
58842+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
58843+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
58844+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
58845+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
58846+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
58847+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
58848+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
58849+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
58850+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
58851+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
58852+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
58853+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
58854+#define GR_VM86_MSG "denied use of vm86 by "
58855+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
58856+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
58857diff -urNp linux-3.1.1/include/linux/grsecurity.h linux-3.1.1/include/linux/grsecurity.h
58858--- linux-3.1.1/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
58859+++ linux-3.1.1/include/linux/grsecurity.h 2011-11-17 00:16:10.000000000 -0500
58860@@ -0,0 +1,228 @@
58861+#ifndef GR_SECURITY_H
58862+#define GR_SECURITY_H
58863+#include <linux/fs.h>
58864+#include <linux/fs_struct.h>
58865+#include <linux/binfmts.h>
58866+#include <linux/gracl.h>
58867+
58868+/* notify of brain-dead configs */
58869+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58870+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
58871+#endif
58872+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
58873+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
58874+#endif
58875+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58876+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58877+#endif
58878+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58879+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58880+#endif
58881+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
58882+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
58883+#endif
58884+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
58885+#error "CONFIG_PAX enabled, but no PaX options are enabled."
58886+#endif
58887+
58888+#include <linux/compat.h>
58889+
58890+struct user_arg_ptr {
58891+#ifdef CONFIG_COMPAT
58892+ bool is_compat;
58893+#endif
58894+ union {
58895+ const char __user *const __user *native;
58896+#ifdef CONFIG_COMPAT
58897+ compat_uptr_t __user *compat;
58898+#endif
58899+ } ptr;
58900+};
58901+
58902+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
58903+void gr_handle_brute_check(void);
58904+void gr_handle_kernel_exploit(void);
58905+int gr_process_user_ban(void);
58906+
58907+char gr_roletype_to_char(void);
58908+
58909+int gr_acl_enable_at_secure(void);
58910+
58911+int gr_check_user_change(int real, int effective, int fs);
58912+int gr_check_group_change(int real, int effective, int fs);
58913+
58914+void gr_del_task_from_ip_table(struct task_struct *p);
58915+
58916+int gr_pid_is_chrooted(struct task_struct *p);
58917+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
58918+int gr_handle_chroot_nice(void);
58919+int gr_handle_chroot_sysctl(const int op);
58920+int gr_handle_chroot_setpriority(struct task_struct *p,
58921+ const int niceval);
58922+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
58923+int gr_handle_chroot_chroot(const struct dentry *dentry,
58924+ const struct vfsmount *mnt);
58925+void gr_handle_chroot_chdir(struct path *path);
58926+int gr_handle_chroot_chmod(const struct dentry *dentry,
58927+ const struct vfsmount *mnt, const int mode);
58928+int gr_handle_chroot_mknod(const struct dentry *dentry,
58929+ const struct vfsmount *mnt, const int mode);
58930+int gr_handle_chroot_mount(const struct dentry *dentry,
58931+ const struct vfsmount *mnt,
58932+ const char *dev_name);
58933+int gr_handle_chroot_pivot(void);
58934+int gr_handle_chroot_unix(const pid_t pid);
58935+
58936+int gr_handle_rawio(const struct inode *inode);
58937+
58938+void gr_handle_ioperm(void);
58939+void gr_handle_iopl(void);
58940+
58941+int gr_tpe_allow(const struct file *file);
58942+
58943+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
58944+void gr_clear_chroot_entries(struct task_struct *task);
58945+
58946+void gr_log_forkfail(const int retval);
58947+void gr_log_timechange(void);
58948+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
58949+void gr_log_chdir(const struct dentry *dentry,
58950+ const struct vfsmount *mnt);
58951+void gr_log_chroot_exec(const struct dentry *dentry,
58952+ const struct vfsmount *mnt);
58953+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
58954+void gr_log_remount(const char *devname, const int retval);
58955+void gr_log_unmount(const char *devname, const int retval);
58956+void gr_log_mount(const char *from, const char *to, const int retval);
58957+void gr_log_textrel(struct vm_area_struct *vma);
58958+void gr_log_rwxmmap(struct file *file);
58959+void gr_log_rwxmprotect(struct file *file);
58960+
58961+int gr_handle_follow_link(const struct inode *parent,
58962+ const struct inode *inode,
58963+ const struct dentry *dentry,
58964+ const struct vfsmount *mnt);
58965+int gr_handle_fifo(const struct dentry *dentry,
58966+ const struct vfsmount *mnt,
58967+ const struct dentry *dir, const int flag,
58968+ const int acc_mode);
58969+int gr_handle_hardlink(const struct dentry *dentry,
58970+ const struct vfsmount *mnt,
58971+ struct inode *inode,
58972+ const int mode, const char *to);
58973+
58974+int gr_is_capable(const int cap);
58975+int gr_is_capable_nolog(const int cap);
58976+void gr_learn_resource(const struct task_struct *task, const int limit,
58977+ const unsigned long wanted, const int gt);
58978+void gr_copy_label(struct task_struct *tsk);
58979+void gr_handle_crash(struct task_struct *task, const int sig);
58980+int gr_handle_signal(const struct task_struct *p, const int sig);
58981+int gr_check_crash_uid(const uid_t uid);
58982+int gr_check_protected_task(const struct task_struct *task);
58983+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
58984+int gr_acl_handle_mmap(const struct file *file,
58985+ const unsigned long prot);
58986+int gr_acl_handle_mprotect(const struct file *file,
58987+ const unsigned long prot);
58988+int gr_check_hidden_task(const struct task_struct *tsk);
58989+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
58990+ const struct vfsmount *mnt);
58991+__u32 gr_acl_handle_utime(const struct dentry *dentry,
58992+ const struct vfsmount *mnt);
58993+__u32 gr_acl_handle_access(const struct dentry *dentry,
58994+ const struct vfsmount *mnt, const int fmode);
58995+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
58996+ const struct vfsmount *mnt, mode_t mode);
58997+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
58998+ const struct vfsmount *mnt, mode_t mode);
58999+__u32 gr_acl_handle_chown(const struct dentry *dentry,
59000+ const struct vfsmount *mnt);
59001+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
59002+ const struct vfsmount *mnt);
59003+int gr_handle_ptrace(struct task_struct *task, const long request);
59004+int gr_handle_proc_ptrace(struct task_struct *task);
59005+__u32 gr_acl_handle_execve(const struct dentry *dentry,
59006+ const struct vfsmount *mnt);
59007+int gr_check_crash_exec(const struct file *filp);
59008+int gr_acl_is_enabled(void);
59009+void gr_set_kernel_label(struct task_struct *task);
59010+void gr_set_role_label(struct task_struct *task, const uid_t uid,
59011+ const gid_t gid);
59012+int gr_set_proc_label(const struct dentry *dentry,
59013+ const struct vfsmount *mnt,
59014+ const int unsafe_share);
59015+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
59016+ const struct vfsmount *mnt);
59017+__u32 gr_acl_handle_open(const struct dentry *dentry,
59018+ const struct vfsmount *mnt, int acc_mode);
59019+__u32 gr_acl_handle_creat(const struct dentry *dentry,
59020+ const struct dentry *p_dentry,
59021+ const struct vfsmount *p_mnt,
59022+ int open_flags, int acc_mode, const int imode);
59023+void gr_handle_create(const struct dentry *dentry,
59024+ const struct vfsmount *mnt);
59025+void gr_handle_proc_create(const struct dentry *dentry,
59026+ const struct inode *inode);
59027+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
59028+ const struct dentry *parent_dentry,
59029+ const struct vfsmount *parent_mnt,
59030+ const int mode);
59031+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
59032+ const struct dentry *parent_dentry,
59033+ const struct vfsmount *parent_mnt);
59034+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
59035+ const struct vfsmount *mnt);
59036+void gr_handle_delete(const ino_t ino, const dev_t dev);
59037+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
59038+ const struct vfsmount *mnt);
59039+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
59040+ const struct dentry *parent_dentry,
59041+ const struct vfsmount *parent_mnt,
59042+ const char *from);
59043+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
59044+ const struct dentry *parent_dentry,
59045+ const struct vfsmount *parent_mnt,
59046+ const struct dentry *old_dentry,
59047+ const struct vfsmount *old_mnt, const char *to);
59048+int gr_acl_handle_rename(struct dentry *new_dentry,
59049+ struct dentry *parent_dentry,
59050+ const struct vfsmount *parent_mnt,
59051+ struct dentry *old_dentry,
59052+ struct inode *old_parent_inode,
59053+ struct vfsmount *old_mnt, const char *newname);
59054+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59055+ struct dentry *old_dentry,
59056+ struct dentry *new_dentry,
59057+ struct vfsmount *mnt, const __u8 replace);
59058+__u32 gr_check_link(const struct dentry *new_dentry,
59059+ const struct dentry *parent_dentry,
59060+ const struct vfsmount *parent_mnt,
59061+ const struct dentry *old_dentry,
59062+ const struct vfsmount *old_mnt);
59063+int gr_acl_handle_filldir(const struct file *file, const char *name,
59064+ const unsigned int namelen, const ino_t ino);
59065+
59066+__u32 gr_acl_handle_unix(const struct dentry *dentry,
59067+ const struct vfsmount *mnt);
59068+void gr_acl_handle_exit(void);
59069+void gr_acl_handle_psacct(struct task_struct *task, const long code);
59070+int gr_acl_handle_procpidmem(const struct task_struct *task);
59071+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
59072+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
59073+void gr_audit_ptrace(struct task_struct *task);
59074+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
59075+
59076+#ifdef CONFIG_GRKERNSEC
59077+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
59078+void gr_handle_vm86(void);
59079+void gr_handle_mem_readwrite(u64 from, u64 to);
59080+
59081+extern int grsec_enable_dmesg;
59082+extern int grsec_disable_privio;
59083+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59084+extern int grsec_enable_chroot_findtask;
59085+#endif
59086+#endif
59087+
59088+#endif
59089diff -urNp linux-3.1.1/include/linux/grsock.h linux-3.1.1/include/linux/grsock.h
59090--- linux-3.1.1/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
59091+++ linux-3.1.1/include/linux/grsock.h 2011-11-16 18:40:31.000000000 -0500
59092@@ -0,0 +1,19 @@
59093+#ifndef __GRSOCK_H
59094+#define __GRSOCK_H
59095+
59096+extern void gr_attach_curr_ip(const struct sock *sk);
59097+extern int gr_handle_sock_all(const int family, const int type,
59098+ const int protocol);
59099+extern int gr_handle_sock_server(const struct sockaddr *sck);
59100+extern int gr_handle_sock_server_other(const struct sock *sck);
59101+extern int gr_handle_sock_client(const struct sockaddr *sck);
59102+extern int gr_search_connect(struct socket * sock,
59103+ struct sockaddr_in * addr);
59104+extern int gr_search_bind(struct socket * sock,
59105+ struct sockaddr_in * addr);
59106+extern int gr_search_listen(struct socket * sock);
59107+extern int gr_search_accept(struct socket * sock);
59108+extern int gr_search_socket(const int domain, const int type,
59109+ const int protocol);
59110+
59111+#endif
59112diff -urNp linux-3.1.1/include/linux/hid.h linux-3.1.1/include/linux/hid.h
59113--- linux-3.1.1/include/linux/hid.h 2011-11-11 15:19:27.000000000 -0500
59114+++ linux-3.1.1/include/linux/hid.h 2011-11-16 18:39:08.000000000 -0500
59115@@ -676,7 +676,7 @@ struct hid_ll_driver {
59116 unsigned int code, int value);
59117
59118 int (*parse)(struct hid_device *hdev);
59119-};
59120+} __no_const;
59121
59122 #define PM_HINT_FULLON 1<<5
59123 #define PM_HINT_NORMAL 1<<1
59124diff -urNp linux-3.1.1/include/linux/highmem.h linux-3.1.1/include/linux/highmem.h
59125--- linux-3.1.1/include/linux/highmem.h 2011-11-11 15:19:27.000000000 -0500
59126+++ linux-3.1.1/include/linux/highmem.h 2011-11-16 18:39:08.000000000 -0500
59127@@ -185,6 +185,18 @@ static inline void clear_highpage(struct
59128 kunmap_atomic(kaddr, KM_USER0);
59129 }
59130
59131+static inline void sanitize_highpage(struct page *page)
59132+{
59133+ void *kaddr;
59134+ unsigned long flags;
59135+
59136+ local_irq_save(flags);
59137+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
59138+ clear_page(kaddr);
59139+ kunmap_atomic(kaddr, KM_CLEARPAGE);
59140+ local_irq_restore(flags);
59141+}
59142+
59143 static inline void zero_user_segments(struct page *page,
59144 unsigned start1, unsigned end1,
59145 unsigned start2, unsigned end2)
59146diff -urNp linux-3.1.1/include/linux/i2c.h linux-3.1.1/include/linux/i2c.h
59147--- linux-3.1.1/include/linux/i2c.h 2011-11-11 15:19:27.000000000 -0500
59148+++ linux-3.1.1/include/linux/i2c.h 2011-11-16 18:39:08.000000000 -0500
59149@@ -346,6 +346,7 @@ struct i2c_algorithm {
59150 /* To determine what the adapter supports */
59151 u32 (*functionality) (struct i2c_adapter *);
59152 };
59153+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
59154
59155 /*
59156 * i2c_adapter is the structure used to identify a physical i2c bus along
59157diff -urNp linux-3.1.1/include/linux/i2o.h linux-3.1.1/include/linux/i2o.h
59158--- linux-3.1.1/include/linux/i2o.h 2011-11-11 15:19:27.000000000 -0500
59159+++ linux-3.1.1/include/linux/i2o.h 2011-11-16 18:39:08.000000000 -0500
59160@@ -564,7 +564,7 @@ struct i2o_controller {
59161 struct i2o_device *exec; /* Executive */
59162 #if BITS_PER_LONG == 64
59163 spinlock_t context_list_lock; /* lock for context_list */
59164- atomic_t context_list_counter; /* needed for unique contexts */
59165+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
59166 struct list_head context_list; /* list of context id's
59167 and pointers */
59168 #endif
59169diff -urNp linux-3.1.1/include/linux/init.h linux-3.1.1/include/linux/init.h
59170--- linux-3.1.1/include/linux/init.h 2011-11-11 15:19:27.000000000 -0500
59171+++ linux-3.1.1/include/linux/init.h 2011-11-16 18:39:08.000000000 -0500
59172@@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
59173
59174 /* Each module must use one module_init(). */
59175 #define module_init(initfn) \
59176- static inline initcall_t __inittest(void) \
59177+ static inline __used initcall_t __inittest(void) \
59178 { return initfn; } \
59179 int init_module(void) __attribute__((alias(#initfn)));
59180
59181 /* This is only required if you want to be unloadable. */
59182 #define module_exit(exitfn) \
59183- static inline exitcall_t __exittest(void) \
59184+ static inline __used exitcall_t __exittest(void) \
59185 { return exitfn; } \
59186 void cleanup_module(void) __attribute__((alias(#exitfn)));
59187
59188diff -urNp linux-3.1.1/include/linux/init_task.h linux-3.1.1/include/linux/init_task.h
59189--- linux-3.1.1/include/linux/init_task.h 2011-11-11 15:19:27.000000000 -0500
59190+++ linux-3.1.1/include/linux/init_task.h 2011-11-16 18:39:08.000000000 -0500
59191@@ -126,6 +126,12 @@ extern struct cred init_cred;
59192 # define INIT_PERF_EVENTS(tsk)
59193 #endif
59194
59195+#ifdef CONFIG_X86
59196+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
59197+#else
59198+#define INIT_TASK_THREAD_INFO
59199+#endif
59200+
59201 /*
59202 * INIT_TASK is used to set up the first task table, touch at
59203 * your own risk!. Base=0, limit=0x1fffff (=2MB)
59204@@ -164,6 +170,7 @@ extern struct cred init_cred;
59205 RCU_INIT_POINTER(.cred, &init_cred), \
59206 .comm = "swapper", \
59207 .thread = INIT_THREAD, \
59208+ INIT_TASK_THREAD_INFO \
59209 .fs = &init_fs, \
59210 .files = &init_files, \
59211 .signal = &init_signals, \
59212diff -urNp linux-3.1.1/include/linux/intel-iommu.h linux-3.1.1/include/linux/intel-iommu.h
59213--- linux-3.1.1/include/linux/intel-iommu.h 2011-11-11 15:19:27.000000000 -0500
59214+++ linux-3.1.1/include/linux/intel-iommu.h 2011-11-16 18:39:08.000000000 -0500
59215@@ -296,7 +296,7 @@ struct iommu_flush {
59216 u8 fm, u64 type);
59217 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
59218 unsigned int size_order, u64 type);
59219-};
59220+} __no_const;
59221
59222 enum {
59223 SR_DMAR_FECTL_REG,
59224diff -urNp linux-3.1.1/include/linux/interrupt.h linux-3.1.1/include/linux/interrupt.h
59225--- linux-3.1.1/include/linux/interrupt.h 2011-11-11 15:19:27.000000000 -0500
59226+++ linux-3.1.1/include/linux/interrupt.h 2011-11-16 18:39:08.000000000 -0500
59227@@ -425,7 +425,7 @@ enum
59228 /* map softirq index to softirq name. update 'softirq_to_name' in
59229 * kernel/softirq.c when adding a new softirq.
59230 */
59231-extern char *softirq_to_name[NR_SOFTIRQS];
59232+extern const char * const softirq_to_name[NR_SOFTIRQS];
59233
59234 /* softirq mask and active fields moved to irq_cpustat_t in
59235 * asm/hardirq.h to get better cache usage. KAO
59236@@ -433,12 +433,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
59237
59238 struct softirq_action
59239 {
59240- void (*action)(struct softirq_action *);
59241+ void (*action)(void);
59242 };
59243
59244 asmlinkage void do_softirq(void);
59245 asmlinkage void __do_softirq(void);
59246-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
59247+extern void open_softirq(int nr, void (*action)(void));
59248 extern void softirq_init(void);
59249 static inline void __raise_softirq_irqoff(unsigned int nr)
59250 {
59251diff -urNp linux-3.1.1/include/linux/kallsyms.h linux-3.1.1/include/linux/kallsyms.h
59252--- linux-3.1.1/include/linux/kallsyms.h 2011-11-11 15:19:27.000000000 -0500
59253+++ linux-3.1.1/include/linux/kallsyms.h 2011-11-16 18:40:31.000000000 -0500
59254@@ -15,7 +15,8 @@
59255
59256 struct module;
59257
59258-#ifdef CONFIG_KALLSYMS
59259+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
59260+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59261 /* Lookup the address for a symbol. Returns 0 if not found. */
59262 unsigned long kallsyms_lookup_name(const char *name);
59263
59264@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
59265 /* Stupid that this does nothing, but I didn't create this mess. */
59266 #define __print_symbol(fmt, addr)
59267 #endif /*CONFIG_KALLSYMS*/
59268+#else /* when included by kallsyms.c, vsnprintf.c, or
59269+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
59270+extern void __print_symbol(const char *fmt, unsigned long address);
59271+extern int sprint_backtrace(char *buffer, unsigned long address);
59272+extern int sprint_symbol(char *buffer, unsigned long address);
59273+const char *kallsyms_lookup(unsigned long addr,
59274+ unsigned long *symbolsize,
59275+ unsigned long *offset,
59276+ char **modname, char *namebuf);
59277+#endif
59278
59279 /* This macro allows us to keep printk typechecking */
59280 static void __check_printsym_format(const char *fmt, ...)
59281diff -urNp linux-3.1.1/include/linux/kgdb.h linux-3.1.1/include/linux/kgdb.h
59282--- linux-3.1.1/include/linux/kgdb.h 2011-11-11 15:19:27.000000000 -0500
59283+++ linux-3.1.1/include/linux/kgdb.h 2011-11-16 18:39:08.000000000 -0500
59284@@ -53,7 +53,7 @@ extern int kgdb_connected;
59285 extern int kgdb_io_module_registered;
59286
59287 extern atomic_t kgdb_setting_breakpoint;
59288-extern atomic_t kgdb_cpu_doing_single_step;
59289+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
59290
59291 extern struct task_struct *kgdb_usethread;
59292 extern struct task_struct *kgdb_contthread;
59293@@ -251,7 +251,7 @@ struct kgdb_arch {
59294 void (*disable_hw_break)(struct pt_regs *regs);
59295 void (*remove_all_hw_break)(void);
59296 void (*correct_hw_break)(void);
59297-};
59298+} __do_const;
59299
59300 /**
59301 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
59302@@ -276,7 +276,7 @@ struct kgdb_io {
59303 void (*pre_exception) (void);
59304 void (*post_exception) (void);
59305 int is_console;
59306-};
59307+} __do_const;
59308
59309 extern struct kgdb_arch arch_kgdb_ops;
59310
59311diff -urNp linux-3.1.1/include/linux/kmod.h linux-3.1.1/include/linux/kmod.h
59312--- linux-3.1.1/include/linux/kmod.h 2011-11-11 15:19:27.000000000 -0500
59313+++ linux-3.1.1/include/linux/kmod.h 2011-11-16 18:40:31.000000000 -0500
59314@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysc
59315 * usually useless though. */
59316 extern int __request_module(bool wait, const char *name, ...) \
59317 __attribute__((format(printf, 2, 3)));
59318+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
59319+ __attribute__((format(printf, 3, 4)));
59320 #define request_module(mod...) __request_module(true, mod)
59321 #define request_module_nowait(mod...) __request_module(false, mod)
59322 #define try_then_request_module(x, mod...) \
59323diff -urNp linux-3.1.1/include/linux/kvm_host.h linux-3.1.1/include/linux/kvm_host.h
59324--- linux-3.1.1/include/linux/kvm_host.h 2011-11-11 15:19:27.000000000 -0500
59325+++ linux-3.1.1/include/linux/kvm_host.h 2011-11-16 18:39:08.000000000 -0500
59326@@ -308,7 +308,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
59327 void vcpu_load(struct kvm_vcpu *vcpu);
59328 void vcpu_put(struct kvm_vcpu *vcpu);
59329
59330-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59331+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59332 struct module *module);
59333 void kvm_exit(void);
59334
59335@@ -454,7 +454,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
59336 struct kvm_guest_debug *dbg);
59337 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
59338
59339-int kvm_arch_init(void *opaque);
59340+int kvm_arch_init(const void *opaque);
59341 void kvm_arch_exit(void);
59342
59343 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
59344diff -urNp linux-3.1.1/include/linux/libata.h linux-3.1.1/include/linux/libata.h
59345--- linux-3.1.1/include/linux/libata.h 2011-11-11 15:19:27.000000000 -0500
59346+++ linux-3.1.1/include/linux/libata.h 2011-11-16 18:39:08.000000000 -0500
59347@@ -909,7 +909,7 @@ struct ata_port_operations {
59348 * fields must be pointers.
59349 */
59350 const struct ata_port_operations *inherits;
59351-};
59352+} __do_const;
59353
59354 struct ata_port_info {
59355 unsigned long flags;
59356diff -urNp linux-3.1.1/include/linux/mca.h linux-3.1.1/include/linux/mca.h
59357--- linux-3.1.1/include/linux/mca.h 2011-11-11 15:19:27.000000000 -0500
59358+++ linux-3.1.1/include/linux/mca.h 2011-11-16 18:39:08.000000000 -0500
59359@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
59360 int region);
59361 void * (*mca_transform_memory)(struct mca_device *,
59362 void *memory);
59363-};
59364+} __no_const;
59365
59366 struct mca_bus {
59367 u64 default_dma_mask;
59368diff -urNp linux-3.1.1/include/linux/memory.h linux-3.1.1/include/linux/memory.h
59369--- linux-3.1.1/include/linux/memory.h 2011-11-11 15:19:27.000000000 -0500
59370+++ linux-3.1.1/include/linux/memory.h 2011-11-16 18:39:08.000000000 -0500
59371@@ -144,7 +144,7 @@ struct memory_accessor {
59372 size_t count);
59373 ssize_t (*write)(struct memory_accessor *, const char *buf,
59374 off_t offset, size_t count);
59375-};
59376+} __no_const;
59377
59378 /*
59379 * Kernel text modification mutex, used for code patching. Users of this lock
59380diff -urNp linux-3.1.1/include/linux/mfd/abx500.h linux-3.1.1/include/linux/mfd/abx500.h
59381--- linux-3.1.1/include/linux/mfd/abx500.h 2011-11-11 15:19:27.000000000 -0500
59382+++ linux-3.1.1/include/linux/mfd/abx500.h 2011-11-16 18:39:08.000000000 -0500
59383@@ -234,6 +234,7 @@ struct abx500_ops {
59384 int (*event_registers_startup_state_get) (struct device *, u8 *);
59385 int (*startup_irq_enabled) (struct device *, unsigned int);
59386 };
59387+typedef struct abx500_ops __no_const abx500_ops_no_const;
59388
59389 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
59390 void abx500_remove_ops(struct device *dev);
59391diff -urNp linux-3.1.1/include/linux/mm.h linux-3.1.1/include/linux/mm.h
59392--- linux-3.1.1/include/linux/mm.h 2011-11-11 15:19:27.000000000 -0500
59393+++ linux-3.1.1/include/linux/mm.h 2011-11-16 18:39:08.000000000 -0500
59394@@ -114,7 +114,14 @@ extern unsigned int kobjsize(const void
59395
59396 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
59397 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
59398+
59399+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
59400+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
59401+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
59402+#else
59403 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
59404+#endif
59405+
59406 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
59407 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
59408
59409@@ -1011,34 +1018,6 @@ int set_page_dirty(struct page *page);
59410 int set_page_dirty_lock(struct page *page);
59411 int clear_page_dirty_for_io(struct page *page);
59412
59413-/* Is the vma a continuation of the stack vma above it? */
59414-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
59415-{
59416- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
59417-}
59418-
59419-static inline int stack_guard_page_start(struct vm_area_struct *vma,
59420- unsigned long addr)
59421-{
59422- return (vma->vm_flags & VM_GROWSDOWN) &&
59423- (vma->vm_start == addr) &&
59424- !vma_growsdown(vma->vm_prev, addr);
59425-}
59426-
59427-/* Is the vma a continuation of the stack vma below it? */
59428-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
59429-{
59430- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
59431-}
59432-
59433-static inline int stack_guard_page_end(struct vm_area_struct *vma,
59434- unsigned long addr)
59435-{
59436- return (vma->vm_flags & VM_GROWSUP) &&
59437- (vma->vm_end == addr) &&
59438- !vma_growsup(vma->vm_next, addr);
59439-}
59440-
59441 extern unsigned long move_page_tables(struct vm_area_struct *vma,
59442 unsigned long old_addr, struct vm_area_struct *new_vma,
59443 unsigned long new_addr, unsigned long len);
59444@@ -1133,6 +1112,15 @@ static inline void sync_mm_rss(struct ta
59445 }
59446 #endif
59447
59448+#ifdef CONFIG_MMU
59449+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
59450+#else
59451+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
59452+{
59453+ return __pgprot(0);
59454+}
59455+#endif
59456+
59457 int vma_wants_writenotify(struct vm_area_struct *vma);
59458
59459 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
59460@@ -1417,6 +1405,7 @@ out:
59461 }
59462
59463 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
59464+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
59465
59466 extern unsigned long do_brk(unsigned long, unsigned long);
59467
59468@@ -1474,6 +1463,10 @@ extern struct vm_area_struct * find_vma(
59469 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
59470 struct vm_area_struct **pprev);
59471
59472+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
59473+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
59474+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
59475+
59476 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
59477 NULL if none. Assume start_addr < end_addr. */
59478 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
59479@@ -1490,15 +1483,6 @@ static inline unsigned long vma_pages(st
59480 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
59481 }
59482
59483-#ifdef CONFIG_MMU
59484-pgprot_t vm_get_page_prot(unsigned long vm_flags);
59485-#else
59486-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
59487-{
59488- return __pgprot(0);
59489-}
59490-#endif
59491-
59492 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
59493 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
59494 unsigned long pfn, unsigned long size, pgprot_t);
59495@@ -1612,7 +1596,7 @@ extern int unpoison_memory(unsigned long
59496 extern int sysctl_memory_failure_early_kill;
59497 extern int sysctl_memory_failure_recovery;
59498 extern void shake_page(struct page *p, int access);
59499-extern atomic_long_t mce_bad_pages;
59500+extern atomic_long_unchecked_t mce_bad_pages;
59501 extern int soft_offline_page(struct page *page, int flags);
59502
59503 extern void dump_page(struct page *page);
59504@@ -1626,5 +1610,11 @@ extern void copy_user_huge_page(struct p
59505 unsigned int pages_per_huge_page);
59506 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
59507
59508+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59509+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
59510+#else
59511+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
59512+#endif
59513+
59514 #endif /* __KERNEL__ */
59515 #endif /* _LINUX_MM_H */
59516diff -urNp linux-3.1.1/include/linux/mm_types.h linux-3.1.1/include/linux/mm_types.h
59517--- linux-3.1.1/include/linux/mm_types.h 2011-11-11 15:19:27.000000000 -0500
59518+++ linux-3.1.1/include/linux/mm_types.h 2011-11-16 18:39:08.000000000 -0500
59519@@ -230,6 +230,8 @@ struct vm_area_struct {
59520 #ifdef CONFIG_NUMA
59521 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
59522 #endif
59523+
59524+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
59525 };
59526
59527 struct core_thread {
59528@@ -362,6 +364,24 @@ struct mm_struct {
59529 #ifdef CONFIG_CPUMASK_OFFSTACK
59530 struct cpumask cpumask_allocation;
59531 #endif
59532+
59533+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59534+ unsigned long pax_flags;
59535+#endif
59536+
59537+#ifdef CONFIG_PAX_DLRESOLVE
59538+ unsigned long call_dl_resolve;
59539+#endif
59540+
59541+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59542+ unsigned long call_syscall;
59543+#endif
59544+
59545+#ifdef CONFIG_PAX_ASLR
59546+ unsigned long delta_mmap; /* randomized offset */
59547+ unsigned long delta_stack; /* randomized offset */
59548+#endif
59549+
59550 };
59551
59552 static inline void mm_init_cpumask(struct mm_struct *mm)
59553diff -urNp linux-3.1.1/include/linux/mmu_notifier.h linux-3.1.1/include/linux/mmu_notifier.h
59554--- linux-3.1.1/include/linux/mmu_notifier.h 2011-11-11 15:19:27.000000000 -0500
59555+++ linux-3.1.1/include/linux/mmu_notifier.h 2011-11-16 18:39:08.000000000 -0500
59556@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
59557 */
59558 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
59559 ({ \
59560- pte_t __pte; \
59561+ pte_t ___pte; \
59562 struct vm_area_struct *___vma = __vma; \
59563 unsigned long ___address = __address; \
59564- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
59565+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
59566 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
59567- __pte; \
59568+ ___pte; \
59569 })
59570
59571 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
59572diff -urNp linux-3.1.1/include/linux/mmzone.h linux-3.1.1/include/linux/mmzone.h
59573--- linux-3.1.1/include/linux/mmzone.h 2011-11-11 15:19:27.000000000 -0500
59574+++ linux-3.1.1/include/linux/mmzone.h 2011-11-16 18:39:08.000000000 -0500
59575@@ -356,7 +356,7 @@ struct zone {
59576 unsigned long flags; /* zone flags, see below */
59577
59578 /* Zone statistics */
59579- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59580+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59581
59582 /*
59583 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
59584diff -urNp linux-3.1.1/include/linux/mod_devicetable.h linux-3.1.1/include/linux/mod_devicetable.h
59585--- linux-3.1.1/include/linux/mod_devicetable.h 2011-11-11 15:19:27.000000000 -0500
59586+++ linux-3.1.1/include/linux/mod_devicetable.h 2011-11-16 18:39:08.000000000 -0500
59587@@ -12,7 +12,7 @@
59588 typedef unsigned long kernel_ulong_t;
59589 #endif
59590
59591-#define PCI_ANY_ID (~0)
59592+#define PCI_ANY_ID ((__u16)~0)
59593
59594 struct pci_device_id {
59595 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
59596@@ -131,7 +131,7 @@ struct usb_device_id {
59597 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
59598 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
59599
59600-#define HID_ANY_ID (~0)
59601+#define HID_ANY_ID (~0U)
59602
59603 struct hid_device_id {
59604 __u16 bus;
59605diff -urNp linux-3.1.1/include/linux/module.h linux-3.1.1/include/linux/module.h
59606--- linux-3.1.1/include/linux/module.h 2011-11-11 15:19:27.000000000 -0500
59607+++ linux-3.1.1/include/linux/module.h 2011-11-16 18:39:08.000000000 -0500
59608@@ -16,6 +16,7 @@
59609 #include <linux/kobject.h>
59610 #include <linux/moduleparam.h>
59611 #include <linux/tracepoint.h>
59612+#include <linux/fs.h>
59613
59614 #include <linux/percpu.h>
59615 #include <asm/module.h>
59616@@ -327,19 +328,16 @@ struct module
59617 int (*init)(void);
59618
59619 /* If this is non-NULL, vfree after init() returns */
59620- void *module_init;
59621+ void *module_init_rx, *module_init_rw;
59622
59623 /* Here is the actual code + data, vfree'd on unload. */
59624- void *module_core;
59625+ void *module_core_rx, *module_core_rw;
59626
59627 /* Here are the sizes of the init and core sections */
59628- unsigned int init_size, core_size;
59629+ unsigned int init_size_rw, core_size_rw;
59630
59631 /* The size of the executable code in each section. */
59632- unsigned int init_text_size, core_text_size;
59633-
59634- /* Size of RO sections of the module (text+rodata) */
59635- unsigned int init_ro_size, core_ro_size;
59636+ unsigned int init_size_rx, core_size_rx;
59637
59638 /* Arch-specific module values */
59639 struct mod_arch_specific arch;
59640@@ -395,6 +393,10 @@ struct module
59641 #ifdef CONFIG_EVENT_TRACING
59642 struct ftrace_event_call **trace_events;
59643 unsigned int num_trace_events;
59644+ struct file_operations trace_id;
59645+ struct file_operations trace_enable;
59646+ struct file_operations trace_format;
59647+ struct file_operations trace_filter;
59648 #endif
59649 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
59650 unsigned int num_ftrace_callsites;
59651@@ -445,16 +447,46 @@ bool is_module_address(unsigned long add
59652 bool is_module_percpu_address(unsigned long addr);
59653 bool is_module_text_address(unsigned long addr);
59654
59655+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
59656+{
59657+
59658+#ifdef CONFIG_PAX_KERNEXEC
59659+ if (ktla_ktva(addr) >= (unsigned long)start &&
59660+ ktla_ktva(addr) < (unsigned long)start + size)
59661+ return 1;
59662+#endif
59663+
59664+ return ((void *)addr >= start && (void *)addr < start + size);
59665+}
59666+
59667+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
59668+{
59669+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
59670+}
59671+
59672+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
59673+{
59674+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
59675+}
59676+
59677+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
59678+{
59679+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
59680+}
59681+
59682+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
59683+{
59684+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
59685+}
59686+
59687 static inline int within_module_core(unsigned long addr, struct module *mod)
59688 {
59689- return (unsigned long)mod->module_core <= addr &&
59690- addr < (unsigned long)mod->module_core + mod->core_size;
59691+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
59692 }
59693
59694 static inline int within_module_init(unsigned long addr, struct module *mod)
59695 {
59696- return (unsigned long)mod->module_init <= addr &&
59697- addr < (unsigned long)mod->module_init + mod->init_size;
59698+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
59699 }
59700
59701 /* Search for module by name: must hold module_mutex. */
59702diff -urNp linux-3.1.1/include/linux/moduleloader.h linux-3.1.1/include/linux/moduleloader.h
59703--- linux-3.1.1/include/linux/moduleloader.h 2011-11-11 15:19:27.000000000 -0500
59704+++ linux-3.1.1/include/linux/moduleloader.h 2011-11-16 18:39:08.000000000 -0500
59705@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(st
59706 sections. Returns NULL on failure. */
59707 void *module_alloc(unsigned long size);
59708
59709+#ifdef CONFIG_PAX_KERNEXEC
59710+void *module_alloc_exec(unsigned long size);
59711+#else
59712+#define module_alloc_exec(x) module_alloc(x)
59713+#endif
59714+
59715 /* Free memory returned from module_alloc. */
59716 void module_free(struct module *mod, void *module_region);
59717
59718+#ifdef CONFIG_PAX_KERNEXEC
59719+void module_free_exec(struct module *mod, void *module_region);
59720+#else
59721+#define module_free_exec(x, y) module_free((x), (y))
59722+#endif
59723+
59724 /* Apply the given relocation to the (simplified) ELF. Return -error
59725 or 0. */
59726 int apply_relocate(Elf_Shdr *sechdrs,
59727diff -urNp linux-3.1.1/include/linux/moduleparam.h linux-3.1.1/include/linux/moduleparam.h
59728--- linux-3.1.1/include/linux/moduleparam.h 2011-11-11 15:19:27.000000000 -0500
59729+++ linux-3.1.1/include/linux/moduleparam.h 2011-11-16 18:39:08.000000000 -0500
59730@@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
59731 * @len is usually just sizeof(string).
59732 */
59733 #define module_param_string(name, string, len, perm) \
59734- static const struct kparam_string __param_string_##name \
59735+ static const struct kparam_string __param_string_##name __used \
59736 = { len, string }; \
59737 __module_param_call(MODULE_PARAM_PREFIX, name, \
59738 &param_ops_string, \
59739@@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
59740 * module_param_named() for why this might be necessary.
59741 */
59742 #define module_param_array_named(name, array, type, nump, perm) \
59743- static const struct kparam_array __param_arr_##name \
59744+ static const struct kparam_array __param_arr_##name __used \
59745 = { .max = ARRAY_SIZE(array), .num = nump, \
59746 .ops = &param_ops_##type, \
59747 .elemsize = sizeof(array[0]), .elem = array }; \
59748diff -urNp linux-3.1.1/include/linux/namei.h linux-3.1.1/include/linux/namei.h
59749--- linux-3.1.1/include/linux/namei.h 2011-11-11 15:19:27.000000000 -0500
59750+++ linux-3.1.1/include/linux/namei.h 2011-11-16 18:39:08.000000000 -0500
59751@@ -24,7 +24,7 @@ struct nameidata {
59752 unsigned seq;
59753 int last_type;
59754 unsigned depth;
59755- char *saved_names[MAX_NESTED_LINKS + 1];
59756+ const char *saved_names[MAX_NESTED_LINKS + 1];
59757
59758 /* Intent data */
59759 union {
59760@@ -94,12 +94,12 @@ extern int follow_up(struct path *);
59761 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
59762 extern void unlock_rename(struct dentry *, struct dentry *);
59763
59764-static inline void nd_set_link(struct nameidata *nd, char *path)
59765+static inline void nd_set_link(struct nameidata *nd, const char *path)
59766 {
59767 nd->saved_names[nd->depth] = path;
59768 }
59769
59770-static inline char *nd_get_link(struct nameidata *nd)
59771+static inline const char *nd_get_link(const struct nameidata *nd)
59772 {
59773 return nd->saved_names[nd->depth];
59774 }
59775diff -urNp linux-3.1.1/include/linux/netdevice.h linux-3.1.1/include/linux/netdevice.h
59776--- linux-3.1.1/include/linux/netdevice.h 2011-11-11 15:19:27.000000000 -0500
59777+++ linux-3.1.1/include/linux/netdevice.h 2011-11-16 18:39:08.000000000 -0500
59778@@ -944,6 +944,7 @@ struct net_device_ops {
59779 int (*ndo_set_features)(struct net_device *dev,
59780 u32 features);
59781 };
59782+typedef struct net_device_ops __no_const net_device_ops_no_const;
59783
59784 /*
59785 * The DEVICE structure.
59786diff -urNp linux-3.1.1/include/linux/netfilter/xt_gradm.h linux-3.1.1/include/linux/netfilter/xt_gradm.h
59787--- linux-3.1.1/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
59788+++ linux-3.1.1/include/linux/netfilter/xt_gradm.h 2011-11-16 18:40:31.000000000 -0500
59789@@ -0,0 +1,9 @@
59790+#ifndef _LINUX_NETFILTER_XT_GRADM_H
59791+#define _LINUX_NETFILTER_XT_GRADM_H 1
59792+
59793+struct xt_gradm_mtinfo {
59794+ __u16 flags;
59795+ __u16 invflags;
59796+};
59797+
59798+#endif
59799diff -urNp linux-3.1.1/include/linux/of_pdt.h linux-3.1.1/include/linux/of_pdt.h
59800--- linux-3.1.1/include/linux/of_pdt.h 2011-11-11 15:19:27.000000000 -0500
59801+++ linux-3.1.1/include/linux/of_pdt.h 2011-11-16 18:39:08.000000000 -0500
59802@@ -32,7 +32,7 @@ struct of_pdt_ops {
59803
59804 /* return 0 on success; fill in 'len' with number of bytes in path */
59805 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
59806-};
59807+} __no_const;
59808
59809 extern void *prom_early_alloc(unsigned long size);
59810
59811diff -urNp linux-3.1.1/include/linux/oprofile.h linux-3.1.1/include/linux/oprofile.h
59812--- linux-3.1.1/include/linux/oprofile.h 2011-11-11 15:19:27.000000000 -0500
59813+++ linux-3.1.1/include/linux/oprofile.h 2011-11-16 18:39:08.000000000 -0500
59814@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
59815 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
59816 char const * name, ulong * val);
59817
59818-/** Create a file for read-only access to an atomic_t. */
59819+/** Create a file for read-only access to an atomic_unchecked_t. */
59820 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
59821- char const * name, atomic_t * val);
59822+ char const * name, atomic_unchecked_t * val);
59823
59824 /** create a directory */
59825 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
59826diff -urNp linux-3.1.1/include/linux/padata.h linux-3.1.1/include/linux/padata.h
59827--- linux-3.1.1/include/linux/padata.h 2011-11-11 15:19:27.000000000 -0500
59828+++ linux-3.1.1/include/linux/padata.h 2011-11-16 18:39:08.000000000 -0500
59829@@ -129,7 +129,7 @@ struct parallel_data {
59830 struct padata_instance *pinst;
59831 struct padata_parallel_queue __percpu *pqueue;
59832 struct padata_serial_queue __percpu *squeue;
59833- atomic_t seq_nr;
59834+ atomic_unchecked_t seq_nr;
59835 atomic_t reorder_objects;
59836 atomic_t refcnt;
59837 unsigned int max_seq_nr;
59838diff -urNp linux-3.1.1/include/linux/perf_event.h linux-3.1.1/include/linux/perf_event.h
59839--- linux-3.1.1/include/linux/perf_event.h 2011-11-11 15:19:27.000000000 -0500
59840+++ linux-3.1.1/include/linux/perf_event.h 2011-11-16 18:39:08.000000000 -0500
59841@@ -745,8 +745,8 @@ struct perf_event {
59842
59843 enum perf_event_active_state state;
59844 unsigned int attach_state;
59845- local64_t count;
59846- atomic64_t child_count;
59847+ local64_t count; /* PaX: fix it one day */
59848+ atomic64_unchecked_t child_count;
59849
59850 /*
59851 * These are the total time in nanoseconds that the event
59852@@ -797,8 +797,8 @@ struct perf_event {
59853 * These accumulate total time (in nanoseconds) that children
59854 * events have been enabled and running, respectively.
59855 */
59856- atomic64_t child_total_time_enabled;
59857- atomic64_t child_total_time_running;
59858+ atomic64_unchecked_t child_total_time_enabled;
59859+ atomic64_unchecked_t child_total_time_running;
59860
59861 /*
59862 * Protect attach/detach and child_list:
59863diff -urNp linux-3.1.1/include/linux/pipe_fs_i.h linux-3.1.1/include/linux/pipe_fs_i.h
59864--- linux-3.1.1/include/linux/pipe_fs_i.h 2011-11-11 15:19:27.000000000 -0500
59865+++ linux-3.1.1/include/linux/pipe_fs_i.h 2011-11-16 18:39:08.000000000 -0500
59866@@ -46,9 +46,9 @@ struct pipe_buffer {
59867 struct pipe_inode_info {
59868 wait_queue_head_t wait;
59869 unsigned int nrbufs, curbuf, buffers;
59870- unsigned int readers;
59871- unsigned int writers;
59872- unsigned int waiting_writers;
59873+ atomic_t readers;
59874+ atomic_t writers;
59875+ atomic_t waiting_writers;
59876 unsigned int r_counter;
59877 unsigned int w_counter;
59878 struct page *tmp_page;
59879diff -urNp linux-3.1.1/include/linux/pm_runtime.h linux-3.1.1/include/linux/pm_runtime.h
59880--- linux-3.1.1/include/linux/pm_runtime.h 2011-11-11 15:19:27.000000000 -0500
59881+++ linux-3.1.1/include/linux/pm_runtime.h 2011-11-16 18:39:08.000000000 -0500
59882@@ -99,7 +99,7 @@ static inline bool pm_runtime_callbacks_
59883
59884 static inline void pm_runtime_mark_last_busy(struct device *dev)
59885 {
59886- ACCESS_ONCE(dev->power.last_busy) = jiffies;
59887+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
59888 }
59889
59890 #else /* !CONFIG_PM_RUNTIME */
59891diff -urNp linux-3.1.1/include/linux/poison.h linux-3.1.1/include/linux/poison.h
59892--- linux-3.1.1/include/linux/poison.h 2011-11-11 15:19:27.000000000 -0500
59893+++ linux-3.1.1/include/linux/poison.h 2011-11-16 18:39:08.000000000 -0500
59894@@ -19,8 +19,8 @@
59895 * under normal circumstances, used to verify that nobody uses
59896 * non-initialized list entries.
59897 */
59898-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
59899-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
59900+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
59901+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
59902
59903 /********** include/linux/timer.h **********/
59904 /*
59905diff -urNp linux-3.1.1/include/linux/preempt.h linux-3.1.1/include/linux/preempt.h
59906--- linux-3.1.1/include/linux/preempt.h 2011-11-11 15:19:27.000000000 -0500
59907+++ linux-3.1.1/include/linux/preempt.h 2011-11-16 18:39:08.000000000 -0500
59908@@ -123,7 +123,7 @@ struct preempt_ops {
59909 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
59910 void (*sched_out)(struct preempt_notifier *notifier,
59911 struct task_struct *next);
59912-};
59913+} __no_const;
59914
59915 /**
59916 * preempt_notifier - key for installing preemption notifiers
59917diff -urNp linux-3.1.1/include/linux/proc_fs.h linux-3.1.1/include/linux/proc_fs.h
59918--- linux-3.1.1/include/linux/proc_fs.h 2011-11-11 15:19:27.000000000 -0500
59919+++ linux-3.1.1/include/linux/proc_fs.h 2011-11-16 18:40:31.000000000 -0500
59920@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
59921 return proc_create_data(name, mode, parent, proc_fops, NULL);
59922 }
59923
59924+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
59925+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
59926+{
59927+#ifdef CONFIG_GRKERNSEC_PROC_USER
59928+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
59929+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59930+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
59931+#else
59932+ return proc_create_data(name, mode, parent, proc_fops, NULL);
59933+#endif
59934+}
59935+
59936+
59937 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
59938 mode_t mode, struct proc_dir_entry *base,
59939 read_proc_t *read_proc, void * data)
59940@@ -258,7 +271,7 @@ union proc_op {
59941 int (*proc_show)(struct seq_file *m,
59942 struct pid_namespace *ns, struct pid *pid,
59943 struct task_struct *task);
59944-};
59945+} __no_const;
59946
59947 struct ctl_table_header;
59948 struct ctl_table;
59949diff -urNp linux-3.1.1/include/linux/ptrace.h linux-3.1.1/include/linux/ptrace.h
59950--- linux-3.1.1/include/linux/ptrace.h 2011-11-11 15:19:27.000000000 -0500
59951+++ linux-3.1.1/include/linux/ptrace.h 2011-11-16 18:40:31.000000000 -0500
59952@@ -129,10 +129,10 @@ extern void __ptrace_unlink(struct task_
59953 extern void exit_ptrace(struct task_struct *tracer);
59954 #define PTRACE_MODE_READ 1
59955 #define PTRACE_MODE_ATTACH 2
59956-/* Returns 0 on success, -errno on denial. */
59957-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
59958 /* Returns true on success, false on denial. */
59959 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
59960+/* Returns true on success, false on denial. */
59961+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
59962
59963 static inline int ptrace_reparented(struct task_struct *child)
59964 {
59965diff -urNp linux-3.1.1/include/linux/random.h linux-3.1.1/include/linux/random.h
59966--- linux-3.1.1/include/linux/random.h 2011-11-11 15:19:27.000000000 -0500
59967+++ linux-3.1.1/include/linux/random.h 2011-11-16 18:39:08.000000000 -0500
59968@@ -69,12 +69,17 @@ void srandom32(u32 seed);
59969
59970 u32 prandom32(struct rnd_state *);
59971
59972+static inline unsigned long pax_get_random_long(void)
59973+{
59974+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
59975+}
59976+
59977 /*
59978 * Handle minimum values for seeds
59979 */
59980 static inline u32 __seed(u32 x, u32 m)
59981 {
59982- return (x < m) ? x + m : x;
59983+ return (x <= m) ? x + m + 1 : x;
59984 }
59985
59986 /**
59987diff -urNp linux-3.1.1/include/linux/reboot.h linux-3.1.1/include/linux/reboot.h
59988--- linux-3.1.1/include/linux/reboot.h 2011-11-11 15:19:27.000000000 -0500
59989+++ linux-3.1.1/include/linux/reboot.h 2011-11-16 18:39:08.000000000 -0500
59990@@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(st
59991 * Architecture-specific implementations of sys_reboot commands.
59992 */
59993
59994-extern void machine_restart(char *cmd);
59995-extern void machine_halt(void);
59996-extern void machine_power_off(void);
59997+extern void machine_restart(char *cmd) __noreturn;
59998+extern void machine_halt(void) __noreturn;
59999+extern void machine_power_off(void) __noreturn;
60000
60001 extern void machine_shutdown(void);
60002 struct pt_regs;
60003@@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struc
60004 */
60005
60006 extern void kernel_restart_prepare(char *cmd);
60007-extern void kernel_restart(char *cmd);
60008-extern void kernel_halt(void);
60009-extern void kernel_power_off(void);
60010+extern void kernel_restart(char *cmd) __noreturn;
60011+extern void kernel_halt(void) __noreturn;
60012+extern void kernel_power_off(void) __noreturn;
60013
60014 extern int C_A_D; /* for sysctl */
60015 void ctrl_alt_del(void);
60016@@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
60017 * Emergency restart, callable from an interrupt handler.
60018 */
60019
60020-extern void emergency_restart(void);
60021+extern void emergency_restart(void) __noreturn;
60022 #include <asm/emergency-restart.h>
60023
60024 #endif
60025diff -urNp linux-3.1.1/include/linux/reiserfs_fs.h linux-3.1.1/include/linux/reiserfs_fs.h
60026--- linux-3.1.1/include/linux/reiserfs_fs.h 2011-11-11 15:19:27.000000000 -0500
60027+++ linux-3.1.1/include/linux/reiserfs_fs.h 2011-11-16 18:39:08.000000000 -0500
60028@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
60029 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
60030
60031 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
60032-#define get_generation(s) atomic_read (&fs_generation(s))
60033+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
60034 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
60035 #define __fs_changed(gen,s) (gen != get_generation (s))
60036 #define fs_changed(gen,s) \
60037diff -urNp linux-3.1.1/include/linux/reiserfs_fs_sb.h linux-3.1.1/include/linux/reiserfs_fs_sb.h
60038--- linux-3.1.1/include/linux/reiserfs_fs_sb.h 2011-11-11 15:19:27.000000000 -0500
60039+++ linux-3.1.1/include/linux/reiserfs_fs_sb.h 2011-11-16 18:39:08.000000000 -0500
60040@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
60041 /* Comment? -Hans */
60042 wait_queue_head_t s_wait;
60043 /* To be obsoleted soon by per buffer seals.. -Hans */
60044- atomic_t s_generation_counter; // increased by one every time the
60045+ atomic_unchecked_t s_generation_counter; // increased by one every time the
60046 // tree gets re-balanced
60047 unsigned long s_properties; /* File system properties. Currently holds
60048 on-disk FS format */
60049diff -urNp linux-3.1.1/include/linux/relay.h linux-3.1.1/include/linux/relay.h
60050--- linux-3.1.1/include/linux/relay.h 2011-11-11 15:19:27.000000000 -0500
60051+++ linux-3.1.1/include/linux/relay.h 2011-11-16 18:39:08.000000000 -0500
60052@@ -159,7 +159,7 @@ struct rchan_callbacks
60053 * The callback should return 0 if successful, negative if not.
60054 */
60055 int (*remove_buf_file)(struct dentry *dentry);
60056-};
60057+} __no_const;
60058
60059 /*
60060 * CONFIG_RELAY kernel API, kernel/relay.c
60061diff -urNp linux-3.1.1/include/linux/rfkill.h linux-3.1.1/include/linux/rfkill.h
60062--- linux-3.1.1/include/linux/rfkill.h 2011-11-11 15:19:27.000000000 -0500
60063+++ linux-3.1.1/include/linux/rfkill.h 2011-11-16 18:39:08.000000000 -0500
60064@@ -147,6 +147,7 @@ struct rfkill_ops {
60065 void (*query)(struct rfkill *rfkill, void *data);
60066 int (*set_block)(void *data, bool blocked);
60067 };
60068+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
60069
60070 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
60071 /**
60072diff -urNp linux-3.1.1/include/linux/rmap.h linux-3.1.1/include/linux/rmap.h
60073--- linux-3.1.1/include/linux/rmap.h 2011-11-11 15:19:27.000000000 -0500
60074+++ linux-3.1.1/include/linux/rmap.h 2011-11-16 18:39:08.000000000 -0500
60075@@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
60076 void anon_vma_init(void); /* create anon_vma_cachep */
60077 int anon_vma_prepare(struct vm_area_struct *);
60078 void unlink_anon_vmas(struct vm_area_struct *);
60079-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
60080-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
60081+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
60082+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
60083 void __anon_vma_link(struct vm_area_struct *);
60084
60085 static inline void anon_vma_merge(struct vm_area_struct *vma,
60086diff -urNp linux-3.1.1/include/linux/sched.h linux-3.1.1/include/linux/sched.h
60087--- linux-3.1.1/include/linux/sched.h 2011-11-11 15:19:27.000000000 -0500
60088+++ linux-3.1.1/include/linux/sched.h 2011-11-16 18:40:31.000000000 -0500
60089@@ -100,6 +100,7 @@ struct bio_list;
60090 struct fs_struct;
60091 struct perf_event_context;
60092 struct blk_plug;
60093+struct linux_binprm;
60094
60095 /*
60096 * List of flags we want to share for kernel threads,
60097@@ -380,10 +381,13 @@ struct user_namespace;
60098 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
60099
60100 extern int sysctl_max_map_count;
60101+extern unsigned long sysctl_heap_stack_gap;
60102
60103 #include <linux/aio.h>
60104
60105 #ifdef CONFIG_MMU
60106+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
60107+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
60108 extern void arch_pick_mmap_layout(struct mm_struct *mm);
60109 extern unsigned long
60110 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
60111@@ -629,6 +633,17 @@ struct signal_struct {
60112 #ifdef CONFIG_TASKSTATS
60113 struct taskstats *stats;
60114 #endif
60115+
60116+#ifdef CONFIG_GRKERNSEC
60117+ u32 curr_ip;
60118+ u32 saved_ip;
60119+ u32 gr_saddr;
60120+ u32 gr_daddr;
60121+ u16 gr_sport;
60122+ u16 gr_dport;
60123+ u8 used_accept:1;
60124+#endif
60125+
60126 #ifdef CONFIG_AUDIT
60127 unsigned audit_tty;
60128 struct tty_audit_buf *tty_audit_buf;
60129@@ -710,6 +725,11 @@ struct user_struct {
60130 struct key *session_keyring; /* UID's default session keyring */
60131 #endif
60132
60133+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
60134+ unsigned int banned;
60135+ unsigned long ban_expires;
60136+#endif
60137+
60138 /* Hash table maintenance information */
60139 struct hlist_node uidhash_node;
60140 uid_t uid;
60141@@ -1340,8 +1360,8 @@ struct task_struct {
60142 struct list_head thread_group;
60143
60144 struct completion *vfork_done; /* for vfork() */
60145- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
60146- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60147+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
60148+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60149
60150 cputime_t utime, stime, utimescaled, stimescaled;
60151 cputime_t gtime;
60152@@ -1357,13 +1377,6 @@ struct task_struct {
60153 struct task_cputime cputime_expires;
60154 struct list_head cpu_timers[3];
60155
60156-/* process credentials */
60157- const struct cred __rcu *real_cred; /* objective and real subjective task
60158- * credentials (COW) */
60159- const struct cred __rcu *cred; /* effective (overridable) subjective task
60160- * credentials (COW) */
60161- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60162-
60163 char comm[TASK_COMM_LEN]; /* executable name excluding path
60164 - access with [gs]et_task_comm (which lock
60165 it with task_lock())
60166@@ -1380,8 +1393,16 @@ struct task_struct {
60167 #endif
60168 /* CPU-specific state of this task */
60169 struct thread_struct thread;
60170+/* thread_info moved to task_struct */
60171+#ifdef CONFIG_X86
60172+ struct thread_info tinfo;
60173+#endif
60174 /* filesystem information */
60175 struct fs_struct *fs;
60176+
60177+ const struct cred __rcu *cred; /* effective (overridable) subjective task
60178+ * credentials (COW) */
60179+
60180 /* open file information */
60181 struct files_struct *files;
60182 /* namespaces */
60183@@ -1428,6 +1449,11 @@ struct task_struct {
60184 struct rt_mutex_waiter *pi_blocked_on;
60185 #endif
60186
60187+/* process credentials */
60188+ const struct cred __rcu *real_cred; /* objective and real subjective task
60189+ * credentials (COW) */
60190+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60191+
60192 #ifdef CONFIG_DEBUG_MUTEXES
60193 /* mutex deadlock detection */
60194 struct mutex_waiter *blocked_on;
60195@@ -1537,6 +1563,21 @@ struct task_struct {
60196 unsigned long default_timer_slack_ns;
60197
60198 struct list_head *scm_work_list;
60199+
60200+#ifdef CONFIG_GRKERNSEC
60201+ /* grsecurity */
60202+ struct dentry *gr_chroot_dentry;
60203+ struct acl_subject_label *acl;
60204+ struct acl_role_label *role;
60205+ struct file *exec_file;
60206+ u16 acl_role_id;
60207+ /* is this the task that authenticated to the special role */
60208+ u8 acl_sp_role;
60209+ u8 is_writable;
60210+ u8 brute;
60211+ u8 gr_is_chrooted;
60212+#endif
60213+
60214 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60215 /* Index of current stored address in ret_stack */
60216 int curr_ret_stack;
60217@@ -1571,6 +1612,57 @@ struct task_struct {
60218 #endif
60219 };
60220
60221+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
60222+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
60223+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
60224+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
60225+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
60226+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
60227+
60228+#ifdef CONFIG_PAX_SOFTMODE
60229+extern int pax_softmode;
60230+#endif
60231+
60232+extern int pax_check_flags(unsigned long *);
60233+
60234+/* if tsk != current then task_lock must be held on it */
60235+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60236+static inline unsigned long pax_get_flags(struct task_struct *tsk)
60237+{
60238+ if (likely(tsk->mm))
60239+ return tsk->mm->pax_flags;
60240+ else
60241+ return 0UL;
60242+}
60243+
60244+/* if tsk != current then task_lock must be held on it */
60245+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
60246+{
60247+ if (likely(tsk->mm)) {
60248+ tsk->mm->pax_flags = flags;
60249+ return 0;
60250+ }
60251+ return -EINVAL;
60252+}
60253+#endif
60254+
60255+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60256+extern void pax_set_initial_flags(struct linux_binprm *bprm);
60257+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60258+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60259+#endif
60260+
60261+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
60262+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
60263+extern void pax_report_refcount_overflow(struct pt_regs *regs);
60264+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
60265+
60266+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
60267+extern void pax_track_stack(void);
60268+#else
60269+static inline void pax_track_stack(void) {}
60270+#endif
60271+
60272 /* Future-safe accessor for struct task_struct's cpus_allowed. */
60273 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
60274
60275@@ -2074,7 +2166,9 @@ void yield(void);
60276 extern struct exec_domain default_exec_domain;
60277
60278 union thread_union {
60279+#ifndef CONFIG_X86
60280 struct thread_info thread_info;
60281+#endif
60282 unsigned long stack[THREAD_SIZE/sizeof(long)];
60283 };
60284
60285@@ -2107,6 +2201,7 @@ extern struct pid_namespace init_pid_ns;
60286 */
60287
60288 extern struct task_struct *find_task_by_vpid(pid_t nr);
60289+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
60290 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
60291 struct pid_namespace *ns);
60292
60293@@ -2243,7 +2338,7 @@ extern void __cleanup_sighand(struct sig
60294 extern void exit_itimers(struct signal_struct *);
60295 extern void flush_itimer_signals(void);
60296
60297-extern NORET_TYPE void do_group_exit(int);
60298+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
60299
60300 extern void daemonize(const char *, ...);
60301 extern int allow_signal(int);
60302@@ -2408,13 +2503,17 @@ static inline unsigned long *end_of_stac
60303
60304 #endif
60305
60306-static inline int object_is_on_stack(void *obj)
60307+static inline int object_starts_on_stack(void *obj)
60308 {
60309- void *stack = task_stack_page(current);
60310+ const void *stack = task_stack_page(current);
60311
60312 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
60313 }
60314
60315+#ifdef CONFIG_PAX_USERCOPY
60316+extern int object_is_on_stack(const void *obj, unsigned long len);
60317+#endif
60318+
60319 extern void thread_info_cache_init(void);
60320
60321 #ifdef CONFIG_DEBUG_STACK_USAGE
60322diff -urNp linux-3.1.1/include/linux/screen_info.h linux-3.1.1/include/linux/screen_info.h
60323--- linux-3.1.1/include/linux/screen_info.h 2011-11-11 15:19:27.000000000 -0500
60324+++ linux-3.1.1/include/linux/screen_info.h 2011-11-16 18:39:08.000000000 -0500
60325@@ -43,7 +43,8 @@ struct screen_info {
60326 __u16 pages; /* 0x32 */
60327 __u16 vesa_attributes; /* 0x34 */
60328 __u32 capabilities; /* 0x36 */
60329- __u8 _reserved[6]; /* 0x3a */
60330+ __u16 vesapm_size; /* 0x3a */
60331+ __u8 _reserved[4]; /* 0x3c */
60332 } __attribute__((packed));
60333
60334 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
60335diff -urNp linux-3.1.1/include/linux/security.h linux-3.1.1/include/linux/security.h
60336--- linux-3.1.1/include/linux/security.h 2011-11-11 15:19:27.000000000 -0500
60337+++ linux-3.1.1/include/linux/security.h 2011-11-16 18:40:31.000000000 -0500
60338@@ -36,6 +36,7 @@
60339 #include <linux/key.h>
60340 #include <linux/xfrm.h>
60341 #include <linux/slab.h>
60342+#include <linux/grsecurity.h>
60343 #include <net/flow.h>
60344
60345 /* Maximum number of letters for an LSM name string */
60346diff -urNp linux-3.1.1/include/linux/seq_file.h linux-3.1.1/include/linux/seq_file.h
60347--- linux-3.1.1/include/linux/seq_file.h 2011-11-11 15:19:27.000000000 -0500
60348+++ linux-3.1.1/include/linux/seq_file.h 2011-11-16 18:39:08.000000000 -0500
60349@@ -33,6 +33,7 @@ struct seq_operations {
60350 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
60351 int (*show) (struct seq_file *m, void *v);
60352 };
60353+typedef struct seq_operations __no_const seq_operations_no_const;
60354
60355 #define SEQ_SKIP 1
60356
60357diff -urNp linux-3.1.1/include/linux/shm.h linux-3.1.1/include/linux/shm.h
60358--- linux-3.1.1/include/linux/shm.h 2011-11-11 15:19:27.000000000 -0500
60359+++ linux-3.1.1/include/linux/shm.h 2011-11-16 18:59:58.000000000 -0500
60360@@ -98,6 +98,10 @@ struct shmid_kernel /* private to the ke
60361
60362 /* The task created the shm object. NULL if the task is dead. */
60363 struct task_struct *shm_creator;
60364+#ifdef CONFIG_GRKERNSEC
60365+ time_t shm_createtime;
60366+ pid_t shm_lapid;
60367+#endif
60368 };
60369
60370 /* shm_mode upper byte flags */
60371diff -urNp linux-3.1.1/include/linux/skbuff.h linux-3.1.1/include/linux/skbuff.h
60372--- linux-3.1.1/include/linux/skbuff.h 2011-11-11 15:19:27.000000000 -0500
60373+++ linux-3.1.1/include/linux/skbuff.h 2011-11-16 18:39:08.000000000 -0500
60374@@ -610,7 +610,7 @@ static inline struct skb_shared_hwtstamp
60375 */
60376 static inline int skb_queue_empty(const struct sk_buff_head *list)
60377 {
60378- return list->next == (struct sk_buff *)list;
60379+ return list->next == (const struct sk_buff *)list;
60380 }
60381
60382 /**
60383@@ -623,7 +623,7 @@ static inline int skb_queue_empty(const
60384 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60385 const struct sk_buff *skb)
60386 {
60387- return skb->next == (struct sk_buff *)list;
60388+ return skb->next == (const struct sk_buff *)list;
60389 }
60390
60391 /**
60392@@ -636,7 +636,7 @@ static inline bool skb_queue_is_last(con
60393 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
60394 const struct sk_buff *skb)
60395 {
60396- return skb->prev == (struct sk_buff *)list;
60397+ return skb->prev == (const struct sk_buff *)list;
60398 }
60399
60400 /**
60401@@ -1458,7 +1458,7 @@ static inline int pskb_network_may_pull(
60402 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
60403 */
60404 #ifndef NET_SKB_PAD
60405-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
60406+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
60407 #endif
60408
60409 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
60410diff -urNp linux-3.1.1/include/linux/slab_def.h linux-3.1.1/include/linux/slab_def.h
60411--- linux-3.1.1/include/linux/slab_def.h 2011-11-11 15:19:27.000000000 -0500
60412+++ linux-3.1.1/include/linux/slab_def.h 2011-11-16 18:39:08.000000000 -0500
60413@@ -68,10 +68,10 @@ struct kmem_cache {
60414 unsigned long node_allocs;
60415 unsigned long node_frees;
60416 unsigned long node_overflow;
60417- atomic_t allochit;
60418- atomic_t allocmiss;
60419- atomic_t freehit;
60420- atomic_t freemiss;
60421+ atomic_unchecked_t allochit;
60422+ atomic_unchecked_t allocmiss;
60423+ atomic_unchecked_t freehit;
60424+ atomic_unchecked_t freemiss;
60425
60426 /*
60427 * If debugging is enabled, then the allocator can add additional
60428diff -urNp linux-3.1.1/include/linux/slab.h linux-3.1.1/include/linux/slab.h
60429--- linux-3.1.1/include/linux/slab.h 2011-11-11 15:19:27.000000000 -0500
60430+++ linux-3.1.1/include/linux/slab.h 2011-11-16 18:39:08.000000000 -0500
60431@@ -11,12 +11,20 @@
60432
60433 #include <linux/gfp.h>
60434 #include <linux/types.h>
60435+#include <linux/err.h>
60436
60437 /*
60438 * Flags to pass to kmem_cache_create().
60439 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
60440 */
60441 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
60442+
60443+#ifdef CONFIG_PAX_USERCOPY
60444+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
60445+#else
60446+#define SLAB_USERCOPY 0x00000000UL
60447+#endif
60448+
60449 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
60450 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
60451 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
60452@@ -87,10 +95,13 @@
60453 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
60454 * Both make kfree a no-op.
60455 */
60456-#define ZERO_SIZE_PTR ((void *)16)
60457+#define ZERO_SIZE_PTR \
60458+({ \
60459+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
60460+ (void *)(-MAX_ERRNO-1L); \
60461+})
60462
60463-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
60464- (unsigned long)ZERO_SIZE_PTR)
60465+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
60466
60467 /*
60468 * struct kmem_cache related prototypes
60469@@ -161,6 +172,7 @@ void * __must_check krealloc(const void
60470 void kfree(const void *);
60471 void kzfree(const void *);
60472 size_t ksize(const void *);
60473+void check_object_size(const void *ptr, unsigned long n, bool to);
60474
60475 /*
60476 * Allocator specific definitions. These are mainly used to establish optimized
60477@@ -353,4 +365,59 @@ static inline void *kzalloc_node(size_t
60478
60479 void __init kmem_cache_init_late(void);
60480
60481+#define kmalloc(x, y) \
60482+({ \
60483+ void *___retval; \
60484+ intoverflow_t ___x = (intoverflow_t)x; \
60485+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
60486+ ___retval = NULL; \
60487+ else \
60488+ ___retval = kmalloc((size_t)___x, (y)); \
60489+ ___retval; \
60490+})
60491+
60492+#define kmalloc_node(x, y, z) \
60493+({ \
60494+ void *___retval; \
60495+ intoverflow_t ___x = (intoverflow_t)x; \
60496+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
60497+ ___retval = NULL; \
60498+ else \
60499+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
60500+ ___retval; \
60501+})
60502+
60503+#define kzalloc(x, y) \
60504+({ \
60505+ void *___retval; \
60506+ intoverflow_t ___x = (intoverflow_t)x; \
60507+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
60508+ ___retval = NULL; \
60509+ else \
60510+ ___retval = kzalloc((size_t)___x, (y)); \
60511+ ___retval; \
60512+})
60513+
60514+#define __krealloc(x, y, z) \
60515+({ \
60516+ void *___retval; \
60517+ intoverflow_t ___y = (intoverflow_t)y; \
60518+ if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
60519+ ___retval = NULL; \
60520+ else \
60521+ ___retval = __krealloc((x), (size_t)___y, (z)); \
60522+ ___retval; \
60523+})
60524+
60525+#define krealloc(x, y, z) \
60526+({ \
60527+ void *___retval; \
60528+ intoverflow_t ___y = (intoverflow_t)y; \
60529+ if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
60530+ ___retval = NULL; \
60531+ else \
60532+ ___retval = krealloc((x), (size_t)___y, (z)); \
60533+ ___retval; \
60534+})
60535+
60536 #endif /* _LINUX_SLAB_H */
60537diff -urNp linux-3.1.1/include/linux/slub_def.h linux-3.1.1/include/linux/slub_def.h
60538--- linux-3.1.1/include/linux/slub_def.h 2011-11-11 15:19:27.000000000 -0500
60539+++ linux-3.1.1/include/linux/slub_def.h 2011-11-16 18:39:08.000000000 -0500
60540@@ -85,7 +85,7 @@ struct kmem_cache {
60541 struct kmem_cache_order_objects max;
60542 struct kmem_cache_order_objects min;
60543 gfp_t allocflags; /* gfp flags to use on each alloc */
60544- int refcount; /* Refcount for slab cache destroy */
60545+ atomic_t refcount; /* Refcount for slab cache destroy */
60546 void (*ctor)(void *);
60547 int inuse; /* Offset to metadata */
60548 int align; /* Alignment */
60549@@ -211,7 +211,7 @@ static __always_inline struct kmem_cache
60550 }
60551
60552 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
60553-void *__kmalloc(size_t size, gfp_t flags);
60554+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
60555
60556 static __always_inline void *
60557 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
60558diff -urNp linux-3.1.1/include/linux/sonet.h linux-3.1.1/include/linux/sonet.h
60559--- linux-3.1.1/include/linux/sonet.h 2011-11-11 15:19:27.000000000 -0500
60560+++ linux-3.1.1/include/linux/sonet.h 2011-11-16 18:39:08.000000000 -0500
60561@@ -61,7 +61,7 @@ struct sonet_stats {
60562 #include <linux/atomic.h>
60563
60564 struct k_sonet_stats {
60565-#define __HANDLE_ITEM(i) atomic_t i
60566+#define __HANDLE_ITEM(i) atomic_unchecked_t i
60567 __SONET_ITEMS
60568 #undef __HANDLE_ITEM
60569 };
60570diff -urNp linux-3.1.1/include/linux/sunrpc/clnt.h linux-3.1.1/include/linux/sunrpc/clnt.h
60571--- linux-3.1.1/include/linux/sunrpc/clnt.h 2011-11-11 15:19:27.000000000 -0500
60572+++ linux-3.1.1/include/linux/sunrpc/clnt.h 2011-11-16 18:39:08.000000000 -0500
60573@@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
60574 {
60575 switch (sap->sa_family) {
60576 case AF_INET:
60577- return ntohs(((struct sockaddr_in *)sap)->sin_port);
60578+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
60579 case AF_INET6:
60580- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
60581+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
60582 }
60583 return 0;
60584 }
60585@@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
60586 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
60587 const struct sockaddr *src)
60588 {
60589- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
60590+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
60591 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
60592
60593 dsin->sin_family = ssin->sin_family;
60594@@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
60595 if (sa->sa_family != AF_INET6)
60596 return 0;
60597
60598- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
60599+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
60600 }
60601
60602 #endif /* __KERNEL__ */
60603diff -urNp linux-3.1.1/include/linux/sunrpc/sched.h linux-3.1.1/include/linux/sunrpc/sched.h
60604--- linux-3.1.1/include/linux/sunrpc/sched.h 2011-11-11 15:19:27.000000000 -0500
60605+++ linux-3.1.1/include/linux/sunrpc/sched.h 2011-11-16 18:39:08.000000000 -0500
60606@@ -105,6 +105,7 @@ struct rpc_call_ops {
60607 void (*rpc_call_done)(struct rpc_task *, void *);
60608 void (*rpc_release)(void *);
60609 };
60610+typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
60611
60612 struct rpc_task_setup {
60613 struct rpc_task *task;
60614diff -urNp linux-3.1.1/include/linux/sunrpc/svc_rdma.h linux-3.1.1/include/linux/sunrpc/svc_rdma.h
60615--- linux-3.1.1/include/linux/sunrpc/svc_rdma.h 2011-11-11 15:19:27.000000000 -0500
60616+++ linux-3.1.1/include/linux/sunrpc/svc_rdma.h 2011-11-16 18:39:08.000000000 -0500
60617@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
60618 extern unsigned int svcrdma_max_requests;
60619 extern unsigned int svcrdma_max_req_size;
60620
60621-extern atomic_t rdma_stat_recv;
60622-extern atomic_t rdma_stat_read;
60623-extern atomic_t rdma_stat_write;
60624-extern atomic_t rdma_stat_sq_starve;
60625-extern atomic_t rdma_stat_rq_starve;
60626-extern atomic_t rdma_stat_rq_poll;
60627-extern atomic_t rdma_stat_rq_prod;
60628-extern atomic_t rdma_stat_sq_poll;
60629-extern atomic_t rdma_stat_sq_prod;
60630+extern atomic_unchecked_t rdma_stat_recv;
60631+extern atomic_unchecked_t rdma_stat_read;
60632+extern atomic_unchecked_t rdma_stat_write;
60633+extern atomic_unchecked_t rdma_stat_sq_starve;
60634+extern atomic_unchecked_t rdma_stat_rq_starve;
60635+extern atomic_unchecked_t rdma_stat_rq_poll;
60636+extern atomic_unchecked_t rdma_stat_rq_prod;
60637+extern atomic_unchecked_t rdma_stat_sq_poll;
60638+extern atomic_unchecked_t rdma_stat_sq_prod;
60639
60640 #define RPCRDMA_VERSION 1
60641
60642diff -urNp linux-3.1.1/include/linux/sysctl.h linux-3.1.1/include/linux/sysctl.h
60643--- linux-3.1.1/include/linux/sysctl.h 2011-11-11 15:19:27.000000000 -0500
60644+++ linux-3.1.1/include/linux/sysctl.h 2011-11-16 18:40:31.000000000 -0500
60645@@ -155,7 +155,11 @@ enum
60646 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
60647 };
60648
60649-
60650+#ifdef CONFIG_PAX_SOFTMODE
60651+enum {
60652+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
60653+};
60654+#endif
60655
60656 /* CTL_VM names: */
60657 enum
60658@@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
60659
60660 extern int proc_dostring(struct ctl_table *, int,
60661 void __user *, size_t *, loff_t *);
60662+extern int proc_dostring_modpriv(struct ctl_table *, int,
60663+ void __user *, size_t *, loff_t *);
60664 extern int proc_dointvec(struct ctl_table *, int,
60665 void __user *, size_t *, loff_t *);
60666 extern int proc_dointvec_minmax(struct ctl_table *, int,
60667diff -urNp linux-3.1.1/include/linux/tty_ldisc.h linux-3.1.1/include/linux/tty_ldisc.h
60668--- linux-3.1.1/include/linux/tty_ldisc.h 2011-11-11 15:19:27.000000000 -0500
60669+++ linux-3.1.1/include/linux/tty_ldisc.h 2011-11-16 18:39:08.000000000 -0500
60670@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
60671
60672 struct module *owner;
60673
60674- int refcount;
60675+ atomic_t refcount;
60676 };
60677
60678 struct tty_ldisc {
60679diff -urNp linux-3.1.1/include/linux/types.h linux-3.1.1/include/linux/types.h
60680--- linux-3.1.1/include/linux/types.h 2011-11-11 15:19:27.000000000 -0500
60681+++ linux-3.1.1/include/linux/types.h 2011-11-16 18:39:08.000000000 -0500
60682@@ -213,10 +213,26 @@ typedef struct {
60683 int counter;
60684 } atomic_t;
60685
60686+#ifdef CONFIG_PAX_REFCOUNT
60687+typedef struct {
60688+ int counter;
60689+} atomic_unchecked_t;
60690+#else
60691+typedef atomic_t atomic_unchecked_t;
60692+#endif
60693+
60694 #ifdef CONFIG_64BIT
60695 typedef struct {
60696 long counter;
60697 } atomic64_t;
60698+
60699+#ifdef CONFIG_PAX_REFCOUNT
60700+typedef struct {
60701+ long counter;
60702+} atomic64_unchecked_t;
60703+#else
60704+typedef atomic64_t atomic64_unchecked_t;
60705+#endif
60706 #endif
60707
60708 struct list_head {
60709diff -urNp linux-3.1.1/include/linux/uaccess.h linux-3.1.1/include/linux/uaccess.h
60710--- linux-3.1.1/include/linux/uaccess.h 2011-11-11 15:19:27.000000000 -0500
60711+++ linux-3.1.1/include/linux/uaccess.h 2011-11-16 18:39:08.000000000 -0500
60712@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
60713 long ret; \
60714 mm_segment_t old_fs = get_fs(); \
60715 \
60716- set_fs(KERNEL_DS); \
60717 pagefault_disable(); \
60718- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
60719- pagefault_enable(); \
60720+ set_fs(KERNEL_DS); \
60721+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
60722 set_fs(old_fs); \
60723+ pagefault_enable(); \
60724 ret; \
60725 })
60726
60727diff -urNp linux-3.1.1/include/linux/unaligned/access_ok.h linux-3.1.1/include/linux/unaligned/access_ok.h
60728--- linux-3.1.1/include/linux/unaligned/access_ok.h 2011-11-11 15:19:27.000000000 -0500
60729+++ linux-3.1.1/include/linux/unaligned/access_ok.h 2011-11-16 18:39:08.000000000 -0500
60730@@ -6,32 +6,32 @@
60731
60732 static inline u16 get_unaligned_le16(const void *p)
60733 {
60734- return le16_to_cpup((__le16 *)p);
60735+ return le16_to_cpup((const __le16 *)p);
60736 }
60737
60738 static inline u32 get_unaligned_le32(const void *p)
60739 {
60740- return le32_to_cpup((__le32 *)p);
60741+ return le32_to_cpup((const __le32 *)p);
60742 }
60743
60744 static inline u64 get_unaligned_le64(const void *p)
60745 {
60746- return le64_to_cpup((__le64 *)p);
60747+ return le64_to_cpup((const __le64 *)p);
60748 }
60749
60750 static inline u16 get_unaligned_be16(const void *p)
60751 {
60752- return be16_to_cpup((__be16 *)p);
60753+ return be16_to_cpup((const __be16 *)p);
60754 }
60755
60756 static inline u32 get_unaligned_be32(const void *p)
60757 {
60758- return be32_to_cpup((__be32 *)p);
60759+ return be32_to_cpup((const __be32 *)p);
60760 }
60761
60762 static inline u64 get_unaligned_be64(const void *p)
60763 {
60764- return be64_to_cpup((__be64 *)p);
60765+ return be64_to_cpup((const __be64 *)p);
60766 }
60767
60768 static inline void put_unaligned_le16(u16 val, void *p)
60769diff -urNp linux-3.1.1/include/linux/vermagic.h linux-3.1.1/include/linux/vermagic.h
60770--- linux-3.1.1/include/linux/vermagic.h 2011-11-11 15:19:27.000000000 -0500
60771+++ linux-3.1.1/include/linux/vermagic.h 2011-11-16 18:54:54.000000000 -0500
60772@@ -26,9 +26,35 @@
60773 #define MODULE_ARCH_VERMAGIC ""
60774 #endif
60775
60776+#ifdef CONFIG_PAX_REFCOUNT
60777+#define MODULE_PAX_REFCOUNT "REFCOUNT "
60778+#else
60779+#define MODULE_PAX_REFCOUNT ""
60780+#endif
60781+
60782+#ifdef CONSTIFY_PLUGIN
60783+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
60784+#else
60785+#define MODULE_CONSTIFY_PLUGIN ""
60786+#endif
60787+
60788+#ifdef STACKLEAK_PLUGIN
60789+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
60790+#else
60791+#define MODULE_STACKLEAK_PLUGIN ""
60792+#endif
60793+
60794+#ifdef CONFIG_GRKERNSEC
60795+#define MODULE_GRSEC "GRSEC "
60796+#else
60797+#define MODULE_GRSEC ""
60798+#endif
60799+
60800 #define VERMAGIC_STRING \
60801 UTS_RELEASE " " \
60802 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
60803 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
60804- MODULE_ARCH_VERMAGIC
60805+ MODULE_ARCH_VERMAGIC \
60806+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
60807+ MODULE_GRSEC
60808
60809diff -urNp linux-3.1.1/include/linux/vmalloc.h linux-3.1.1/include/linux/vmalloc.h
60810--- linux-3.1.1/include/linux/vmalloc.h 2011-11-11 15:19:27.000000000 -0500
60811+++ linux-3.1.1/include/linux/vmalloc.h 2011-11-16 18:39:08.000000000 -0500
60812@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining
60813 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
60814 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
60815 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
60816+
60817+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
60818+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
60819+#endif
60820+
60821 /* bits [20..32] reserved for arch specific ioremap internals */
60822
60823 /*
60824@@ -156,4 +161,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
60825 # endif
60826 #endif
60827
60828+#define vmalloc(x) \
60829+({ \
60830+ void *___retval; \
60831+ intoverflow_t ___x = (intoverflow_t)x; \
60832+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
60833+ ___retval = NULL; \
60834+ else \
60835+ ___retval = vmalloc((unsigned long)___x); \
60836+ ___retval; \
60837+})
60838+
60839+#define vzalloc(x) \
60840+({ \
60841+ void *___retval; \
60842+ intoverflow_t ___x = (intoverflow_t)x; \
60843+ if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
60844+ ___retval = NULL; \
60845+ else \
60846+ ___retval = vzalloc((unsigned long)___x); \
60847+ ___retval; \
60848+})
60849+
60850+#define __vmalloc(x, y, z) \
60851+({ \
60852+ void *___retval; \
60853+ intoverflow_t ___x = (intoverflow_t)x; \
60854+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
60855+ ___retval = NULL; \
60856+ else \
60857+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
60858+ ___retval; \
60859+})
60860+
60861+#define vmalloc_user(x) \
60862+({ \
60863+ void *___retval; \
60864+ intoverflow_t ___x = (intoverflow_t)x; \
60865+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
60866+ ___retval = NULL; \
60867+ else \
60868+ ___retval = vmalloc_user((unsigned long)___x); \
60869+ ___retval; \
60870+})
60871+
60872+#define vmalloc_exec(x) \
60873+({ \
60874+ void *___retval; \
60875+ intoverflow_t ___x = (intoverflow_t)x; \
60876+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
60877+ ___retval = NULL; \
60878+ else \
60879+ ___retval = vmalloc_exec((unsigned long)___x); \
60880+ ___retval; \
60881+})
60882+
60883+#define vmalloc_node(x, y) \
60884+({ \
60885+ void *___retval; \
60886+ intoverflow_t ___x = (intoverflow_t)x; \
60887+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
60888+ ___retval = NULL; \
60889+ else \
60890+ ___retval = vmalloc_node((unsigned long)___x, (y));\
60891+ ___retval; \
60892+})
60893+
60894+#define vzalloc_node(x, y) \
60895+({ \
60896+ void *___retval; \
60897+ intoverflow_t ___x = (intoverflow_t)x; \
60898+ if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
60899+ ___retval = NULL; \
60900+ else \
60901+ ___retval = vzalloc_node((unsigned long)___x, (y));\
60902+ ___retval; \
60903+})
60904+
60905+#define vmalloc_32(x) \
60906+({ \
60907+ void *___retval; \
60908+ intoverflow_t ___x = (intoverflow_t)x; \
60909+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
60910+ ___retval = NULL; \
60911+ else \
60912+ ___retval = vmalloc_32((unsigned long)___x); \
60913+ ___retval; \
60914+})
60915+
60916+#define vmalloc_32_user(x) \
60917+({ \
60918+void *___retval; \
60919+ intoverflow_t ___x = (intoverflow_t)x; \
60920+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
60921+ ___retval = NULL; \
60922+ else \
60923+ ___retval = vmalloc_32_user((unsigned long)___x);\
60924+ ___retval; \
60925+})
60926+
60927 #endif /* _LINUX_VMALLOC_H */
60928diff -urNp linux-3.1.1/include/linux/vmstat.h linux-3.1.1/include/linux/vmstat.h
60929--- linux-3.1.1/include/linux/vmstat.h 2011-11-11 15:19:27.000000000 -0500
60930+++ linux-3.1.1/include/linux/vmstat.h 2011-11-16 18:39:08.000000000 -0500
60931@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(in
60932 /*
60933 * Zone based page accounting with per cpu differentials.
60934 */
60935-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60936+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60937
60938 static inline void zone_page_state_add(long x, struct zone *zone,
60939 enum zone_stat_item item)
60940 {
60941- atomic_long_add(x, &zone->vm_stat[item]);
60942- atomic_long_add(x, &vm_stat[item]);
60943+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
60944+ atomic_long_add_unchecked(x, &vm_stat[item]);
60945 }
60946
60947 static inline unsigned long global_page_state(enum zone_stat_item item)
60948 {
60949- long x = atomic_long_read(&vm_stat[item]);
60950+ long x = atomic_long_read_unchecked(&vm_stat[item]);
60951 #ifdef CONFIG_SMP
60952 if (x < 0)
60953 x = 0;
60954@@ -109,7 +109,7 @@ static inline unsigned long global_page_
60955 static inline unsigned long zone_page_state(struct zone *zone,
60956 enum zone_stat_item item)
60957 {
60958- long x = atomic_long_read(&zone->vm_stat[item]);
60959+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
60960 #ifdef CONFIG_SMP
60961 if (x < 0)
60962 x = 0;
60963@@ -126,7 +126,7 @@ static inline unsigned long zone_page_st
60964 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
60965 enum zone_stat_item item)
60966 {
60967- long x = atomic_long_read(&zone->vm_stat[item]);
60968+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
60969
60970 #ifdef CONFIG_SMP
60971 int cpu;
60972@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state
60973
60974 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
60975 {
60976- atomic_long_inc(&zone->vm_stat[item]);
60977- atomic_long_inc(&vm_stat[item]);
60978+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
60979+ atomic_long_inc_unchecked(&vm_stat[item]);
60980 }
60981
60982 static inline void __inc_zone_page_state(struct page *page,
60983@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state
60984
60985 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
60986 {
60987- atomic_long_dec(&zone->vm_stat[item]);
60988- atomic_long_dec(&vm_stat[item]);
60989+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
60990+ atomic_long_dec_unchecked(&vm_stat[item]);
60991 }
60992
60993 static inline void __dec_zone_page_state(struct page *page,
60994diff -urNp linux-3.1.1/include/media/saa7146_vv.h linux-3.1.1/include/media/saa7146_vv.h
60995--- linux-3.1.1/include/media/saa7146_vv.h 2011-11-11 15:19:27.000000000 -0500
60996+++ linux-3.1.1/include/media/saa7146_vv.h 2011-11-16 18:39:08.000000000 -0500
60997@@ -163,7 +163,7 @@ struct saa7146_ext_vv
60998 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
60999
61000 /* the extension can override this */
61001- struct v4l2_ioctl_ops ops;
61002+ v4l2_ioctl_ops_no_const ops;
61003 /* pointer to the saa7146 core ops */
61004 const struct v4l2_ioctl_ops *core_ops;
61005
61006diff -urNp linux-3.1.1/include/media/v4l2-dev.h linux-3.1.1/include/media/v4l2-dev.h
61007--- linux-3.1.1/include/media/v4l2-dev.h 2011-11-11 15:19:27.000000000 -0500
61008+++ linux-3.1.1/include/media/v4l2-dev.h 2011-11-16 18:39:08.000000000 -0500
61009@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_sta
61010
61011
61012 struct v4l2_file_operations {
61013- struct module *owner;
61014+ struct module * const owner;
61015 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
61016 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
61017 unsigned int (*poll) (struct file *, struct poll_table_struct *);
61018@@ -68,6 +68,7 @@ struct v4l2_file_operations {
61019 int (*open) (struct file *);
61020 int (*release) (struct file *);
61021 };
61022+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
61023
61024 /*
61025 * Newer version of video_device, handled by videodev2.c
61026diff -urNp linux-3.1.1/include/media/v4l2-ioctl.h linux-3.1.1/include/media/v4l2-ioctl.h
61027--- linux-3.1.1/include/media/v4l2-ioctl.h 2011-11-11 15:19:27.000000000 -0500
61028+++ linux-3.1.1/include/media/v4l2-ioctl.h 2011-11-16 18:40:44.000000000 -0500
61029@@ -272,6 +272,7 @@ struct v4l2_ioctl_ops {
61030 long (*vidioc_default) (struct file *file, void *fh,
61031 bool valid_prio, int cmd, void *arg);
61032 };
61033+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
61034
61035
61036 /* v4l debugging and diagnostics */
61037diff -urNp linux-3.1.1/include/net/caif/caif_hsi.h linux-3.1.1/include/net/caif/caif_hsi.h
61038--- linux-3.1.1/include/net/caif/caif_hsi.h 2011-11-11 15:19:27.000000000 -0500
61039+++ linux-3.1.1/include/net/caif/caif_hsi.h 2011-11-16 18:39:08.000000000 -0500
61040@@ -94,7 +94,7 @@ struct cfhsi_drv {
61041 void (*rx_done_cb) (struct cfhsi_drv *drv);
61042 void (*wake_up_cb) (struct cfhsi_drv *drv);
61043 void (*wake_down_cb) (struct cfhsi_drv *drv);
61044-};
61045+} __no_const;
61046
61047 /* Structure implemented by HSI device. */
61048 struct cfhsi_dev {
61049diff -urNp linux-3.1.1/include/net/caif/cfctrl.h linux-3.1.1/include/net/caif/cfctrl.h
61050--- linux-3.1.1/include/net/caif/cfctrl.h 2011-11-11 15:19:27.000000000 -0500
61051+++ linux-3.1.1/include/net/caif/cfctrl.h 2011-11-16 18:39:08.000000000 -0500
61052@@ -52,7 +52,7 @@ struct cfctrl_rsp {
61053 void (*radioset_rsp)(void);
61054 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
61055 struct cflayer *client_layer);
61056-};
61057+} __no_const;
61058
61059 /* Link Setup Parameters for CAIF-Links. */
61060 struct cfctrl_link_param {
61061@@ -101,8 +101,8 @@ struct cfctrl_request_info {
61062 struct cfctrl {
61063 struct cfsrvl serv;
61064 struct cfctrl_rsp res;
61065- atomic_t req_seq_no;
61066- atomic_t rsp_seq_no;
61067+ atomic_unchecked_t req_seq_no;
61068+ atomic_unchecked_t rsp_seq_no;
61069 struct list_head list;
61070 /* Protects from simultaneous access to first_req list */
61071 spinlock_t info_list_lock;
61072diff -urNp linux-3.1.1/include/net/flow.h linux-3.1.1/include/net/flow.h
61073--- linux-3.1.1/include/net/flow.h 2011-11-11 15:19:27.000000000 -0500
61074+++ linux-3.1.1/include/net/flow.h 2011-11-16 18:39:08.000000000 -0500
61075@@ -207,6 +207,6 @@ extern struct flow_cache_object *flow_ca
61076 u8 dir, flow_resolve_t resolver, void *ctx);
61077
61078 extern void flow_cache_flush(void);
61079-extern atomic_t flow_cache_genid;
61080+extern atomic_unchecked_t flow_cache_genid;
61081
61082 #endif
61083diff -urNp linux-3.1.1/include/net/inetpeer.h linux-3.1.1/include/net/inetpeer.h
61084--- linux-3.1.1/include/net/inetpeer.h 2011-11-11 15:19:27.000000000 -0500
61085+++ linux-3.1.1/include/net/inetpeer.h 2011-11-16 18:39:08.000000000 -0500
61086@@ -47,8 +47,8 @@ struct inet_peer {
61087 */
61088 union {
61089 struct {
61090- atomic_t rid; /* Frag reception counter */
61091- atomic_t ip_id_count; /* IP ID for the next packet */
61092+ atomic_unchecked_t rid; /* Frag reception counter */
61093+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
61094 __u32 tcp_ts;
61095 __u32 tcp_ts_stamp;
61096 };
61097@@ -112,11 +112,11 @@ static inline int inet_getid(struct inet
61098 more++;
61099 inet_peer_refcheck(p);
61100 do {
61101- old = atomic_read(&p->ip_id_count);
61102+ old = atomic_read_unchecked(&p->ip_id_count);
61103 new = old + more;
61104 if (!new)
61105 new = 1;
61106- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
61107+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
61108 return new;
61109 }
61110
61111diff -urNp linux-3.1.1/include/net/ip_fib.h linux-3.1.1/include/net/ip_fib.h
61112--- linux-3.1.1/include/net/ip_fib.h 2011-11-11 15:19:27.000000000 -0500
61113+++ linux-3.1.1/include/net/ip_fib.h 2011-11-16 18:39:08.000000000 -0500
61114@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
61115
61116 #define FIB_RES_SADDR(net, res) \
61117 ((FIB_RES_NH(res).nh_saddr_genid == \
61118- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
61119+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
61120 FIB_RES_NH(res).nh_saddr : \
61121 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
61122 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
61123diff -urNp linux-3.1.1/include/net/ip_vs.h linux-3.1.1/include/net/ip_vs.h
61124--- linux-3.1.1/include/net/ip_vs.h 2011-11-11 15:19:27.000000000 -0500
61125+++ linux-3.1.1/include/net/ip_vs.h 2011-11-16 18:39:08.000000000 -0500
61126@@ -509,7 +509,7 @@ struct ip_vs_conn {
61127 struct ip_vs_conn *control; /* Master control connection */
61128 atomic_t n_control; /* Number of controlled ones */
61129 struct ip_vs_dest *dest; /* real server */
61130- atomic_t in_pkts; /* incoming packet counter */
61131+ atomic_unchecked_t in_pkts; /* incoming packet counter */
61132
61133 /* packet transmitter for different forwarding methods. If it
61134 mangles the packet, it must return NF_DROP or better NF_STOLEN,
61135@@ -647,7 +647,7 @@ struct ip_vs_dest {
61136 __be16 port; /* port number of the server */
61137 union nf_inet_addr addr; /* IP address of the server */
61138 volatile unsigned flags; /* dest status flags */
61139- atomic_t conn_flags; /* flags to copy to conn */
61140+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
61141 atomic_t weight; /* server weight */
61142
61143 atomic_t refcnt; /* reference counter */
61144diff -urNp linux-3.1.1/include/net/irda/ircomm_core.h linux-3.1.1/include/net/irda/ircomm_core.h
61145--- linux-3.1.1/include/net/irda/ircomm_core.h 2011-11-11 15:19:27.000000000 -0500
61146+++ linux-3.1.1/include/net/irda/ircomm_core.h 2011-11-16 18:39:08.000000000 -0500
61147@@ -51,7 +51,7 @@ typedef struct {
61148 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
61149 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
61150 struct ircomm_info *);
61151-} call_t;
61152+} __no_const call_t;
61153
61154 struct ircomm_cb {
61155 irda_queue_t queue;
61156diff -urNp linux-3.1.1/include/net/irda/ircomm_tty.h linux-3.1.1/include/net/irda/ircomm_tty.h
61157--- linux-3.1.1/include/net/irda/ircomm_tty.h 2011-11-11 15:19:27.000000000 -0500
61158+++ linux-3.1.1/include/net/irda/ircomm_tty.h 2011-11-16 18:39:08.000000000 -0500
61159@@ -35,6 +35,7 @@
61160 #include <linux/termios.h>
61161 #include <linux/timer.h>
61162 #include <linux/tty.h> /* struct tty_struct */
61163+#include <asm/local.h>
61164
61165 #include <net/irda/irias_object.h>
61166 #include <net/irda/ircomm_core.h>
61167@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
61168 unsigned short close_delay;
61169 unsigned short closing_wait; /* time to wait before closing */
61170
61171- int open_count;
61172- int blocked_open; /* # of blocked opens */
61173+ local_t open_count;
61174+ local_t blocked_open; /* # of blocked opens */
61175
61176 /* Protect concurent access to :
61177 * o self->open_count
61178diff -urNp linux-3.1.1/include/net/iucv/af_iucv.h linux-3.1.1/include/net/iucv/af_iucv.h
61179--- linux-3.1.1/include/net/iucv/af_iucv.h 2011-11-11 15:19:27.000000000 -0500
61180+++ linux-3.1.1/include/net/iucv/af_iucv.h 2011-11-16 18:39:08.000000000 -0500
61181@@ -87,7 +87,7 @@ struct iucv_sock {
61182 struct iucv_sock_list {
61183 struct hlist_head head;
61184 rwlock_t lock;
61185- atomic_t autobind_name;
61186+ atomic_unchecked_t autobind_name;
61187 };
61188
61189 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
61190diff -urNp linux-3.1.1/include/net/lapb.h linux-3.1.1/include/net/lapb.h
61191--- linux-3.1.1/include/net/lapb.h 2011-11-11 15:19:27.000000000 -0500
61192+++ linux-3.1.1/include/net/lapb.h 2011-11-16 18:39:08.000000000 -0500
61193@@ -95,7 +95,7 @@ struct lapb_cb {
61194 struct sk_buff_head write_queue;
61195 struct sk_buff_head ack_queue;
61196 unsigned char window;
61197- struct lapb_register_struct callbacks;
61198+ struct lapb_register_struct *callbacks;
61199
61200 /* FRMR control information */
61201 struct lapb_frame frmr_data;
61202diff -urNp linux-3.1.1/include/net/neighbour.h linux-3.1.1/include/net/neighbour.h
61203--- linux-3.1.1/include/net/neighbour.h 2011-11-11 15:19:27.000000000 -0500
61204+++ linux-3.1.1/include/net/neighbour.h 2011-11-16 18:39:08.000000000 -0500
61205@@ -122,7 +122,7 @@ struct neigh_ops {
61206 void (*error_report)(struct neighbour *, struct sk_buff *);
61207 int (*output)(struct neighbour *, struct sk_buff *);
61208 int (*connected_output)(struct neighbour *, struct sk_buff *);
61209-};
61210+} __do_const;
61211
61212 struct pneigh_entry {
61213 struct pneigh_entry *next;
61214diff -urNp linux-3.1.1/include/net/netlink.h linux-3.1.1/include/net/netlink.h
61215--- linux-3.1.1/include/net/netlink.h 2011-11-11 15:19:27.000000000 -0500
61216+++ linux-3.1.1/include/net/netlink.h 2011-11-16 18:39:08.000000000 -0500
61217@@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
61218 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
61219 {
61220 if (mark)
61221- skb_trim(skb, (unsigned char *) mark - skb->data);
61222+ skb_trim(skb, (const unsigned char *) mark - skb->data);
61223 }
61224
61225 /**
61226diff -urNp linux-3.1.1/include/net/netns/ipv4.h linux-3.1.1/include/net/netns/ipv4.h
61227--- linux-3.1.1/include/net/netns/ipv4.h 2011-11-11 15:19:27.000000000 -0500
61228+++ linux-3.1.1/include/net/netns/ipv4.h 2011-11-16 18:39:08.000000000 -0500
61229@@ -56,8 +56,8 @@ struct netns_ipv4 {
61230
61231 unsigned int sysctl_ping_group_range[2];
61232
61233- atomic_t rt_genid;
61234- atomic_t dev_addr_genid;
61235+ atomic_unchecked_t rt_genid;
61236+ atomic_unchecked_t dev_addr_genid;
61237
61238 #ifdef CONFIG_IP_MROUTE
61239 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
61240diff -urNp linux-3.1.1/include/net/sctp/sctp.h linux-3.1.1/include/net/sctp/sctp.h
61241--- linux-3.1.1/include/net/sctp/sctp.h 2011-11-11 15:19:27.000000000 -0500
61242+++ linux-3.1.1/include/net/sctp/sctp.h 2011-11-16 18:39:08.000000000 -0500
61243@@ -318,9 +318,9 @@ do { \
61244
61245 #else /* SCTP_DEBUG */
61246
61247-#define SCTP_DEBUG_PRINTK(whatever...)
61248-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
61249-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
61250+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
61251+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
61252+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
61253 #define SCTP_ENABLE_DEBUG
61254 #define SCTP_DISABLE_DEBUG
61255 #define SCTP_ASSERT(expr, str, func)
61256diff -urNp linux-3.1.1/include/net/sock.h linux-3.1.1/include/net/sock.h
61257--- linux-3.1.1/include/net/sock.h 2011-11-11 15:19:27.000000000 -0500
61258+++ linux-3.1.1/include/net/sock.h 2011-11-16 18:39:08.000000000 -0500
61259@@ -278,7 +278,7 @@ struct sock {
61260 #ifdef CONFIG_RPS
61261 __u32 sk_rxhash;
61262 #endif
61263- atomic_t sk_drops;
61264+ atomic_unchecked_t sk_drops;
61265 int sk_rcvbuf;
61266
61267 struct sk_filter __rcu *sk_filter;
61268@@ -1391,7 +1391,7 @@ static inline void sk_nocaps_add(struct
61269 }
61270
61271 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
61272- char __user *from, char *to,
61273+ char __user *from, unsigned char *to,
61274 int copy, int offset)
61275 {
61276 if (skb->ip_summed == CHECKSUM_NONE) {
61277diff -urNp linux-3.1.1/include/net/tcp.h linux-3.1.1/include/net/tcp.h
61278--- linux-3.1.1/include/net/tcp.h 2011-11-11 15:19:27.000000000 -0500
61279+++ linux-3.1.1/include/net/tcp.h 2011-11-16 18:39:08.000000000 -0500
61280@@ -1401,8 +1401,8 @@ enum tcp_seq_states {
61281 struct tcp_seq_afinfo {
61282 char *name;
61283 sa_family_t family;
61284- struct file_operations seq_fops;
61285- struct seq_operations seq_ops;
61286+ file_operations_no_const seq_fops;
61287+ seq_operations_no_const seq_ops;
61288 };
61289
61290 struct tcp_iter_state {
61291diff -urNp linux-3.1.1/include/net/udp.h linux-3.1.1/include/net/udp.h
61292--- linux-3.1.1/include/net/udp.h 2011-11-11 15:19:27.000000000 -0500
61293+++ linux-3.1.1/include/net/udp.h 2011-11-16 18:39:08.000000000 -0500
61294@@ -234,8 +234,8 @@ struct udp_seq_afinfo {
61295 char *name;
61296 sa_family_t family;
61297 struct udp_table *udp_table;
61298- struct file_operations seq_fops;
61299- struct seq_operations seq_ops;
61300+ file_operations_no_const seq_fops;
61301+ seq_operations_no_const seq_ops;
61302 };
61303
61304 struct udp_iter_state {
61305diff -urNp linux-3.1.1/include/net/xfrm.h linux-3.1.1/include/net/xfrm.h
61306--- linux-3.1.1/include/net/xfrm.h 2011-11-11 15:19:27.000000000 -0500
61307+++ linux-3.1.1/include/net/xfrm.h 2011-11-16 18:39:08.000000000 -0500
61308@@ -505,7 +505,7 @@ struct xfrm_policy {
61309 struct timer_list timer;
61310
61311 struct flow_cache_object flo;
61312- atomic_t genid;
61313+ atomic_unchecked_t genid;
61314 u32 priority;
61315 u32 index;
61316 struct xfrm_mark mark;
61317diff -urNp linux-3.1.1/include/rdma/iw_cm.h linux-3.1.1/include/rdma/iw_cm.h
61318--- linux-3.1.1/include/rdma/iw_cm.h 2011-11-11 15:19:27.000000000 -0500
61319+++ linux-3.1.1/include/rdma/iw_cm.h 2011-11-16 18:39:08.000000000 -0500
61320@@ -120,7 +120,7 @@ struct iw_cm_verbs {
61321 int backlog);
61322
61323 int (*destroy_listen)(struct iw_cm_id *cm_id);
61324-};
61325+} __no_const;
61326
61327 /**
61328 * iw_create_cm_id - Create an IW CM identifier.
61329diff -urNp linux-3.1.1/include/scsi/libfc.h linux-3.1.1/include/scsi/libfc.h
61330--- linux-3.1.1/include/scsi/libfc.h 2011-11-11 15:19:27.000000000 -0500
61331+++ linux-3.1.1/include/scsi/libfc.h 2011-11-16 18:39:08.000000000 -0500
61332@@ -758,6 +758,7 @@ struct libfc_function_template {
61333 */
61334 void (*disc_stop_final) (struct fc_lport *);
61335 };
61336+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
61337
61338 /**
61339 * struct fc_disc - Discovery context
61340@@ -861,7 +862,7 @@ struct fc_lport {
61341 struct fc_vport *vport;
61342
61343 /* Operational Information */
61344- struct libfc_function_template tt;
61345+ libfc_function_template_no_const tt;
61346 u8 link_up;
61347 u8 qfull;
61348 enum fc_lport_state state;
61349diff -urNp linux-3.1.1/include/scsi/scsi_device.h linux-3.1.1/include/scsi/scsi_device.h
61350--- linux-3.1.1/include/scsi/scsi_device.h 2011-11-11 15:19:27.000000000 -0500
61351+++ linux-3.1.1/include/scsi/scsi_device.h 2011-11-16 18:39:08.000000000 -0500
61352@@ -161,9 +161,9 @@ struct scsi_device {
61353 unsigned int max_device_blocked; /* what device_blocked counts down from */
61354 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
61355
61356- atomic_t iorequest_cnt;
61357- atomic_t iodone_cnt;
61358- atomic_t ioerr_cnt;
61359+ atomic_unchecked_t iorequest_cnt;
61360+ atomic_unchecked_t iodone_cnt;
61361+ atomic_unchecked_t ioerr_cnt;
61362
61363 struct device sdev_gendev,
61364 sdev_dev;
61365diff -urNp linux-3.1.1/include/scsi/scsi_transport_fc.h linux-3.1.1/include/scsi/scsi_transport_fc.h
61366--- linux-3.1.1/include/scsi/scsi_transport_fc.h 2011-11-11 15:19:27.000000000 -0500
61367+++ linux-3.1.1/include/scsi/scsi_transport_fc.h 2011-11-16 18:39:08.000000000 -0500
61368@@ -711,7 +711,7 @@ struct fc_function_template {
61369 unsigned long show_host_system_hostname:1;
61370
61371 unsigned long disable_target_scan:1;
61372-};
61373+} __do_const;
61374
61375
61376 /**
61377diff -urNp linux-3.1.1/include/sound/ak4xxx-adda.h linux-3.1.1/include/sound/ak4xxx-adda.h
61378--- linux-3.1.1/include/sound/ak4xxx-adda.h 2011-11-11 15:19:27.000000000 -0500
61379+++ linux-3.1.1/include/sound/ak4xxx-adda.h 2011-11-16 18:39:08.000000000 -0500
61380@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
61381 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
61382 unsigned char val);
61383 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
61384-};
61385+} __no_const;
61386
61387 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
61388
61389diff -urNp linux-3.1.1/include/sound/hwdep.h linux-3.1.1/include/sound/hwdep.h
61390--- linux-3.1.1/include/sound/hwdep.h 2011-11-11 15:19:27.000000000 -0500
61391+++ linux-3.1.1/include/sound/hwdep.h 2011-11-16 18:39:08.000000000 -0500
61392@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
61393 struct snd_hwdep_dsp_status *status);
61394 int (*dsp_load)(struct snd_hwdep *hw,
61395 struct snd_hwdep_dsp_image *image);
61396-};
61397+} __no_const;
61398
61399 struct snd_hwdep {
61400 struct snd_card *card;
61401diff -urNp linux-3.1.1/include/sound/info.h linux-3.1.1/include/sound/info.h
61402--- linux-3.1.1/include/sound/info.h 2011-11-11 15:19:27.000000000 -0500
61403+++ linux-3.1.1/include/sound/info.h 2011-11-16 18:39:08.000000000 -0500
61404@@ -44,7 +44,7 @@ struct snd_info_entry_text {
61405 struct snd_info_buffer *buffer);
61406 void (*write)(struct snd_info_entry *entry,
61407 struct snd_info_buffer *buffer);
61408-};
61409+} __no_const;
61410
61411 struct snd_info_entry_ops {
61412 int (*open)(struct snd_info_entry *entry,
61413diff -urNp linux-3.1.1/include/sound/pcm.h linux-3.1.1/include/sound/pcm.h
61414--- linux-3.1.1/include/sound/pcm.h 2011-11-11 15:19:27.000000000 -0500
61415+++ linux-3.1.1/include/sound/pcm.h 2011-11-16 18:39:08.000000000 -0500
61416@@ -81,6 +81,7 @@ struct snd_pcm_ops {
61417 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
61418 int (*ack)(struct snd_pcm_substream *substream);
61419 };
61420+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
61421
61422 /*
61423 *
61424diff -urNp linux-3.1.1/include/sound/sb16_csp.h linux-3.1.1/include/sound/sb16_csp.h
61425--- linux-3.1.1/include/sound/sb16_csp.h 2011-11-11 15:19:27.000000000 -0500
61426+++ linux-3.1.1/include/sound/sb16_csp.h 2011-11-16 18:39:08.000000000 -0500
61427@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
61428 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
61429 int (*csp_stop) (struct snd_sb_csp * p);
61430 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
61431-};
61432+} __no_const;
61433
61434 /*
61435 * CSP private data
61436diff -urNp linux-3.1.1/include/sound/soc.h linux-3.1.1/include/sound/soc.h
61437--- linux-3.1.1/include/sound/soc.h 2011-11-11 15:19:27.000000000 -0500
61438+++ linux-3.1.1/include/sound/soc.h 2011-11-16 18:39:08.000000000 -0500
61439@@ -676,7 +676,7 @@ struct snd_soc_platform_driver {
61440 /* platform IO - used for platform DAPM */
61441 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
61442 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
61443-};
61444+} __do_const;
61445
61446 struct snd_soc_platform {
61447 const char *name;
61448diff -urNp linux-3.1.1/include/sound/ymfpci.h linux-3.1.1/include/sound/ymfpci.h
61449--- linux-3.1.1/include/sound/ymfpci.h 2011-11-11 15:19:27.000000000 -0500
61450+++ linux-3.1.1/include/sound/ymfpci.h 2011-11-16 18:39:08.000000000 -0500
61451@@ -358,7 +358,7 @@ struct snd_ymfpci {
61452 spinlock_t reg_lock;
61453 spinlock_t voice_lock;
61454 wait_queue_head_t interrupt_sleep;
61455- atomic_t interrupt_sleep_count;
61456+ atomic_unchecked_t interrupt_sleep_count;
61457 struct snd_info_entry *proc_entry;
61458 const struct firmware *dsp_microcode;
61459 const struct firmware *controller_microcode;
61460diff -urNp linux-3.1.1/include/target/target_core_base.h linux-3.1.1/include/target/target_core_base.h
61461--- linux-3.1.1/include/target/target_core_base.h 2011-11-11 15:19:27.000000000 -0500
61462+++ linux-3.1.1/include/target/target_core_base.h 2011-11-16 18:39:08.000000000 -0500
61463@@ -356,7 +356,7 @@ struct t10_reservation_ops {
61464 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
61465 int (*t10_pr_register)(struct se_cmd *);
61466 int (*t10_pr_clear)(struct se_cmd *);
61467-};
61468+} __no_const;
61469
61470 struct t10_reservation {
61471 /* Reservation effects all target ports */
61472@@ -496,8 +496,8 @@ struct se_cmd {
61473 atomic_t t_task_cdbs_left;
61474 atomic_t t_task_cdbs_ex_left;
61475 atomic_t t_task_cdbs_timeout_left;
61476- atomic_t t_task_cdbs_sent;
61477- atomic_t t_transport_aborted;
61478+ atomic_unchecked_t t_task_cdbs_sent;
61479+ atomic_unchecked_t t_transport_aborted;
61480 atomic_t t_transport_active;
61481 atomic_t t_transport_complete;
61482 atomic_t t_transport_queue_active;
61483@@ -744,7 +744,7 @@ struct se_device {
61484 atomic_t active_cmds;
61485 atomic_t simple_cmds;
61486 atomic_t depth_left;
61487- atomic_t dev_ordered_id;
61488+ atomic_unchecked_t dev_ordered_id;
61489 atomic_t dev_tur_active;
61490 atomic_t execute_tasks;
61491 atomic_t dev_status_thr_count;
61492diff -urNp linux-3.1.1/include/trace/events/irq.h linux-3.1.1/include/trace/events/irq.h
61493--- linux-3.1.1/include/trace/events/irq.h 2011-11-11 15:19:27.000000000 -0500
61494+++ linux-3.1.1/include/trace/events/irq.h 2011-11-16 18:39:08.000000000 -0500
61495@@ -36,7 +36,7 @@ struct softirq_action;
61496 */
61497 TRACE_EVENT(irq_handler_entry,
61498
61499- TP_PROTO(int irq, struct irqaction *action),
61500+ TP_PROTO(int irq, const struct irqaction *action),
61501
61502 TP_ARGS(irq, action),
61503
61504@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
61505 */
61506 TRACE_EVENT(irq_handler_exit,
61507
61508- TP_PROTO(int irq, struct irqaction *action, int ret),
61509+ TP_PROTO(int irq, const struct irqaction *action, int ret),
61510
61511 TP_ARGS(irq, action, ret),
61512
61513diff -urNp linux-3.1.1/include/video/udlfb.h linux-3.1.1/include/video/udlfb.h
61514--- linux-3.1.1/include/video/udlfb.h 2011-11-11 15:19:27.000000000 -0500
61515+++ linux-3.1.1/include/video/udlfb.h 2011-11-16 18:39:08.000000000 -0500
61516@@ -51,10 +51,10 @@ struct dlfb_data {
61517 int base8;
61518 u32 pseudo_palette[256];
61519 /* blit-only rendering path metrics, exposed through sysfs */
61520- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61521- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
61522- atomic_t bytes_sent; /* to usb, after compression including overhead */
61523- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
61524+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61525+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
61526+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
61527+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
61528 };
61529
61530 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
61531diff -urNp linux-3.1.1/include/video/uvesafb.h linux-3.1.1/include/video/uvesafb.h
61532--- linux-3.1.1/include/video/uvesafb.h 2011-11-11 15:19:27.000000000 -0500
61533+++ linux-3.1.1/include/video/uvesafb.h 2011-11-16 18:39:08.000000000 -0500
61534@@ -177,6 +177,7 @@ struct uvesafb_par {
61535 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
61536 u8 pmi_setpal; /* PMI for palette changes */
61537 u16 *pmi_base; /* protected mode interface location */
61538+ u8 *pmi_code; /* protected mode code location */
61539 void *pmi_start;
61540 void *pmi_pal;
61541 u8 *vbe_state_orig; /*
61542diff -urNp linux-3.1.1/init/do_mounts.c linux-3.1.1/init/do_mounts.c
61543--- linux-3.1.1/init/do_mounts.c 2011-11-11 15:19:27.000000000 -0500
61544+++ linux-3.1.1/init/do_mounts.c 2011-11-16 18:39:08.000000000 -0500
61545@@ -287,11 +287,11 @@ static void __init get_fs_names(char *pa
61546
61547 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
61548 {
61549- int err = sys_mount(name, "/root", fs, flags, data);
61550+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
61551 if (err)
61552 return err;
61553
61554- sys_chdir((const char __user __force *)"/root");
61555+ sys_chdir((const char __force_user*)"/root");
61556 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
61557 printk(KERN_INFO
61558 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
61559@@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
61560 va_start(args, fmt);
61561 vsprintf(buf, fmt, args);
61562 va_end(args);
61563- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
61564+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
61565 if (fd >= 0) {
61566 sys_ioctl(fd, FDEJECT, 0);
61567 sys_close(fd);
61568 }
61569 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
61570- fd = sys_open("/dev/console", O_RDWR, 0);
61571+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
61572 if (fd >= 0) {
61573 sys_ioctl(fd, TCGETS, (long)&termios);
61574 termios.c_lflag &= ~ICANON;
61575 sys_ioctl(fd, TCSETSF, (long)&termios);
61576- sys_read(fd, &c, 1);
61577+ sys_read(fd, (char __user *)&c, 1);
61578 termios.c_lflag |= ICANON;
61579 sys_ioctl(fd, TCSETSF, (long)&termios);
61580 sys_close(fd);
61581@@ -488,6 +488,6 @@ void __init prepare_namespace(void)
61582 mount_root();
61583 out:
61584 devtmpfs_mount("dev");
61585- sys_mount(".", "/", NULL, MS_MOVE, NULL);
61586- sys_chroot((const char __user __force *)".");
61587+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
61588+ sys_chroot((const char __force_user *)".");
61589 }
61590diff -urNp linux-3.1.1/init/do_mounts.h linux-3.1.1/init/do_mounts.h
61591--- linux-3.1.1/init/do_mounts.h 2011-11-11 15:19:27.000000000 -0500
61592+++ linux-3.1.1/init/do_mounts.h 2011-11-16 18:39:08.000000000 -0500
61593@@ -15,15 +15,15 @@ extern int root_mountflags;
61594
61595 static inline int create_dev(char *name, dev_t dev)
61596 {
61597- sys_unlink(name);
61598- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
61599+ sys_unlink((char __force_user *)name);
61600+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
61601 }
61602
61603 #if BITS_PER_LONG == 32
61604 static inline u32 bstat(char *name)
61605 {
61606 struct stat64 stat;
61607- if (sys_stat64(name, &stat) != 0)
61608+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
61609 return 0;
61610 if (!S_ISBLK(stat.st_mode))
61611 return 0;
61612@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
61613 static inline u32 bstat(char *name)
61614 {
61615 struct stat stat;
61616- if (sys_newstat(name, &stat) != 0)
61617+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
61618 return 0;
61619 if (!S_ISBLK(stat.st_mode))
61620 return 0;
61621diff -urNp linux-3.1.1/init/do_mounts_initrd.c linux-3.1.1/init/do_mounts_initrd.c
61622--- linux-3.1.1/init/do_mounts_initrd.c 2011-11-11 15:19:27.000000000 -0500
61623+++ linux-3.1.1/init/do_mounts_initrd.c 2011-11-16 18:39:08.000000000 -0500
61624@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
61625 create_dev("/dev/root.old", Root_RAM0);
61626 /* mount initrd on rootfs' /root */
61627 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
61628- sys_mkdir("/old", 0700);
61629- root_fd = sys_open("/", 0, 0);
61630- old_fd = sys_open("/old", 0, 0);
61631+ sys_mkdir((const char __force_user *)"/old", 0700);
61632+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
61633+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
61634 /* move initrd over / and chdir/chroot in initrd root */
61635- sys_chdir("/root");
61636- sys_mount(".", "/", NULL, MS_MOVE, NULL);
61637- sys_chroot(".");
61638+ sys_chdir((const char __force_user *)"/root");
61639+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
61640+ sys_chroot((const char __force_user *)".");
61641
61642 /*
61643 * In case that a resume from disk is carried out by linuxrc or one of
61644@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
61645
61646 /* move initrd to rootfs' /old */
61647 sys_fchdir(old_fd);
61648- sys_mount("/", ".", NULL, MS_MOVE, NULL);
61649+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
61650 /* switch root and cwd back to / of rootfs */
61651 sys_fchdir(root_fd);
61652- sys_chroot(".");
61653+ sys_chroot((const char __force_user *)".");
61654 sys_close(old_fd);
61655 sys_close(root_fd);
61656
61657 if (new_decode_dev(real_root_dev) == Root_RAM0) {
61658- sys_chdir("/old");
61659+ sys_chdir((const char __force_user *)"/old");
61660 return;
61661 }
61662
61663@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
61664 mount_root();
61665
61666 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
61667- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
61668+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
61669 if (!error)
61670 printk("okay\n");
61671 else {
61672- int fd = sys_open("/dev/root.old", O_RDWR, 0);
61673+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
61674 if (error == -ENOENT)
61675 printk("/initrd does not exist. Ignored.\n");
61676 else
61677 printk("failed\n");
61678 printk(KERN_NOTICE "Unmounting old root\n");
61679- sys_umount("/old", MNT_DETACH);
61680+ sys_umount((char __force_user *)"/old", MNT_DETACH);
61681 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
61682 if (fd < 0) {
61683 error = fd;
61684@@ -116,11 +116,11 @@ int __init initrd_load(void)
61685 * mounted in the normal path.
61686 */
61687 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
61688- sys_unlink("/initrd.image");
61689+ sys_unlink((const char __force_user *)"/initrd.image");
61690 handle_initrd();
61691 return 1;
61692 }
61693 }
61694- sys_unlink("/initrd.image");
61695+ sys_unlink((const char __force_user *)"/initrd.image");
61696 return 0;
61697 }
61698diff -urNp linux-3.1.1/init/do_mounts_md.c linux-3.1.1/init/do_mounts_md.c
61699--- linux-3.1.1/init/do_mounts_md.c 2011-11-11 15:19:27.000000000 -0500
61700+++ linux-3.1.1/init/do_mounts_md.c 2011-11-16 18:39:08.000000000 -0500
61701@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
61702 partitioned ? "_d" : "", minor,
61703 md_setup_args[ent].device_names);
61704
61705- fd = sys_open(name, 0, 0);
61706+ fd = sys_open((char __force_user *)name, 0, 0);
61707 if (fd < 0) {
61708 printk(KERN_ERR "md: open failed - cannot start "
61709 "array %s\n", name);
61710@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
61711 * array without it
61712 */
61713 sys_close(fd);
61714- fd = sys_open(name, 0, 0);
61715+ fd = sys_open((char __force_user *)name, 0, 0);
61716 sys_ioctl(fd, BLKRRPART, 0);
61717 }
61718 sys_close(fd);
61719@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
61720
61721 wait_for_device_probe();
61722
61723- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
61724+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
61725 if (fd >= 0) {
61726 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
61727 sys_close(fd);
61728diff -urNp linux-3.1.1/init/initramfs.c linux-3.1.1/init/initramfs.c
61729--- linux-3.1.1/init/initramfs.c 2011-11-11 15:19:27.000000000 -0500
61730+++ linux-3.1.1/init/initramfs.c 2011-11-16 18:39:08.000000000 -0500
61731@@ -74,7 +74,7 @@ static void __init free_hash(void)
61732 }
61733 }
61734
61735-static long __init do_utime(char __user *filename, time_t mtime)
61736+static long __init do_utime(__force char __user *filename, time_t mtime)
61737 {
61738 struct timespec t[2];
61739
61740@@ -109,7 +109,7 @@ static void __init dir_utime(void)
61741 struct dir_entry *de, *tmp;
61742 list_for_each_entry_safe(de, tmp, &dir_list, list) {
61743 list_del(&de->list);
61744- do_utime(de->name, de->mtime);
61745+ do_utime((char __force_user *)de->name, de->mtime);
61746 kfree(de->name);
61747 kfree(de);
61748 }
61749@@ -271,7 +271,7 @@ static int __init maybe_link(void)
61750 if (nlink >= 2) {
61751 char *old = find_link(major, minor, ino, mode, collected);
61752 if (old)
61753- return (sys_link(old, collected) < 0) ? -1 : 1;
61754+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
61755 }
61756 return 0;
61757 }
61758@@ -280,11 +280,11 @@ static void __init clean_path(char *path
61759 {
61760 struct stat st;
61761
61762- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
61763+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
61764 if (S_ISDIR(st.st_mode))
61765- sys_rmdir(path);
61766+ sys_rmdir((char __force_user *)path);
61767 else
61768- sys_unlink(path);
61769+ sys_unlink((char __force_user *)path);
61770 }
61771 }
61772
61773@@ -305,7 +305,7 @@ static int __init do_name(void)
61774 int openflags = O_WRONLY|O_CREAT;
61775 if (ml != 1)
61776 openflags |= O_TRUNC;
61777- wfd = sys_open(collected, openflags, mode);
61778+ wfd = sys_open((char __force_user *)collected, openflags, mode);
61779
61780 if (wfd >= 0) {
61781 sys_fchown(wfd, uid, gid);
61782@@ -317,17 +317,17 @@ static int __init do_name(void)
61783 }
61784 }
61785 } else if (S_ISDIR(mode)) {
61786- sys_mkdir(collected, mode);
61787- sys_chown(collected, uid, gid);
61788- sys_chmod(collected, mode);
61789+ sys_mkdir((char __force_user *)collected, mode);
61790+ sys_chown((char __force_user *)collected, uid, gid);
61791+ sys_chmod((char __force_user *)collected, mode);
61792 dir_add(collected, mtime);
61793 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
61794 S_ISFIFO(mode) || S_ISSOCK(mode)) {
61795 if (maybe_link() == 0) {
61796- sys_mknod(collected, mode, rdev);
61797- sys_chown(collected, uid, gid);
61798- sys_chmod(collected, mode);
61799- do_utime(collected, mtime);
61800+ sys_mknod((char __force_user *)collected, mode, rdev);
61801+ sys_chown((char __force_user *)collected, uid, gid);
61802+ sys_chmod((char __force_user *)collected, mode);
61803+ do_utime((char __force_user *)collected, mtime);
61804 }
61805 }
61806 return 0;
61807@@ -336,15 +336,15 @@ static int __init do_name(void)
61808 static int __init do_copy(void)
61809 {
61810 if (count >= body_len) {
61811- sys_write(wfd, victim, body_len);
61812+ sys_write(wfd, (char __force_user *)victim, body_len);
61813 sys_close(wfd);
61814- do_utime(vcollected, mtime);
61815+ do_utime((char __force_user *)vcollected, mtime);
61816 kfree(vcollected);
61817 eat(body_len);
61818 state = SkipIt;
61819 return 0;
61820 } else {
61821- sys_write(wfd, victim, count);
61822+ sys_write(wfd, (char __force_user *)victim, count);
61823 body_len -= count;
61824 eat(count);
61825 return 1;
61826@@ -355,9 +355,9 @@ static int __init do_symlink(void)
61827 {
61828 collected[N_ALIGN(name_len) + body_len] = '\0';
61829 clean_path(collected, 0);
61830- sys_symlink(collected + N_ALIGN(name_len), collected);
61831- sys_lchown(collected, uid, gid);
61832- do_utime(collected, mtime);
61833+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
61834+ sys_lchown((char __force_user *)collected, uid, gid);
61835+ do_utime((char __force_user *)collected, mtime);
61836 state = SkipIt;
61837 next_state = Reset;
61838 return 0;
61839diff -urNp linux-3.1.1/init/Kconfig linux-3.1.1/init/Kconfig
61840--- linux-3.1.1/init/Kconfig 2011-11-11 15:19:27.000000000 -0500
61841+++ linux-3.1.1/init/Kconfig 2011-11-16 18:39:08.000000000 -0500
61842@@ -1202,7 +1202,7 @@ config SLUB_DEBUG
61843
61844 config COMPAT_BRK
61845 bool "Disable heap randomization"
61846- default y
61847+ default n
61848 help
61849 Randomizing heap placement makes heap exploits harder, but it
61850 also breaks ancient binaries (including anything libc5 based).
61851diff -urNp linux-3.1.1/init/main.c linux-3.1.1/init/main.c
61852--- linux-3.1.1/init/main.c 2011-11-11 15:19:27.000000000 -0500
61853+++ linux-3.1.1/init/main.c 2011-11-16 18:40:44.000000000 -0500
61854@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void)
61855 extern void tc_init(void);
61856 #endif
61857
61858+extern void grsecurity_init(void);
61859+
61860 /*
61861 * Debug helper: via this flag we know that we are in 'early bootup code'
61862 * where only the boot processor is running with IRQ disabled. This means
61863@@ -149,6 +151,49 @@ static int __init set_reset_devices(char
61864
61865 __setup("reset_devices", set_reset_devices);
61866
61867+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
61868+extern char pax_enter_kernel_user[];
61869+extern char pax_exit_kernel_user[];
61870+extern pgdval_t clone_pgd_mask;
61871+#endif
61872+
61873+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
61874+static int __init setup_pax_nouderef(char *str)
61875+{
61876+#ifdef CONFIG_X86_32
61877+ unsigned int cpu;
61878+ struct desc_struct *gdt;
61879+
61880+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
61881+ gdt = get_cpu_gdt_table(cpu);
61882+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
61883+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
61884+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
61885+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
61886+ }
61887+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
61888+#else
61889+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
61890+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
61891+ clone_pgd_mask = ~(pgdval_t)0UL;
61892+#endif
61893+
61894+ return 0;
61895+}
61896+early_param("pax_nouderef", setup_pax_nouderef);
61897+#endif
61898+
61899+#ifdef CONFIG_PAX_SOFTMODE
61900+int pax_softmode;
61901+
61902+static int __init setup_pax_softmode(char *str)
61903+{
61904+ get_option(&str, &pax_softmode);
61905+ return 1;
61906+}
61907+__setup("pax_softmode=", setup_pax_softmode);
61908+#endif
61909+
61910 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
61911 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
61912 static const char *panic_later, *panic_param;
61913@@ -678,6 +723,7 @@ int __init_or_module do_one_initcall(ini
61914 {
61915 int count = preempt_count();
61916 int ret;
61917+ const char *msg1 = "", *msg2 = "";
61918
61919 if (initcall_debug)
61920 ret = do_one_initcall_debug(fn);
61921@@ -690,15 +736,15 @@ int __init_or_module do_one_initcall(ini
61922 sprintf(msgbuf, "error code %d ", ret);
61923
61924 if (preempt_count() != count) {
61925- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
61926+ msg1 = " preemption imbalance";
61927 preempt_count() = count;
61928 }
61929 if (irqs_disabled()) {
61930- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
61931+ msg2 = " disabled interrupts";
61932 local_irq_enable();
61933 }
61934- if (msgbuf[0]) {
61935- printk("initcall %pF returned with %s\n", fn, msgbuf);
61936+ if (msgbuf[0] || *msg1 || *msg2) {
61937+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
61938 }
61939
61940 return ret;
61941@@ -817,7 +863,7 @@ static int __init kernel_init(void * unu
61942 do_basic_setup();
61943
61944 /* Open the /dev/console on the rootfs, this should never fail */
61945- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
61946+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
61947 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
61948
61949 (void) sys_dup(0);
61950@@ -830,11 +876,13 @@ static int __init kernel_init(void * unu
61951 if (!ramdisk_execute_command)
61952 ramdisk_execute_command = "/init";
61953
61954- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
61955+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
61956 ramdisk_execute_command = NULL;
61957 prepare_namespace();
61958 }
61959
61960+ grsecurity_init();
61961+
61962 /*
61963 * Ok, we have completed the initial bootup, and
61964 * we're essentially up and running. Get rid of the
61965diff -urNp linux-3.1.1/ipc/mqueue.c linux-3.1.1/ipc/mqueue.c
61966--- linux-3.1.1/ipc/mqueue.c 2011-11-11 15:19:27.000000000 -0500
61967+++ linux-3.1.1/ipc/mqueue.c 2011-11-16 18:40:44.000000000 -0500
61968@@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(st
61969 mq_bytes = (mq_msg_tblsz +
61970 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
61971
61972+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
61973 spin_lock(&mq_lock);
61974 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
61975 u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
61976diff -urNp linux-3.1.1/ipc/msg.c linux-3.1.1/ipc/msg.c
61977--- linux-3.1.1/ipc/msg.c 2011-11-11 15:19:27.000000000 -0500
61978+++ linux-3.1.1/ipc/msg.c 2011-11-16 18:39:08.000000000 -0500
61979@@ -309,18 +309,19 @@ static inline int msg_security(struct ke
61980 return security_msg_queue_associate(msq, msgflg);
61981 }
61982
61983+static struct ipc_ops msg_ops = {
61984+ .getnew = newque,
61985+ .associate = msg_security,
61986+ .more_checks = NULL
61987+};
61988+
61989 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
61990 {
61991 struct ipc_namespace *ns;
61992- struct ipc_ops msg_ops;
61993 struct ipc_params msg_params;
61994
61995 ns = current->nsproxy->ipc_ns;
61996
61997- msg_ops.getnew = newque;
61998- msg_ops.associate = msg_security;
61999- msg_ops.more_checks = NULL;
62000-
62001 msg_params.key = key;
62002 msg_params.flg = msgflg;
62003
62004diff -urNp linux-3.1.1/ipc/sem.c linux-3.1.1/ipc/sem.c
62005--- linux-3.1.1/ipc/sem.c 2011-11-11 15:19:27.000000000 -0500
62006+++ linux-3.1.1/ipc/sem.c 2011-11-16 18:40:44.000000000 -0500
62007@@ -318,10 +318,15 @@ static inline int sem_more_checks(struct
62008 return 0;
62009 }
62010
62011+static struct ipc_ops sem_ops = {
62012+ .getnew = newary,
62013+ .associate = sem_security,
62014+ .more_checks = sem_more_checks
62015+};
62016+
62017 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62018 {
62019 struct ipc_namespace *ns;
62020- struct ipc_ops sem_ops;
62021 struct ipc_params sem_params;
62022
62023 ns = current->nsproxy->ipc_ns;
62024@@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
62025 if (nsems < 0 || nsems > ns->sc_semmsl)
62026 return -EINVAL;
62027
62028- sem_ops.getnew = newary;
62029- sem_ops.associate = sem_security;
62030- sem_ops.more_checks = sem_more_checks;
62031-
62032 sem_params.key = key;
62033 sem_params.flg = semflg;
62034 sem_params.u.nsems = nsems;
62035@@ -848,6 +849,8 @@ static int semctl_main(struct ipc_namesp
62036 int nsems;
62037 struct list_head tasks;
62038
62039+ pax_track_stack();
62040+
62041 sma = sem_lock_check(ns, semid);
62042 if (IS_ERR(sma))
62043 return PTR_ERR(sma);
62044@@ -1295,6 +1298,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
62045 struct ipc_namespace *ns;
62046 struct list_head tasks;
62047
62048+ pax_track_stack();
62049+
62050 ns = current->nsproxy->ipc_ns;
62051
62052 if (nsops < 1 || semid < 0)
62053diff -urNp linux-3.1.1/ipc/shm.c linux-3.1.1/ipc/shm.c
62054--- linux-3.1.1/ipc/shm.c 2011-11-11 15:19:27.000000000 -0500
62055+++ linux-3.1.1/ipc/shm.c 2011-11-16 18:40:44.000000000 -0500
62056@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
62057 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
62058 #endif
62059
62060+#ifdef CONFIG_GRKERNSEC
62061+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62062+ const time_t shm_createtime, const uid_t cuid,
62063+ const int shmid);
62064+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62065+ const time_t shm_createtime);
62066+#endif
62067+
62068 void shm_init_ns(struct ipc_namespace *ns)
62069 {
62070 ns->shm_ctlmax = SHMMAX;
62071@@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *
62072 shp->shm_lprid = 0;
62073 shp->shm_atim = shp->shm_dtim = 0;
62074 shp->shm_ctim = get_seconds();
62075+#ifdef CONFIG_GRKERNSEC
62076+ {
62077+ struct timespec timeval;
62078+ do_posix_clock_monotonic_gettime(&timeval);
62079+
62080+ shp->shm_createtime = timeval.tv_sec;
62081+ }
62082+#endif
62083 shp->shm_segsz = size;
62084 shp->shm_nattch = 0;
62085 shp->shm_file = file;
62086@@ -559,18 +575,19 @@ static inline int shm_more_checks(struct
62087 return 0;
62088 }
62089
62090+static struct ipc_ops shm_ops = {
62091+ .getnew = newseg,
62092+ .associate = shm_security,
62093+ .more_checks = shm_more_checks
62094+};
62095+
62096 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
62097 {
62098 struct ipc_namespace *ns;
62099- struct ipc_ops shm_ops;
62100 struct ipc_params shm_params;
62101
62102 ns = current->nsproxy->ipc_ns;
62103
62104- shm_ops.getnew = newseg;
62105- shm_ops.associate = shm_security;
62106- shm_ops.more_checks = shm_more_checks;
62107-
62108 shm_params.key = key;
62109 shm_params.flg = shmflg;
62110 shm_params.u.size = size;
62111@@ -870,8 +887,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
62112 case SHM_LOCK:
62113 case SHM_UNLOCK:
62114 {
62115- struct file *uninitialized_var(shm_file);
62116-
62117 lru_add_drain_all(); /* drain pagevecs to lru lists */
62118
62119 shp = shm_lock_check(ns, shmid);
62120@@ -1004,9 +1019,21 @@ long do_shmat(int shmid, char __user *sh
62121 if (err)
62122 goto out_unlock;
62123
62124+#ifdef CONFIG_GRKERNSEC
62125+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
62126+ shp->shm_perm.cuid, shmid) ||
62127+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
62128+ err = -EACCES;
62129+ goto out_unlock;
62130+ }
62131+#endif
62132+
62133 path = shp->shm_file->f_path;
62134 path_get(&path);
62135 shp->shm_nattch++;
62136+#ifdef CONFIG_GRKERNSEC
62137+ shp->shm_lapid = current->pid;
62138+#endif
62139 size = i_size_read(path.dentry->d_inode);
62140 shm_unlock(shp);
62141
62142diff -urNp linux-3.1.1/kernel/acct.c linux-3.1.1/kernel/acct.c
62143--- linux-3.1.1/kernel/acct.c 2011-11-11 15:19:27.000000000 -0500
62144+++ linux-3.1.1/kernel/acct.c 2011-11-16 18:39:08.000000000 -0500
62145@@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
62146 */
62147 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
62148 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
62149- file->f_op->write(file, (char *)&ac,
62150+ file->f_op->write(file, (char __force_user *)&ac,
62151 sizeof(acct_t), &file->f_pos);
62152 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
62153 set_fs(fs);
62154diff -urNp linux-3.1.1/kernel/audit.c linux-3.1.1/kernel/audit.c
62155--- linux-3.1.1/kernel/audit.c 2011-11-11 15:19:27.000000000 -0500
62156+++ linux-3.1.1/kernel/audit.c 2011-11-16 18:39:08.000000000 -0500
62157@@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
62158 3) suppressed due to audit_rate_limit
62159 4) suppressed due to audit_backlog_limit
62160 */
62161-static atomic_t audit_lost = ATOMIC_INIT(0);
62162+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
62163
62164 /* The netlink socket. */
62165 static struct sock *audit_sock;
62166@@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
62167 unsigned long now;
62168 int print;
62169
62170- atomic_inc(&audit_lost);
62171+ atomic_inc_unchecked(&audit_lost);
62172
62173 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
62174
62175@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
62176 printk(KERN_WARNING
62177 "audit: audit_lost=%d audit_rate_limit=%d "
62178 "audit_backlog_limit=%d\n",
62179- atomic_read(&audit_lost),
62180+ atomic_read_unchecked(&audit_lost),
62181 audit_rate_limit,
62182 audit_backlog_limit);
62183 audit_panic(message);
62184@@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_b
62185 status_set.pid = audit_pid;
62186 status_set.rate_limit = audit_rate_limit;
62187 status_set.backlog_limit = audit_backlog_limit;
62188- status_set.lost = atomic_read(&audit_lost);
62189+ status_set.lost = atomic_read_unchecked(&audit_lost);
62190 status_set.backlog = skb_queue_len(&audit_skb_queue);
62191 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
62192 &status_set, sizeof(status_set));
62193diff -urNp linux-3.1.1/kernel/auditsc.c linux-3.1.1/kernel/auditsc.c
62194--- linux-3.1.1/kernel/auditsc.c 2011-11-11 15:19:27.000000000 -0500
62195+++ linux-3.1.1/kernel/auditsc.c 2011-11-16 18:39:08.000000000 -0500
62196@@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_conte
62197 }
62198
62199 /* global counter which is incremented every time something logs in */
62200-static atomic_t session_id = ATOMIC_INIT(0);
62201+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
62202
62203 /**
62204 * audit_set_loginuid - set a task's audit_context loginuid
62205@@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT
62206 */
62207 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
62208 {
62209- unsigned int sessionid = atomic_inc_return(&session_id);
62210+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
62211 struct audit_context *context = task->audit_context;
62212
62213 if (context && context->in_syscall) {
62214diff -urNp linux-3.1.1/kernel/capability.c linux-3.1.1/kernel/capability.c
62215--- linux-3.1.1/kernel/capability.c 2011-11-11 15:19:27.000000000 -0500
62216+++ linux-3.1.1/kernel/capability.c 2011-11-16 18:40:44.000000000 -0500
62217@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
62218 * before modification is attempted and the application
62219 * fails.
62220 */
62221+ if (tocopy > ARRAY_SIZE(kdata))
62222+ return -EFAULT;
62223+
62224 if (copy_to_user(dataptr, kdata, tocopy
62225 * sizeof(struct __user_cap_data_struct))) {
62226 return -EFAULT;
62227@@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *n
62228 BUG();
62229 }
62230
62231- if (security_capable(ns, current_cred(), cap) == 0) {
62232+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
62233 current->flags |= PF_SUPERPRIV;
62234 return true;
62235 }
62236@@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *n
62237 }
62238 EXPORT_SYMBOL(ns_capable);
62239
62240+bool ns_capable_nolog(struct user_namespace *ns, int cap)
62241+{
62242+ if (unlikely(!cap_valid(cap))) {
62243+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
62244+ BUG();
62245+ }
62246+
62247+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
62248+ current->flags |= PF_SUPERPRIV;
62249+ return true;
62250+ }
62251+ return false;
62252+}
62253+EXPORT_SYMBOL(ns_capable_nolog);
62254+
62255+bool capable_nolog(int cap)
62256+{
62257+ return ns_capable_nolog(&init_user_ns, cap);
62258+}
62259+EXPORT_SYMBOL(capable_nolog);
62260+
62261 /**
62262 * task_ns_capable - Determine whether current task has a superior
62263 * capability targeted at a specific task's user namespace.
62264@@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct
62265 }
62266 EXPORT_SYMBOL(task_ns_capable);
62267
62268+bool task_ns_capable_nolog(struct task_struct *t, int cap)
62269+{
62270+ return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
62271+}
62272+EXPORT_SYMBOL(task_ns_capable_nolog);
62273+
62274 /**
62275 * nsown_capable - Check superior capability to one's own user_ns
62276 * @cap: The capability in question
62277diff -urNp linux-3.1.1/kernel/cgroup.c linux-3.1.1/kernel/cgroup.c
62278--- linux-3.1.1/kernel/cgroup.c 2011-11-11 15:19:27.000000000 -0500
62279+++ linux-3.1.1/kernel/cgroup.c 2011-11-16 18:40:44.000000000 -0500
62280@@ -595,6 +595,8 @@ static struct css_set *find_css_set(
62281 struct hlist_head *hhead;
62282 struct cg_cgroup_link *link;
62283
62284+ pax_track_stack();
62285+
62286 /* First see if we already have a cgroup group that matches
62287 * the desired set */
62288 read_lock(&css_set_lock);
62289diff -urNp linux-3.1.1/kernel/compat.c linux-3.1.1/kernel/compat.c
62290--- linux-3.1.1/kernel/compat.c 2011-11-11 15:19:27.000000000 -0500
62291+++ linux-3.1.1/kernel/compat.c 2011-11-16 18:40:44.000000000 -0500
62292@@ -13,6 +13,7 @@
62293
62294 #include <linux/linkage.h>
62295 #include <linux/compat.h>
62296+#include <linux/module.h>
62297 #include <linux/errno.h>
62298 #include <linux/time.h>
62299 #include <linux/signal.h>
62300@@ -167,7 +168,7 @@ static long compat_nanosleep_restart(str
62301 mm_segment_t oldfs;
62302 long ret;
62303
62304- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
62305+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
62306 oldfs = get_fs();
62307 set_fs(KERNEL_DS);
62308 ret = hrtimer_nanosleep_restart(restart);
62309@@ -199,7 +200,7 @@ asmlinkage long compat_sys_nanosleep(str
62310 oldfs = get_fs();
62311 set_fs(KERNEL_DS);
62312 ret = hrtimer_nanosleep(&tu,
62313- rmtp ? (struct timespec __user *)&rmt : NULL,
62314+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
62315 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
62316 set_fs(oldfs);
62317
62318@@ -308,7 +309,7 @@ asmlinkage long compat_sys_sigpending(co
62319 mm_segment_t old_fs = get_fs();
62320
62321 set_fs(KERNEL_DS);
62322- ret = sys_sigpending((old_sigset_t __user *) &s);
62323+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
62324 set_fs(old_fs);
62325 if (ret == 0)
62326 ret = put_user(s, set);
62327@@ -331,8 +332,8 @@ asmlinkage long compat_sys_sigprocmask(i
62328 old_fs = get_fs();
62329 set_fs(KERNEL_DS);
62330 ret = sys_sigprocmask(how,
62331- set ? (old_sigset_t __user *) &s : NULL,
62332- oset ? (old_sigset_t __user *) &s : NULL);
62333+ set ? (old_sigset_t __force_user *) &s : NULL,
62334+ oset ? (old_sigset_t __force_user *) &s : NULL);
62335 set_fs(old_fs);
62336 if (ret == 0)
62337 if (oset)
62338@@ -369,7 +370,7 @@ asmlinkage long compat_sys_old_getrlimit
62339 mm_segment_t old_fs = get_fs();
62340
62341 set_fs(KERNEL_DS);
62342- ret = sys_old_getrlimit(resource, &r);
62343+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
62344 set_fs(old_fs);
62345
62346 if (!ret) {
62347@@ -441,7 +442,7 @@ asmlinkage long compat_sys_getrusage(int
62348 mm_segment_t old_fs = get_fs();
62349
62350 set_fs(KERNEL_DS);
62351- ret = sys_getrusage(who, (struct rusage __user *) &r);
62352+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
62353 set_fs(old_fs);
62354
62355 if (ret)
62356@@ -468,8 +469,8 @@ compat_sys_wait4(compat_pid_t pid, compa
62357 set_fs (KERNEL_DS);
62358 ret = sys_wait4(pid,
62359 (stat_addr ?
62360- (unsigned int __user *) &status : NULL),
62361- options, (struct rusage __user *) &r);
62362+ (unsigned int __force_user *) &status : NULL),
62363+ options, (struct rusage __force_user *) &r);
62364 set_fs (old_fs);
62365
62366 if (ret > 0) {
62367@@ -494,8 +495,8 @@ asmlinkage long compat_sys_waitid(int wh
62368 memset(&info, 0, sizeof(info));
62369
62370 set_fs(KERNEL_DS);
62371- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
62372- uru ? (struct rusage __user *)&ru : NULL);
62373+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
62374+ uru ? (struct rusage __force_user *)&ru : NULL);
62375 set_fs(old_fs);
62376
62377 if ((ret < 0) || (info.si_signo == 0))
62378@@ -625,8 +626,8 @@ long compat_sys_timer_settime(timer_t ti
62379 oldfs = get_fs();
62380 set_fs(KERNEL_DS);
62381 err = sys_timer_settime(timer_id, flags,
62382- (struct itimerspec __user *) &newts,
62383- (struct itimerspec __user *) &oldts);
62384+ (struct itimerspec __force_user *) &newts,
62385+ (struct itimerspec __force_user *) &oldts);
62386 set_fs(oldfs);
62387 if (!err && old && put_compat_itimerspec(old, &oldts))
62388 return -EFAULT;
62389@@ -643,7 +644,7 @@ long compat_sys_timer_gettime(timer_t ti
62390 oldfs = get_fs();
62391 set_fs(KERNEL_DS);
62392 err = sys_timer_gettime(timer_id,
62393- (struct itimerspec __user *) &ts);
62394+ (struct itimerspec __force_user *) &ts);
62395 set_fs(oldfs);
62396 if (!err && put_compat_itimerspec(setting, &ts))
62397 return -EFAULT;
62398@@ -662,7 +663,7 @@ long compat_sys_clock_settime(clockid_t
62399 oldfs = get_fs();
62400 set_fs(KERNEL_DS);
62401 err = sys_clock_settime(which_clock,
62402- (struct timespec __user *) &ts);
62403+ (struct timespec __force_user *) &ts);
62404 set_fs(oldfs);
62405 return err;
62406 }
62407@@ -677,7 +678,7 @@ long compat_sys_clock_gettime(clockid_t
62408 oldfs = get_fs();
62409 set_fs(KERNEL_DS);
62410 err = sys_clock_gettime(which_clock,
62411- (struct timespec __user *) &ts);
62412+ (struct timespec __force_user *) &ts);
62413 set_fs(oldfs);
62414 if (!err && put_compat_timespec(&ts, tp))
62415 return -EFAULT;
62416@@ -697,7 +698,7 @@ long compat_sys_clock_adjtime(clockid_t
62417
62418 oldfs = get_fs();
62419 set_fs(KERNEL_DS);
62420- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
62421+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
62422 set_fs(oldfs);
62423
62424 err = compat_put_timex(utp, &txc);
62425@@ -717,7 +718,7 @@ long compat_sys_clock_getres(clockid_t w
62426 oldfs = get_fs();
62427 set_fs(KERNEL_DS);
62428 err = sys_clock_getres(which_clock,
62429- (struct timespec __user *) &ts);
62430+ (struct timespec __force_user *) &ts);
62431 set_fs(oldfs);
62432 if (!err && tp && put_compat_timespec(&ts, tp))
62433 return -EFAULT;
62434@@ -729,9 +730,9 @@ static long compat_clock_nanosleep_resta
62435 long err;
62436 mm_segment_t oldfs;
62437 struct timespec tu;
62438- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
62439+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
62440
62441- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
62442+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
62443 oldfs = get_fs();
62444 set_fs(KERNEL_DS);
62445 err = clock_nanosleep_restart(restart);
62446@@ -763,8 +764,8 @@ long compat_sys_clock_nanosleep(clockid_
62447 oldfs = get_fs();
62448 set_fs(KERNEL_DS);
62449 err = sys_clock_nanosleep(which_clock, flags,
62450- (struct timespec __user *) &in,
62451- (struct timespec __user *) &out);
62452+ (struct timespec __force_user *) &in,
62453+ (struct timespec __force_user *) &out);
62454 set_fs(oldfs);
62455
62456 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
62457diff -urNp linux-3.1.1/kernel/configs.c linux-3.1.1/kernel/configs.c
62458--- linux-3.1.1/kernel/configs.c 2011-11-11 15:19:27.000000000 -0500
62459+++ linux-3.1.1/kernel/configs.c 2011-11-16 18:40:44.000000000 -0500
62460@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
62461 struct proc_dir_entry *entry;
62462
62463 /* create the current config file */
62464+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
62465+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
62466+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
62467+ &ikconfig_file_ops);
62468+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62469+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
62470+ &ikconfig_file_ops);
62471+#endif
62472+#else
62473 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
62474 &ikconfig_file_ops);
62475+#endif
62476+
62477 if (!entry)
62478 return -ENOMEM;
62479
62480diff -urNp linux-3.1.1/kernel/cred.c linux-3.1.1/kernel/cred.c
62481--- linux-3.1.1/kernel/cred.c 2011-11-11 15:19:27.000000000 -0500
62482+++ linux-3.1.1/kernel/cred.c 2011-11-16 18:40:44.000000000 -0500
62483@@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
62484 */
62485 void __put_cred(struct cred *cred)
62486 {
62487+ pax_track_stack();
62488+
62489 kdebug("__put_cred(%p{%d,%d})", cred,
62490 atomic_read(&cred->usage),
62491 read_cred_subscribers(cred));
62492@@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
62493 {
62494 struct cred *cred;
62495
62496+ pax_track_stack();
62497+
62498 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
62499 atomic_read(&tsk->cred->usage),
62500 read_cred_subscribers(tsk->cred));
62501@@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct
62502 {
62503 const struct cred *cred;
62504
62505+ pax_track_stack();
62506+
62507 rcu_read_lock();
62508
62509 do {
62510@@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
62511 {
62512 struct cred *new;
62513
62514+ pax_track_stack();
62515+
62516 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
62517 if (!new)
62518 return NULL;
62519@@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
62520 const struct cred *old;
62521 struct cred *new;
62522
62523+ pax_track_stack();
62524+
62525 validate_process_creds();
62526
62527 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62528@@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
62529 struct thread_group_cred *tgcred = NULL;
62530 struct cred *new;
62531
62532+ pax_track_stack();
62533+
62534 #ifdef CONFIG_KEYS
62535 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
62536 if (!tgcred)
62537@@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
62538 struct cred *new;
62539 int ret;
62540
62541+ pax_track_stack();
62542+
62543 if (
62544 #ifdef CONFIG_KEYS
62545 !p->cred->thread_keyring &&
62546@@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
62547 struct task_struct *task = current;
62548 const struct cred *old = task->real_cred;
62549
62550+ pax_track_stack();
62551+
62552 kdebug("commit_creds(%p{%d,%d})", new,
62553 atomic_read(&new->usage),
62554 read_cred_subscribers(new));
62555@@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
62556
62557 get_cred(new); /* we will require a ref for the subj creds too */
62558
62559+ gr_set_role_label(task, new->uid, new->gid);
62560+
62561 /* dumpability changes */
62562 if (old->euid != new->euid ||
62563 old->egid != new->egid ||
62564@@ -549,6 +567,8 @@ EXPORT_SYMBOL(commit_creds);
62565 */
62566 void abort_creds(struct cred *new)
62567 {
62568+ pax_track_stack();
62569+
62570 kdebug("abort_creds(%p{%d,%d})", new,
62571 atomic_read(&new->usage),
62572 read_cred_subscribers(new));
62573@@ -572,6 +592,8 @@ const struct cred *override_creds(const
62574 {
62575 const struct cred *old = current->cred;
62576
62577+ pax_track_stack();
62578+
62579 kdebug("override_creds(%p{%d,%d})", new,
62580 atomic_read(&new->usage),
62581 read_cred_subscribers(new));
62582@@ -601,6 +623,8 @@ void revert_creds(const struct cred *old
62583 {
62584 const struct cred *override = current->cred;
62585
62586+ pax_track_stack();
62587+
62588 kdebug("revert_creds(%p{%d,%d})", old,
62589 atomic_read(&old->usage),
62590 read_cred_subscribers(old));
62591@@ -647,6 +671,8 @@ struct cred *prepare_kernel_cred(struct
62592 const struct cred *old;
62593 struct cred *new;
62594
62595+ pax_track_stack();
62596+
62597 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62598 if (!new)
62599 return NULL;
62600@@ -701,6 +727,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
62601 */
62602 int set_security_override(struct cred *new, u32 secid)
62603 {
62604+ pax_track_stack();
62605+
62606 return security_kernel_act_as(new, secid);
62607 }
62608 EXPORT_SYMBOL(set_security_override);
62609@@ -720,6 +748,8 @@ int set_security_override_from_ctx(struc
62610 u32 secid;
62611 int ret;
62612
62613+ pax_track_stack();
62614+
62615 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
62616 if (ret < 0)
62617 return ret;
62618diff -urNp linux-3.1.1/kernel/debug/debug_core.c linux-3.1.1/kernel/debug/debug_core.c
62619--- linux-3.1.1/kernel/debug/debug_core.c 2011-11-11 15:19:27.000000000 -0500
62620+++ linux-3.1.1/kernel/debug/debug_core.c 2011-11-16 18:39:08.000000000 -0500
62621@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
62622 */
62623 static atomic_t masters_in_kgdb;
62624 static atomic_t slaves_in_kgdb;
62625-static atomic_t kgdb_break_tasklet_var;
62626+static atomic_unchecked_t kgdb_break_tasklet_var;
62627 atomic_t kgdb_setting_breakpoint;
62628
62629 struct task_struct *kgdb_usethread;
62630@@ -129,7 +129,7 @@ int kgdb_single_step;
62631 static pid_t kgdb_sstep_pid;
62632
62633 /* to keep track of the CPU which is doing the single stepping*/
62634-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
62635+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
62636
62637 /*
62638 * If you are debugging a problem where roundup (the collection of
62639@@ -542,7 +542,7 @@ return_normal:
62640 * kernel will only try for the value of sstep_tries before
62641 * giving up and continuing on.
62642 */
62643- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
62644+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
62645 (kgdb_info[cpu].task &&
62646 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
62647 atomic_set(&kgdb_active, -1);
62648@@ -636,8 +636,8 @@ cpu_master_loop:
62649 }
62650
62651 kgdb_restore:
62652- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
62653- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
62654+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
62655+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
62656 if (kgdb_info[sstep_cpu].task)
62657 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
62658 else
62659@@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
62660 static void kgdb_tasklet_bpt(unsigned long ing)
62661 {
62662 kgdb_breakpoint();
62663- atomic_set(&kgdb_break_tasklet_var, 0);
62664+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
62665 }
62666
62667 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
62668
62669 void kgdb_schedule_breakpoint(void)
62670 {
62671- if (atomic_read(&kgdb_break_tasklet_var) ||
62672+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
62673 atomic_read(&kgdb_active) != -1 ||
62674 atomic_read(&kgdb_setting_breakpoint))
62675 return;
62676- atomic_inc(&kgdb_break_tasklet_var);
62677+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
62678 tasklet_schedule(&kgdb_tasklet_breakpoint);
62679 }
62680 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
62681diff -urNp linux-3.1.1/kernel/debug/kdb/kdb_main.c linux-3.1.1/kernel/debug/kdb/kdb_main.c
62682--- linux-3.1.1/kernel/debug/kdb/kdb_main.c 2011-11-11 15:19:27.000000000 -0500
62683+++ linux-3.1.1/kernel/debug/kdb/kdb_main.c 2011-11-16 18:39:08.000000000 -0500
62684@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
62685 list_for_each_entry(mod, kdb_modules, list) {
62686
62687 kdb_printf("%-20s%8u 0x%p ", mod->name,
62688- mod->core_size, (void *)mod);
62689+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
62690 #ifdef CONFIG_MODULE_UNLOAD
62691 kdb_printf("%4d ", module_refcount(mod));
62692 #endif
62693@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
62694 kdb_printf(" (Loading)");
62695 else
62696 kdb_printf(" (Live)");
62697- kdb_printf(" 0x%p", mod->module_core);
62698+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
62699
62700 #ifdef CONFIG_MODULE_UNLOAD
62701 {
62702diff -urNp linux-3.1.1/kernel/events/core.c linux-3.1.1/kernel/events/core.c
62703--- linux-3.1.1/kernel/events/core.c 2011-11-11 15:19:27.000000000 -0500
62704+++ linux-3.1.1/kernel/events/core.c 2011-11-16 18:39:08.000000000 -0500
62705@@ -172,7 +172,7 @@ int perf_proc_update_handler(struct ctl_
62706 return 0;
62707 }
62708
62709-static atomic64_t perf_event_id;
62710+static atomic64_unchecked_t perf_event_id;
62711
62712 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
62713 enum event_type_t event_type);
62714@@ -2535,7 +2535,7 @@ static void __perf_event_read(void *info
62715
62716 static inline u64 perf_event_count(struct perf_event *event)
62717 {
62718- return local64_read(&event->count) + atomic64_read(&event->child_count);
62719+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
62720 }
62721
62722 static u64 perf_event_read(struct perf_event *event)
62723@@ -3060,9 +3060,9 @@ u64 perf_event_read_value(struct perf_ev
62724 mutex_lock(&event->child_mutex);
62725 total += perf_event_read(event);
62726 *enabled += event->total_time_enabled +
62727- atomic64_read(&event->child_total_time_enabled);
62728+ atomic64_read_unchecked(&event->child_total_time_enabled);
62729 *running += event->total_time_running +
62730- atomic64_read(&event->child_total_time_running);
62731+ atomic64_read_unchecked(&event->child_total_time_running);
62732
62733 list_for_each_entry(child, &event->child_list, child_list) {
62734 total += perf_event_read(child);
62735@@ -3448,10 +3448,10 @@ void perf_event_update_userpage(struct p
62736 userpg->offset -= local64_read(&event->hw.prev_count);
62737
62738 userpg->time_enabled = enabled +
62739- atomic64_read(&event->child_total_time_enabled);
62740+ atomic64_read_unchecked(&event->child_total_time_enabled);
62741
62742 userpg->time_running = running +
62743- atomic64_read(&event->child_total_time_running);
62744+ atomic64_read_unchecked(&event->child_total_time_running);
62745
62746 barrier();
62747 ++userpg->lock;
62748@@ -3822,11 +3822,11 @@ static void perf_output_read_one(struct
62749 values[n++] = perf_event_count(event);
62750 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
62751 values[n++] = enabled +
62752- atomic64_read(&event->child_total_time_enabled);
62753+ atomic64_read_unchecked(&event->child_total_time_enabled);
62754 }
62755 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
62756 values[n++] = running +
62757- atomic64_read(&event->child_total_time_running);
62758+ atomic64_read_unchecked(&event->child_total_time_running);
62759 }
62760 if (read_format & PERF_FORMAT_ID)
62761 values[n++] = primary_event_id(event);
62762@@ -4477,12 +4477,12 @@ static void perf_event_mmap_event(struct
62763 * need to add enough zero bytes after the string to handle
62764 * the 64bit alignment we do later.
62765 */
62766- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
62767+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
62768 if (!buf) {
62769 name = strncpy(tmp, "//enomem", sizeof(tmp));
62770 goto got_name;
62771 }
62772- name = d_path(&file->f_path, buf, PATH_MAX);
62773+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
62774 if (IS_ERR(name)) {
62775 name = strncpy(tmp, "//toolong", sizeof(tmp));
62776 goto got_name;
62777@@ -5833,7 +5833,7 @@ perf_event_alloc(struct perf_event_attr
62778 event->parent = parent_event;
62779
62780 event->ns = get_pid_ns(current->nsproxy->pid_ns);
62781- event->id = atomic64_inc_return(&perf_event_id);
62782+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
62783
62784 event->state = PERF_EVENT_STATE_INACTIVE;
62785
62786@@ -6355,10 +6355,10 @@ static void sync_child_event(struct perf
62787 /*
62788 * Add back the child's count to the parent's count:
62789 */
62790- atomic64_add(child_val, &parent_event->child_count);
62791- atomic64_add(child_event->total_time_enabled,
62792+ atomic64_add_unchecked(child_val, &parent_event->child_count);
62793+ atomic64_add_unchecked(child_event->total_time_enabled,
62794 &parent_event->child_total_time_enabled);
62795- atomic64_add(child_event->total_time_running,
62796+ atomic64_add_unchecked(child_event->total_time_running,
62797 &parent_event->child_total_time_running);
62798
62799 /*
62800diff -urNp linux-3.1.1/kernel/exit.c linux-3.1.1/kernel/exit.c
62801--- linux-3.1.1/kernel/exit.c 2011-11-11 15:19:27.000000000 -0500
62802+++ linux-3.1.1/kernel/exit.c 2011-11-16 19:33:48.000000000 -0500
62803@@ -57,6 +57,10 @@
62804 #include <asm/pgtable.h>
62805 #include <asm/mmu_context.h>
62806
62807+#ifdef CONFIG_GRKERNSEC
62808+extern rwlock_t grsec_exec_file_lock;
62809+#endif
62810+
62811 static void exit_mm(struct task_struct * tsk);
62812
62813 static void __unhash_process(struct task_struct *p, bool group_dead)
62814@@ -168,6 +172,10 @@ void release_task(struct task_struct * p
62815 struct task_struct *leader;
62816 int zap_leader;
62817 repeat:
62818+#ifdef CONFIG_NET
62819+ gr_del_task_from_ip_table(p);
62820+#endif
62821+
62822 /* don't need to get the RCU readlock here - the process is dead and
62823 * can't be modifying its own credentials. But shut RCU-lockdep up */
62824 rcu_read_lock();
62825@@ -324,11 +332,22 @@ static void reparent_to_kthreadd(void)
62826 {
62827 write_lock_irq(&tasklist_lock);
62828
62829+#ifdef CONFIG_GRKERNSEC
62830+ write_lock(&grsec_exec_file_lock);
62831+ if (current->exec_file) {
62832+ fput(current->exec_file);
62833+ current->exec_file = NULL;
62834+ }
62835+ write_unlock(&grsec_exec_file_lock);
62836+#endif
62837+
62838 ptrace_unlink(current);
62839 /* Reparent to init */
62840 current->real_parent = current->parent = kthreadd_task;
62841 list_move_tail(&current->sibling, &current->real_parent->children);
62842
62843+ gr_set_kernel_label(current);
62844+
62845 /* Set the exit signal to SIGCHLD so we signal init on exit */
62846 current->exit_signal = SIGCHLD;
62847
62848@@ -380,7 +399,7 @@ int allow_signal(int sig)
62849 * know it'll be handled, so that they don't get converted to
62850 * SIGKILL or just silently dropped.
62851 */
62852- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
62853+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
62854 recalc_sigpending();
62855 spin_unlock_irq(&current->sighand->siglock);
62856 return 0;
62857@@ -416,6 +435,17 @@ void daemonize(const char *name, ...)
62858 vsnprintf(current->comm, sizeof(current->comm), name, args);
62859 va_end(args);
62860
62861+#ifdef CONFIG_GRKERNSEC
62862+ write_lock(&grsec_exec_file_lock);
62863+ if (current->exec_file) {
62864+ fput(current->exec_file);
62865+ current->exec_file = NULL;
62866+ }
62867+ write_unlock(&grsec_exec_file_lock);
62868+#endif
62869+
62870+ gr_set_kernel_label(current);
62871+
62872 /*
62873 * If we were started as result of loading a module, close all of the
62874 * user space pages. We don't need them, and if we didn't close them
62875@@ -895,6 +925,8 @@ NORET_TYPE void do_exit(long code)
62876 struct task_struct *tsk = current;
62877 int group_dead;
62878
62879+ set_fs(USER_DS);
62880+
62881 profile_task_exit(tsk);
62882
62883 WARN_ON(blk_needs_flush_plug(tsk));
62884@@ -911,7 +943,6 @@ NORET_TYPE void do_exit(long code)
62885 * mm_release()->clear_child_tid() from writing to a user-controlled
62886 * kernel address.
62887 */
62888- set_fs(USER_DS);
62889
62890 ptrace_event(PTRACE_EVENT_EXIT, code);
62891
62892@@ -973,6 +1004,9 @@ NORET_TYPE void do_exit(long code)
62893 tsk->exit_code = code;
62894 taskstats_exit(tsk, group_dead);
62895
62896+ gr_acl_handle_psacct(tsk, code);
62897+ gr_acl_handle_exit();
62898+
62899 exit_mm(tsk);
62900
62901 if (group_dead)
62902diff -urNp linux-3.1.1/kernel/fork.c linux-3.1.1/kernel/fork.c
62903--- linux-3.1.1/kernel/fork.c 2011-11-11 15:19:27.000000000 -0500
62904+++ linux-3.1.1/kernel/fork.c 2011-11-16 19:36:31.000000000 -0500
62905@@ -285,7 +285,7 @@ static struct task_struct *dup_task_stru
62906 *stackend = STACK_END_MAGIC; /* for overflow detection */
62907
62908 #ifdef CONFIG_CC_STACKPROTECTOR
62909- tsk->stack_canary = get_random_int();
62910+ tsk->stack_canary = pax_get_random_long();
62911 #endif
62912
62913 /*
62914@@ -309,13 +309,77 @@ out:
62915 }
62916
62917 #ifdef CONFIG_MMU
62918+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
62919+{
62920+ struct vm_area_struct *tmp;
62921+ unsigned long charge;
62922+ struct mempolicy *pol;
62923+ struct file *file;
62924+
62925+ charge = 0;
62926+ if (mpnt->vm_flags & VM_ACCOUNT) {
62927+ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
62928+ if (security_vm_enough_memory(len))
62929+ goto fail_nomem;
62930+ charge = len;
62931+ }
62932+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62933+ if (!tmp)
62934+ goto fail_nomem;
62935+ *tmp = *mpnt;
62936+ tmp->vm_mm = mm;
62937+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
62938+ pol = mpol_dup(vma_policy(mpnt));
62939+ if (IS_ERR(pol))
62940+ goto fail_nomem_policy;
62941+ vma_set_policy(tmp, pol);
62942+ if (anon_vma_fork(tmp, mpnt))
62943+ goto fail_nomem_anon_vma_fork;
62944+ tmp->vm_flags &= ~VM_LOCKED;
62945+ tmp->vm_next = tmp->vm_prev = NULL;
62946+ tmp->vm_mirror = NULL;
62947+ file = tmp->vm_file;
62948+ if (file) {
62949+ struct inode *inode = file->f_path.dentry->d_inode;
62950+ struct address_space *mapping = file->f_mapping;
62951+
62952+ get_file(file);
62953+ if (tmp->vm_flags & VM_DENYWRITE)
62954+ atomic_dec(&inode->i_writecount);
62955+ mutex_lock(&mapping->i_mmap_mutex);
62956+ if (tmp->vm_flags & VM_SHARED)
62957+ mapping->i_mmap_writable++;
62958+ flush_dcache_mmap_lock(mapping);
62959+ /* insert tmp into the share list, just after mpnt */
62960+ vma_prio_tree_add(tmp, mpnt);
62961+ flush_dcache_mmap_unlock(mapping);
62962+ mutex_unlock(&mapping->i_mmap_mutex);
62963+ }
62964+
62965+ /*
62966+ * Clear hugetlb-related page reserves for children. This only
62967+ * affects MAP_PRIVATE mappings. Faults generated by the child
62968+ * are not guaranteed to succeed, even if read-only
62969+ */
62970+ if (is_vm_hugetlb_page(tmp))
62971+ reset_vma_resv_huge_pages(tmp);
62972+
62973+ return tmp;
62974+
62975+fail_nomem_anon_vma_fork:
62976+ mpol_put(pol);
62977+fail_nomem_policy:
62978+ kmem_cache_free(vm_area_cachep, tmp);
62979+fail_nomem:
62980+ vm_unacct_memory(charge);
62981+ return NULL;
62982+}
62983+
62984 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
62985 {
62986 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
62987 struct rb_node **rb_link, *rb_parent;
62988 int retval;
62989- unsigned long charge;
62990- struct mempolicy *pol;
62991
62992 down_write(&oldmm->mmap_sem);
62993 flush_cache_dup_mm(oldmm);
62994@@ -327,8 +391,8 @@ static int dup_mmap(struct mm_struct *mm
62995 mm->locked_vm = 0;
62996 mm->mmap = NULL;
62997 mm->mmap_cache = NULL;
62998- mm->free_area_cache = oldmm->mmap_base;
62999- mm->cached_hole_size = ~0UL;
63000+ mm->free_area_cache = oldmm->free_area_cache;
63001+ mm->cached_hole_size = oldmm->cached_hole_size;
63002 mm->map_count = 0;
63003 cpumask_clear(mm_cpumask(mm));
63004 mm->mm_rb = RB_ROOT;
63005@@ -344,8 +408,6 @@ static int dup_mmap(struct mm_struct *mm
63006
63007 prev = NULL;
63008 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
63009- struct file *file;
63010-
63011 if (mpnt->vm_flags & VM_DONTCOPY) {
63012 long pages = vma_pages(mpnt);
63013 mm->total_vm -= pages;
63014@@ -353,55 +415,13 @@ static int dup_mmap(struct mm_struct *mm
63015 -pages);
63016 continue;
63017 }
63018- charge = 0;
63019- if (mpnt->vm_flags & VM_ACCOUNT) {
63020- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63021- if (security_vm_enough_memory(len))
63022- goto fail_nomem;
63023- charge = len;
63024- }
63025- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63026- if (!tmp)
63027- goto fail_nomem;
63028- *tmp = *mpnt;
63029- INIT_LIST_HEAD(&tmp->anon_vma_chain);
63030- pol = mpol_dup(vma_policy(mpnt));
63031- retval = PTR_ERR(pol);
63032- if (IS_ERR(pol))
63033- goto fail_nomem_policy;
63034- vma_set_policy(tmp, pol);
63035- tmp->vm_mm = mm;
63036- if (anon_vma_fork(tmp, mpnt))
63037- goto fail_nomem_anon_vma_fork;
63038- tmp->vm_flags &= ~VM_LOCKED;
63039- tmp->vm_next = tmp->vm_prev = NULL;
63040- file = tmp->vm_file;
63041- if (file) {
63042- struct inode *inode = file->f_path.dentry->d_inode;
63043- struct address_space *mapping = file->f_mapping;
63044-
63045- get_file(file);
63046- if (tmp->vm_flags & VM_DENYWRITE)
63047- atomic_dec(&inode->i_writecount);
63048- mutex_lock(&mapping->i_mmap_mutex);
63049- if (tmp->vm_flags & VM_SHARED)
63050- mapping->i_mmap_writable++;
63051- flush_dcache_mmap_lock(mapping);
63052- /* insert tmp into the share list, just after mpnt */
63053- vma_prio_tree_add(tmp, mpnt);
63054- flush_dcache_mmap_unlock(mapping);
63055- mutex_unlock(&mapping->i_mmap_mutex);
63056+ tmp = dup_vma(mm, mpnt);
63057+ if (!tmp) {
63058+ retval = -ENOMEM;
63059+ goto out;
63060 }
63061
63062 /*
63063- * Clear hugetlb-related page reserves for children. This only
63064- * affects MAP_PRIVATE mappings. Faults generated by the child
63065- * are not guaranteed to succeed, even if read-only
63066- */
63067- if (is_vm_hugetlb_page(tmp))
63068- reset_vma_resv_huge_pages(tmp);
63069-
63070- /*
63071 * Link in the new vma and copy the page table entries.
63072 */
63073 *pprev = tmp;
63074@@ -422,6 +442,31 @@ static int dup_mmap(struct mm_struct *mm
63075 if (retval)
63076 goto out;
63077 }
63078+
63079+#ifdef CONFIG_PAX_SEGMEXEC
63080+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
63081+ struct vm_area_struct *mpnt_m;
63082+
63083+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
63084+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
63085+
63086+ if (!mpnt->vm_mirror)
63087+ continue;
63088+
63089+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
63090+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
63091+ mpnt->vm_mirror = mpnt_m;
63092+ } else {
63093+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
63094+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
63095+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
63096+ mpnt->vm_mirror->vm_mirror = mpnt;
63097+ }
63098+ }
63099+ BUG_ON(mpnt_m);
63100+ }
63101+#endif
63102+
63103 /* a new mm has just been created */
63104 arch_dup_mmap(oldmm, mm);
63105 retval = 0;
63106@@ -430,14 +475,6 @@ out:
63107 flush_tlb_mm(oldmm);
63108 up_write(&oldmm->mmap_sem);
63109 return retval;
63110-fail_nomem_anon_vma_fork:
63111- mpol_put(pol);
63112-fail_nomem_policy:
63113- kmem_cache_free(vm_area_cachep, tmp);
63114-fail_nomem:
63115- retval = -ENOMEM;
63116- vm_unacct_memory(charge);
63117- goto out;
63118 }
63119
63120 static inline int mm_alloc_pgd(struct mm_struct *mm)
63121@@ -837,13 +874,14 @@ static int copy_fs(unsigned long clone_f
63122 spin_unlock(&fs->lock);
63123 return -EAGAIN;
63124 }
63125- fs->users++;
63126+ atomic_inc(&fs->users);
63127 spin_unlock(&fs->lock);
63128 return 0;
63129 }
63130 tsk->fs = copy_fs_struct(fs);
63131 if (!tsk->fs)
63132 return -ENOMEM;
63133+ gr_set_chroot_entries(tsk, &tsk->fs->root);
63134 return 0;
63135 }
63136
63137@@ -1105,6 +1143,9 @@ static struct task_struct *copy_process(
63138 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
63139 #endif
63140 retval = -EAGAIN;
63141+
63142+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
63143+
63144 if (atomic_read(&p->real_cred->user->processes) >=
63145 task_rlimit(p, RLIMIT_NPROC)) {
63146 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
63147@@ -1264,6 +1305,8 @@ static struct task_struct *copy_process(
63148 if (clone_flags & CLONE_THREAD)
63149 p->tgid = current->tgid;
63150
63151+ gr_copy_label(p);
63152+
63153 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
63154 /*
63155 * Clear TID on mm_release()?
63156@@ -1428,6 +1471,8 @@ bad_fork_cleanup_count:
63157 bad_fork_free:
63158 free_task(p);
63159 fork_out:
63160+ gr_log_forkfail(retval);
63161+
63162 return ERR_PTR(retval);
63163 }
63164
63165@@ -1528,6 +1573,8 @@ long do_fork(unsigned long clone_flags,
63166 if (clone_flags & CLONE_PARENT_SETTID)
63167 put_user(nr, parent_tidptr);
63168
63169+ gr_handle_brute_check();
63170+
63171 if (clone_flags & CLONE_VFORK) {
63172 p->vfork_done = &vfork;
63173 init_completion(&vfork);
63174@@ -1637,7 +1684,7 @@ static int unshare_fs(unsigned long unsh
63175 return 0;
63176
63177 /* don't need lock here; in the worst case we'll do useless copy */
63178- if (fs->users == 1)
63179+ if (atomic_read(&fs->users) == 1)
63180 return 0;
63181
63182 *new_fsp = copy_fs_struct(fs);
63183@@ -1726,7 +1773,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
63184 fs = current->fs;
63185 spin_lock(&fs->lock);
63186 current->fs = new_fs;
63187- if (--fs->users)
63188+ gr_set_chroot_entries(current, &current->fs->root);
63189+ if (atomic_dec_return(&fs->users))
63190 new_fs = NULL;
63191 else
63192 new_fs = fs;
63193diff -urNp linux-3.1.1/kernel/futex.c linux-3.1.1/kernel/futex.c
63194--- linux-3.1.1/kernel/futex.c 2011-11-11 15:19:27.000000000 -0500
63195+++ linux-3.1.1/kernel/futex.c 2011-11-16 18:40:44.000000000 -0500
63196@@ -54,6 +54,7 @@
63197 #include <linux/mount.h>
63198 #include <linux/pagemap.h>
63199 #include <linux/syscalls.h>
63200+#include <linux/ptrace.h>
63201 #include <linux/signal.h>
63202 #include <linux/module.h>
63203 #include <linux/magic.h>
63204@@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fsh
63205 struct page *page, *page_head;
63206 int err, ro = 0;
63207
63208+#ifdef CONFIG_PAX_SEGMEXEC
63209+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
63210+ return -EFAULT;
63211+#endif
63212+
63213 /*
63214 * The futex address must be "naturally" aligned.
63215 */
63216@@ -1863,6 +1869,8 @@ static int futex_wait(u32 __user *uaddr,
63217 struct futex_q q = futex_q_init;
63218 int ret;
63219
63220+ pax_track_stack();
63221+
63222 if (!bitset)
63223 return -EINVAL;
63224 q.bitset = bitset;
63225@@ -2259,6 +2267,8 @@ static int futex_wait_requeue_pi(u32 __u
63226 struct futex_q q = futex_q_init;
63227 int res, ret;
63228
63229+ pax_track_stack();
63230+
63231 if (!bitset)
63232 return -EINVAL;
63233
63234@@ -2431,7 +2441,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
63235 {
63236 struct robust_list_head __user *head;
63237 unsigned long ret;
63238+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
63239 const struct cred *cred = current_cred(), *pcred;
63240+#endif
63241
63242 if (!futex_cmpxchg_enabled)
63243 return -ENOSYS;
63244@@ -2447,6 +2459,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
63245 if (!p)
63246 goto err_unlock;
63247 ret = -EPERM;
63248+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63249+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
63250+ goto err_unlock;
63251+#else
63252 pcred = __task_cred(p);
63253 /* If victim is in different user_ns, then uids are not
63254 comparable, so we must have CAP_SYS_PTRACE */
63255@@ -2461,6 +2477,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
63256 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
63257 goto err_unlock;
63258 ok:
63259+#endif
63260 head = p->robust_list;
63261 rcu_read_unlock();
63262 }
63263@@ -2712,6 +2729,7 @@ static int __init futex_init(void)
63264 {
63265 u32 curval;
63266 int i;
63267+ mm_segment_t oldfs;
63268
63269 /*
63270 * This will fail and we want it. Some arch implementations do
63271@@ -2723,8 +2741,11 @@ static int __init futex_init(void)
63272 * implementation, the non-functional ones will return
63273 * -ENOSYS.
63274 */
63275+ oldfs = get_fs();
63276+ set_fs(USER_DS);
63277 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
63278 futex_cmpxchg_enabled = 1;
63279+ set_fs(oldfs);
63280
63281 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
63282 plist_head_init(&futex_queues[i].chain);
63283diff -urNp linux-3.1.1/kernel/futex_compat.c linux-3.1.1/kernel/futex_compat.c
63284--- linux-3.1.1/kernel/futex_compat.c 2011-11-11 15:19:27.000000000 -0500
63285+++ linux-3.1.1/kernel/futex_compat.c 2011-11-16 18:40:44.000000000 -0500
63286@@ -10,6 +10,7 @@
63287 #include <linux/compat.h>
63288 #include <linux/nsproxy.h>
63289 #include <linux/futex.h>
63290+#include <linux/ptrace.h>
63291
63292 #include <asm/uaccess.h>
63293
63294@@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
63295 {
63296 struct compat_robust_list_head __user *head;
63297 unsigned long ret;
63298- const struct cred *cred = current_cred(), *pcred;
63299+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
63300+ const struct cred *cred = current_cred();
63301+ const struct cred *pcred;
63302+#endif
63303
63304 if (!futex_cmpxchg_enabled)
63305 return -ENOSYS;
63306@@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
63307 if (!p)
63308 goto err_unlock;
63309 ret = -EPERM;
63310+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63311+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
63312+ goto err_unlock;
63313+#else
63314 pcred = __task_cred(p);
63315 /* If victim is in different user_ns, then uids are not
63316 comparable, so we must have CAP_SYS_PTRACE */
63317@@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
63318 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
63319 goto err_unlock;
63320 ok:
63321+#endif
63322 head = p->compat_robust_list;
63323 rcu_read_unlock();
63324 }
63325diff -urNp linux-3.1.1/kernel/gcov/base.c linux-3.1.1/kernel/gcov/base.c
63326--- linux-3.1.1/kernel/gcov/base.c 2011-11-11 15:19:27.000000000 -0500
63327+++ linux-3.1.1/kernel/gcov/base.c 2011-11-16 18:39:08.000000000 -0500
63328@@ -102,11 +102,6 @@ void gcov_enable_events(void)
63329 }
63330
63331 #ifdef CONFIG_MODULES
63332-static inline int within(void *addr, void *start, unsigned long size)
63333-{
63334- return ((addr >= start) && (addr < start + size));
63335-}
63336-
63337 /* Update list and generate events when modules are unloaded. */
63338 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
63339 void *data)
63340@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
63341 prev = NULL;
63342 /* Remove entries located in module from linked list. */
63343 for (info = gcov_info_head; info; info = info->next) {
63344- if (within(info, mod->module_core, mod->core_size)) {
63345+ if (within_module_core_rw((unsigned long)info, mod)) {
63346 if (prev)
63347 prev->next = info->next;
63348 else
63349diff -urNp linux-3.1.1/kernel/hrtimer.c linux-3.1.1/kernel/hrtimer.c
63350--- linux-3.1.1/kernel/hrtimer.c 2011-11-11 15:19:27.000000000 -0500
63351+++ linux-3.1.1/kernel/hrtimer.c 2011-11-16 18:39:08.000000000 -0500
63352@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
63353 local_irq_restore(flags);
63354 }
63355
63356-static void run_hrtimer_softirq(struct softirq_action *h)
63357+static void run_hrtimer_softirq(void)
63358 {
63359 hrtimer_peek_ahead_timers();
63360 }
63361diff -urNp linux-3.1.1/kernel/jump_label.c linux-3.1.1/kernel/jump_label.c
63362--- linux-3.1.1/kernel/jump_label.c 2011-11-11 15:19:27.000000000 -0500
63363+++ linux-3.1.1/kernel/jump_label.c 2011-11-16 18:39:08.000000000 -0500
63364@@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entr
63365
63366 size = (((unsigned long)stop - (unsigned long)start)
63367 / sizeof(struct jump_entry));
63368+ pax_open_kernel();
63369 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
63370+ pax_close_kernel();
63371 }
63372
63373 static void jump_label_update(struct jump_label_key *key, int enable);
63374@@ -297,10 +299,12 @@ static void jump_label_invalidate_module
63375 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
63376 struct jump_entry *iter;
63377
63378+ pax_open_kernel();
63379 for (iter = iter_start; iter < iter_stop; iter++) {
63380 if (within_module_init(iter->code, mod))
63381 iter->code = 0;
63382 }
63383+ pax_close_kernel();
63384 }
63385
63386 static int
63387diff -urNp linux-3.1.1/kernel/kallsyms.c linux-3.1.1/kernel/kallsyms.c
63388--- linux-3.1.1/kernel/kallsyms.c 2011-11-11 15:19:27.000000000 -0500
63389+++ linux-3.1.1/kernel/kallsyms.c 2011-11-16 18:40:44.000000000 -0500
63390@@ -11,6 +11,9 @@
63391 * Changed the compression method from stem compression to "table lookup"
63392 * compression (see scripts/kallsyms.c for a more complete description)
63393 */
63394+#ifdef CONFIG_GRKERNSEC_HIDESYM
63395+#define __INCLUDED_BY_HIDESYM 1
63396+#endif
63397 #include <linux/kallsyms.h>
63398 #include <linux/module.h>
63399 #include <linux/init.h>
63400@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
63401
63402 static inline int is_kernel_inittext(unsigned long addr)
63403 {
63404+ if (system_state != SYSTEM_BOOTING)
63405+ return 0;
63406+
63407 if (addr >= (unsigned long)_sinittext
63408 && addr <= (unsigned long)_einittext)
63409 return 1;
63410 return 0;
63411 }
63412
63413+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63414+#ifdef CONFIG_MODULES
63415+static inline int is_module_text(unsigned long addr)
63416+{
63417+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
63418+ return 1;
63419+
63420+ addr = ktla_ktva(addr);
63421+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
63422+}
63423+#else
63424+static inline int is_module_text(unsigned long addr)
63425+{
63426+ return 0;
63427+}
63428+#endif
63429+#endif
63430+
63431 static inline int is_kernel_text(unsigned long addr)
63432 {
63433 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
63434@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
63435
63436 static inline int is_kernel(unsigned long addr)
63437 {
63438+
63439+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63440+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
63441+ return 1;
63442+
63443+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
63444+#else
63445 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
63446+#endif
63447+
63448 return 1;
63449 return in_gate_area_no_mm(addr);
63450 }
63451
63452 static int is_ksym_addr(unsigned long addr)
63453 {
63454+
63455+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63456+ if (is_module_text(addr))
63457+ return 0;
63458+#endif
63459+
63460 if (all_var)
63461 return is_kernel(addr);
63462
63463@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
63464
63465 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
63466 {
63467- iter->name[0] = '\0';
63468 iter->nameoff = get_symbol_offset(new_pos);
63469 iter->pos = new_pos;
63470 }
63471@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
63472 {
63473 struct kallsym_iter *iter = m->private;
63474
63475+#ifdef CONFIG_GRKERNSEC_HIDESYM
63476+ if (current_uid())
63477+ return 0;
63478+#endif
63479+
63480 /* Some debugging symbols have no name. Ignore them. */
63481 if (!iter->name[0])
63482 return 0;
63483@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
63484 struct kallsym_iter *iter;
63485 int ret;
63486
63487- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
63488+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
63489 if (!iter)
63490 return -ENOMEM;
63491 reset_iter(iter, 0);
63492diff -urNp linux-3.1.1/kernel/kexec.c linux-3.1.1/kernel/kexec.c
63493--- linux-3.1.1/kernel/kexec.c 2011-11-11 15:19:27.000000000 -0500
63494+++ linux-3.1.1/kernel/kexec.c 2011-11-16 18:39:08.000000000 -0500
63495@@ -1033,7 +1033,8 @@ asmlinkage long compat_sys_kexec_load(un
63496 unsigned long flags)
63497 {
63498 struct compat_kexec_segment in;
63499- struct kexec_segment out, __user *ksegments;
63500+ struct kexec_segment out;
63501+ struct kexec_segment __user *ksegments;
63502 unsigned long i, result;
63503
63504 /* Don't allow clients that don't understand the native
63505diff -urNp linux-3.1.1/kernel/kmod.c linux-3.1.1/kernel/kmod.c
63506--- linux-3.1.1/kernel/kmod.c 2011-11-11 15:19:27.000000000 -0500
63507+++ linux-3.1.1/kernel/kmod.c 2011-11-16 18:40:44.000000000 -0500
63508@@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
63509 * If module auto-loading support is disabled then this function
63510 * becomes a no-operation.
63511 */
63512-int __request_module(bool wait, const char *fmt, ...)
63513+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
63514 {
63515- va_list args;
63516 char module_name[MODULE_NAME_LEN];
63517 unsigned int max_modprobes;
63518 int ret;
63519- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
63520+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
63521 static char *envp[] = { "HOME=/",
63522 "TERM=linux",
63523 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
63524@@ -88,9 +87,7 @@ int __request_module(bool wait, const ch
63525 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
63526 static int kmod_loop_msg;
63527
63528- va_start(args, fmt);
63529- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
63530- va_end(args);
63531+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
63532 if (ret >= MODULE_NAME_LEN)
63533 return -ENAMETOOLONG;
63534
63535@@ -98,6 +95,20 @@ int __request_module(bool wait, const ch
63536 if (ret)
63537 return ret;
63538
63539+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63540+ if (!current_uid()) {
63541+ /* hack to workaround consolekit/udisks stupidity */
63542+ read_lock(&tasklist_lock);
63543+ if (!strcmp(current->comm, "mount") &&
63544+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
63545+ read_unlock(&tasklist_lock);
63546+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
63547+ return -EPERM;
63548+ }
63549+ read_unlock(&tasklist_lock);
63550+ }
63551+#endif
63552+
63553 /* If modprobe needs a service that is in a module, we get a recursive
63554 * loop. Limit the number of running kmod threads to max_threads/2 or
63555 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
63556@@ -133,6 +144,47 @@ int __request_module(bool wait, const ch
63557 atomic_dec(&kmod_concurrent);
63558 return ret;
63559 }
63560+
63561+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
63562+{
63563+ va_list args;
63564+ int ret;
63565+
63566+ va_start(args, fmt);
63567+ ret = ____request_module(wait, module_param, fmt, args);
63568+ va_end(args);
63569+
63570+ return ret;
63571+}
63572+
63573+int __request_module(bool wait, const char *fmt, ...)
63574+{
63575+ va_list args;
63576+ int ret;
63577+
63578+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63579+ if (current_uid()) {
63580+ char module_param[MODULE_NAME_LEN];
63581+
63582+ memset(module_param, 0, sizeof(module_param));
63583+
63584+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
63585+
63586+ va_start(args, fmt);
63587+ ret = ____request_module(wait, module_param, fmt, args);
63588+ va_end(args);
63589+
63590+ return ret;
63591+ }
63592+#endif
63593+
63594+ va_start(args, fmt);
63595+ ret = ____request_module(wait, NULL, fmt, args);
63596+ va_end(args);
63597+
63598+ return ret;
63599+}
63600+
63601 EXPORT_SYMBOL(__request_module);
63602 #endif /* CONFIG_MODULES */
63603
63604@@ -222,7 +274,7 @@ static int wait_for_helper(void *data)
63605 *
63606 * Thus the __user pointer cast is valid here.
63607 */
63608- sys_wait4(pid, (int __user *)&ret, 0, NULL);
63609+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
63610
63611 /*
63612 * If ret is 0, either ____call_usermodehelper failed and the
63613diff -urNp linux-3.1.1/kernel/kprobes.c linux-3.1.1/kernel/kprobes.c
63614--- linux-3.1.1/kernel/kprobes.c 2011-11-11 15:19:27.000000000 -0500
63615+++ linux-3.1.1/kernel/kprobes.c 2011-11-16 18:39:08.000000000 -0500
63616@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
63617 * kernel image and loaded module images reside. This is required
63618 * so x86_64 can correctly handle the %rip-relative fixups.
63619 */
63620- kip->insns = module_alloc(PAGE_SIZE);
63621+ kip->insns = module_alloc_exec(PAGE_SIZE);
63622 if (!kip->insns) {
63623 kfree(kip);
63624 return NULL;
63625@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
63626 */
63627 if (!list_is_singular(&kip->list)) {
63628 list_del(&kip->list);
63629- module_free(NULL, kip->insns);
63630+ module_free_exec(NULL, kip->insns);
63631 kfree(kip);
63632 }
63633 return 1;
63634@@ -1949,7 +1949,7 @@ static int __init init_kprobes(void)
63635 {
63636 int i, err = 0;
63637 unsigned long offset = 0, size = 0;
63638- char *modname, namebuf[128];
63639+ char *modname, namebuf[KSYM_NAME_LEN];
63640 const char *symbol_name;
63641 void *addr;
63642 struct kprobe_blackpoint *kb;
63643@@ -2075,7 +2075,7 @@ static int __kprobes show_kprobe_addr(st
63644 const char *sym = NULL;
63645 unsigned int i = *(loff_t *) v;
63646 unsigned long offset = 0;
63647- char *modname, namebuf[128];
63648+ char *modname, namebuf[KSYM_NAME_LEN];
63649
63650 head = &kprobe_table[i];
63651 preempt_disable();
63652diff -urNp linux-3.1.1/kernel/lockdep.c linux-3.1.1/kernel/lockdep.c
63653--- linux-3.1.1/kernel/lockdep.c 2011-11-11 15:19:27.000000000 -0500
63654+++ linux-3.1.1/kernel/lockdep.c 2011-11-16 18:39:08.000000000 -0500
63655@@ -583,6 +583,10 @@ static int static_obj(void *obj)
63656 end = (unsigned long) &_end,
63657 addr = (unsigned long) obj;
63658
63659+#ifdef CONFIG_PAX_KERNEXEC
63660+ start = ktla_ktva(start);
63661+#endif
63662+
63663 /*
63664 * static variable?
63665 */
63666@@ -718,6 +722,7 @@ register_lock_class(struct lockdep_map *
63667 if (!static_obj(lock->key)) {
63668 debug_locks_off();
63669 printk("INFO: trying to register non-static key.\n");
63670+ printk("lock:%pS key:%pS.\n", lock, lock->key);
63671 printk("the code is fine but needs lockdep annotation.\n");
63672 printk("turning off the locking correctness validator.\n");
63673 dump_stack();
63674@@ -2948,7 +2953,7 @@ static int __lock_acquire(struct lockdep
63675 if (!class)
63676 return 0;
63677 }
63678- atomic_inc((atomic_t *)&class->ops);
63679+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
63680 if (very_verbose(class)) {
63681 printk("\nacquire class [%p] %s", class->key, class->name);
63682 if (class->name_version > 1)
63683diff -urNp linux-3.1.1/kernel/lockdep_proc.c linux-3.1.1/kernel/lockdep_proc.c
63684--- linux-3.1.1/kernel/lockdep_proc.c 2011-11-11 15:19:27.000000000 -0500
63685+++ linux-3.1.1/kernel/lockdep_proc.c 2011-11-16 18:39:08.000000000 -0500
63686@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
63687
63688 static void print_name(struct seq_file *m, struct lock_class *class)
63689 {
63690- char str[128];
63691+ char str[KSYM_NAME_LEN];
63692 const char *name = class->name;
63693
63694 if (!name) {
63695diff -urNp linux-3.1.1/kernel/module.c linux-3.1.1/kernel/module.c
63696--- linux-3.1.1/kernel/module.c 2011-11-11 15:19:27.000000000 -0500
63697+++ linux-3.1.1/kernel/module.c 2011-11-16 18:40:44.000000000 -0500
63698@@ -58,6 +58,7 @@
63699 #include <linux/jump_label.h>
63700 #include <linux/pfn.h>
63701 #include <linux/bsearch.h>
63702+#include <linux/grsecurity.h>
63703
63704 #define CREATE_TRACE_POINTS
63705 #include <trace/events/module.h>
63706@@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
63707
63708 /* Bounds of module allocation, for speeding __module_address.
63709 * Protected by module_mutex. */
63710-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
63711+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
63712+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
63713
63714 int register_module_notifier(struct notifier_block * nb)
63715 {
63716@@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(cons
63717 return true;
63718
63719 list_for_each_entry_rcu(mod, &modules, list) {
63720- struct symsearch arr[] = {
63721+ struct symsearch modarr[] = {
63722 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
63723 NOT_GPL_ONLY, false },
63724 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
63725@@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(cons
63726 #endif
63727 };
63728
63729- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
63730+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
63731 return true;
63732 }
63733 return false;
63734@@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(
63735 static int percpu_modalloc(struct module *mod,
63736 unsigned long size, unsigned long align)
63737 {
63738- if (align > PAGE_SIZE) {
63739+ if (align-1 >= PAGE_SIZE) {
63740 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
63741 mod->name, align, PAGE_SIZE);
63742 align = PAGE_SIZE;
63743@@ -1183,7 +1185,7 @@ resolve_symbol_wait(struct module *mod,
63744 */
63745 #ifdef CONFIG_SYSFS
63746
63747-#ifdef CONFIG_KALLSYMS
63748+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63749 static inline bool sect_empty(const Elf_Shdr *sect)
63750 {
63751 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
63752@@ -1649,21 +1651,21 @@ static void set_section_ro_nx(void *base
63753
63754 static void unset_module_core_ro_nx(struct module *mod)
63755 {
63756- set_page_attributes(mod->module_core + mod->core_text_size,
63757- mod->module_core + mod->core_size,
63758+ set_page_attributes(mod->module_core_rw,
63759+ mod->module_core_rw + mod->core_size_rw,
63760 set_memory_x);
63761- set_page_attributes(mod->module_core,
63762- mod->module_core + mod->core_ro_size,
63763+ set_page_attributes(mod->module_core_rx,
63764+ mod->module_core_rx + mod->core_size_rx,
63765 set_memory_rw);
63766 }
63767
63768 static void unset_module_init_ro_nx(struct module *mod)
63769 {
63770- set_page_attributes(mod->module_init + mod->init_text_size,
63771- mod->module_init + mod->init_size,
63772+ set_page_attributes(mod->module_init_rw,
63773+ mod->module_init_rw + mod->init_size_rw,
63774 set_memory_x);
63775- set_page_attributes(mod->module_init,
63776- mod->module_init + mod->init_ro_size,
63777+ set_page_attributes(mod->module_init_rx,
63778+ mod->module_init_rx + mod->init_size_rx,
63779 set_memory_rw);
63780 }
63781
63782@@ -1674,14 +1676,14 @@ void set_all_modules_text_rw(void)
63783
63784 mutex_lock(&module_mutex);
63785 list_for_each_entry_rcu(mod, &modules, list) {
63786- if ((mod->module_core) && (mod->core_text_size)) {
63787- set_page_attributes(mod->module_core,
63788- mod->module_core + mod->core_text_size,
63789+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
63790+ set_page_attributes(mod->module_core_rx,
63791+ mod->module_core_rx + mod->core_size_rx,
63792 set_memory_rw);
63793 }
63794- if ((mod->module_init) && (mod->init_text_size)) {
63795- set_page_attributes(mod->module_init,
63796- mod->module_init + mod->init_text_size,
63797+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
63798+ set_page_attributes(mod->module_init_rx,
63799+ mod->module_init_rx + mod->init_size_rx,
63800 set_memory_rw);
63801 }
63802 }
63803@@ -1695,14 +1697,14 @@ void set_all_modules_text_ro(void)
63804
63805 mutex_lock(&module_mutex);
63806 list_for_each_entry_rcu(mod, &modules, list) {
63807- if ((mod->module_core) && (mod->core_text_size)) {
63808- set_page_attributes(mod->module_core,
63809- mod->module_core + mod->core_text_size,
63810+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
63811+ set_page_attributes(mod->module_core_rx,
63812+ mod->module_core_rx + mod->core_size_rx,
63813 set_memory_ro);
63814 }
63815- if ((mod->module_init) && (mod->init_text_size)) {
63816- set_page_attributes(mod->module_init,
63817- mod->module_init + mod->init_text_size,
63818+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
63819+ set_page_attributes(mod->module_init_rx,
63820+ mod->module_init_rx + mod->init_size_rx,
63821 set_memory_ro);
63822 }
63823 }
63824@@ -1748,16 +1750,19 @@ static void free_module(struct module *m
63825
63826 /* This may be NULL, but that's OK */
63827 unset_module_init_ro_nx(mod);
63828- module_free(mod, mod->module_init);
63829+ module_free(mod, mod->module_init_rw);
63830+ module_free_exec(mod, mod->module_init_rx);
63831 kfree(mod->args);
63832 percpu_modfree(mod);
63833
63834 /* Free lock-classes: */
63835- lockdep_free_key_range(mod->module_core, mod->core_size);
63836+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
63837+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
63838
63839 /* Finally, free the core (containing the module structure) */
63840 unset_module_core_ro_nx(mod);
63841- module_free(mod, mod->module_core);
63842+ module_free_exec(mod, mod->module_core_rx);
63843+ module_free(mod, mod->module_core_rw);
63844
63845 #ifdef CONFIG_MPU
63846 update_protections(current->mm);
63847@@ -1826,10 +1831,31 @@ static int simplify_symbols(struct modul
63848 unsigned int i;
63849 int ret = 0;
63850 const struct kernel_symbol *ksym;
63851+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63852+ int is_fs_load = 0;
63853+ int register_filesystem_found = 0;
63854+ char *p;
63855+
63856+ p = strstr(mod->args, "grsec_modharden_fs");
63857+ if (p) {
63858+ char *endptr = p + strlen("grsec_modharden_fs");
63859+ /* copy \0 as well */
63860+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
63861+ is_fs_load = 1;
63862+ }
63863+#endif
63864
63865 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
63866 const char *name = info->strtab + sym[i].st_name;
63867
63868+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63869+ /* it's a real shame this will never get ripped and copied
63870+ upstream! ;(
63871+ */
63872+ if (is_fs_load && !strcmp(name, "register_filesystem"))
63873+ register_filesystem_found = 1;
63874+#endif
63875+
63876 switch (sym[i].st_shndx) {
63877 case SHN_COMMON:
63878 /* We compiled with -fno-common. These are not
63879@@ -1850,7 +1876,9 @@ static int simplify_symbols(struct modul
63880 ksym = resolve_symbol_wait(mod, info, name);
63881 /* Ok if resolved. */
63882 if (ksym && !IS_ERR(ksym)) {
63883+ pax_open_kernel();
63884 sym[i].st_value = ksym->value;
63885+ pax_close_kernel();
63886 break;
63887 }
63888
63889@@ -1869,11 +1897,20 @@ static int simplify_symbols(struct modul
63890 secbase = (unsigned long)mod_percpu(mod);
63891 else
63892 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
63893+ pax_open_kernel();
63894 sym[i].st_value += secbase;
63895+ pax_close_kernel();
63896 break;
63897 }
63898 }
63899
63900+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63901+ if (is_fs_load && !register_filesystem_found) {
63902+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
63903+ ret = -EPERM;
63904+ }
63905+#endif
63906+
63907 return ret;
63908 }
63909
63910@@ -1977,22 +2014,12 @@ static void layout_sections(struct modul
63911 || s->sh_entsize != ~0UL
63912 || strstarts(sname, ".init"))
63913 continue;
63914- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
63915+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63916+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
63917+ else
63918+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
63919 DEBUGP("\t%s\n", name);
63920 }
63921- switch (m) {
63922- case 0: /* executable */
63923- mod->core_size = debug_align(mod->core_size);
63924- mod->core_text_size = mod->core_size;
63925- break;
63926- case 1: /* RO: text and ro-data */
63927- mod->core_size = debug_align(mod->core_size);
63928- mod->core_ro_size = mod->core_size;
63929- break;
63930- case 3: /* whole core */
63931- mod->core_size = debug_align(mod->core_size);
63932- break;
63933- }
63934 }
63935
63936 DEBUGP("Init section allocation order:\n");
63937@@ -2006,23 +2033,13 @@ static void layout_sections(struct modul
63938 || s->sh_entsize != ~0UL
63939 || !strstarts(sname, ".init"))
63940 continue;
63941- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
63942- | INIT_OFFSET_MASK);
63943+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63944+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
63945+ else
63946+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
63947+ s->sh_entsize |= INIT_OFFSET_MASK;
63948 DEBUGP("\t%s\n", sname);
63949 }
63950- switch (m) {
63951- case 0: /* executable */
63952- mod->init_size = debug_align(mod->init_size);
63953- mod->init_text_size = mod->init_size;
63954- break;
63955- case 1: /* RO: text and ro-data */
63956- mod->init_size = debug_align(mod->init_size);
63957- mod->init_ro_size = mod->init_size;
63958- break;
63959- case 3: /* whole init */
63960- mod->init_size = debug_align(mod->init_size);
63961- break;
63962- }
63963 }
63964 }
63965
63966@@ -2187,7 +2204,7 @@ static void layout_symtab(struct module
63967
63968 /* Put symbol section at end of init part of module. */
63969 symsect->sh_flags |= SHF_ALLOC;
63970- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
63971+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
63972 info->index.sym) | INIT_OFFSET_MASK;
63973 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
63974
63975@@ -2204,19 +2221,19 @@ static void layout_symtab(struct module
63976 }
63977
63978 /* Append room for core symbols at end of core part. */
63979- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
63980- mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
63981+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
63982+ mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
63983
63984 /* Put string table section at end of init part of module. */
63985 strsect->sh_flags |= SHF_ALLOC;
63986- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
63987+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
63988 info->index.str) | INIT_OFFSET_MASK;
63989 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
63990
63991 /* Append room for core symbols' strings at end of core part. */
63992- info->stroffs = mod->core_size;
63993+ info->stroffs = mod->core_size_rx;
63994 __set_bit(0, info->strmap);
63995- mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
63996+ mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
63997 }
63998
63999 static void add_kallsyms(struct module *mod, const struct load_info *info)
64000@@ -2232,11 +2249,13 @@ static void add_kallsyms(struct module *
64001 /* Make sure we get permanent strtab: don't use info->strtab. */
64002 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
64003
64004+ pax_open_kernel();
64005+
64006 /* Set types up while we still have access to sections. */
64007 for (i = 0; i < mod->num_symtab; i++)
64008 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
64009
64010- mod->core_symtab = dst = mod->module_core + info->symoffs;
64011+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
64012 src = mod->symtab;
64013 *dst = *src;
64014 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
64015@@ -2249,10 +2268,12 @@ static void add_kallsyms(struct module *
64016 }
64017 mod->core_num_syms = ndst;
64018
64019- mod->core_strtab = s = mod->module_core + info->stroffs;
64020+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
64021 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
64022 if (test_bit(i, info->strmap))
64023 *++s = mod->strtab[i];
64024+
64025+ pax_close_kernel();
64026 }
64027 #else
64028 static inline void layout_symtab(struct module *mod, struct load_info *info)
64029@@ -2286,17 +2307,33 @@ void * __weak module_alloc(unsigned long
64030 return size == 0 ? NULL : vmalloc_exec(size);
64031 }
64032
64033-static void *module_alloc_update_bounds(unsigned long size)
64034+static void *module_alloc_update_bounds_rw(unsigned long size)
64035 {
64036 void *ret = module_alloc(size);
64037
64038 if (ret) {
64039 mutex_lock(&module_mutex);
64040 /* Update module bounds. */
64041- if ((unsigned long)ret < module_addr_min)
64042- module_addr_min = (unsigned long)ret;
64043- if ((unsigned long)ret + size > module_addr_max)
64044- module_addr_max = (unsigned long)ret + size;
64045+ if ((unsigned long)ret < module_addr_min_rw)
64046+ module_addr_min_rw = (unsigned long)ret;
64047+ if ((unsigned long)ret + size > module_addr_max_rw)
64048+ module_addr_max_rw = (unsigned long)ret + size;
64049+ mutex_unlock(&module_mutex);
64050+ }
64051+ return ret;
64052+}
64053+
64054+static void *module_alloc_update_bounds_rx(unsigned long size)
64055+{
64056+ void *ret = module_alloc_exec(size);
64057+
64058+ if (ret) {
64059+ mutex_lock(&module_mutex);
64060+ /* Update module bounds. */
64061+ if ((unsigned long)ret < module_addr_min_rx)
64062+ module_addr_min_rx = (unsigned long)ret;
64063+ if ((unsigned long)ret + size > module_addr_max_rx)
64064+ module_addr_max_rx = (unsigned long)ret + size;
64065 mutex_unlock(&module_mutex);
64066 }
64067 return ret;
64068@@ -2589,7 +2626,7 @@ static int move_module(struct module *mo
64069 void *ptr;
64070
64071 /* Do the allocs. */
64072- ptr = module_alloc_update_bounds(mod->core_size);
64073+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
64074 /*
64075 * The pointer to this block is stored in the module structure
64076 * which is inside the block. Just mark it as not being a
64077@@ -2599,23 +2636,50 @@ static int move_module(struct module *mo
64078 if (!ptr)
64079 return -ENOMEM;
64080
64081- memset(ptr, 0, mod->core_size);
64082- mod->module_core = ptr;
64083+ memset(ptr, 0, mod->core_size_rw);
64084+ mod->module_core_rw = ptr;
64085
64086- ptr = module_alloc_update_bounds(mod->init_size);
64087+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
64088 /*
64089 * The pointer to this block is stored in the module structure
64090 * which is inside the block. This block doesn't need to be
64091 * scanned as it contains data and code that will be freed
64092 * after the module is initialized.
64093 */
64094- kmemleak_ignore(ptr);
64095- if (!ptr && mod->init_size) {
64096- module_free(mod, mod->module_core);
64097+ kmemleak_not_leak(ptr);
64098+ if (!ptr && mod->init_size_rw) {
64099+ module_free(mod, mod->module_core_rw);
64100 return -ENOMEM;
64101 }
64102- memset(ptr, 0, mod->init_size);
64103- mod->module_init = ptr;
64104+ memset(ptr, 0, mod->init_size_rw);
64105+ mod->module_init_rw = ptr;
64106+
64107+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
64108+ kmemleak_not_leak(ptr);
64109+ if (!ptr) {
64110+ module_free(mod, mod->module_init_rw);
64111+ module_free(mod, mod->module_core_rw);
64112+ return -ENOMEM;
64113+ }
64114+
64115+ pax_open_kernel();
64116+ memset(ptr, 0, mod->core_size_rx);
64117+ pax_close_kernel();
64118+ mod->module_core_rx = ptr;
64119+
64120+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
64121+ kmemleak_not_leak(ptr);
64122+ if (!ptr && mod->init_size_rx) {
64123+ module_free_exec(mod, mod->module_core_rx);
64124+ module_free(mod, mod->module_init_rw);
64125+ module_free(mod, mod->module_core_rw);
64126+ return -ENOMEM;
64127+ }
64128+
64129+ pax_open_kernel();
64130+ memset(ptr, 0, mod->init_size_rx);
64131+ pax_close_kernel();
64132+ mod->module_init_rx = ptr;
64133
64134 /* Transfer each section which specifies SHF_ALLOC */
64135 DEBUGP("final section addresses:\n");
64136@@ -2626,16 +2690,45 @@ static int move_module(struct module *mo
64137 if (!(shdr->sh_flags & SHF_ALLOC))
64138 continue;
64139
64140- if (shdr->sh_entsize & INIT_OFFSET_MASK)
64141- dest = mod->module_init
64142- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64143- else
64144- dest = mod->module_core + shdr->sh_entsize;
64145+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
64146+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64147+ dest = mod->module_init_rw
64148+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64149+ else
64150+ dest = mod->module_init_rx
64151+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64152+ } else {
64153+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64154+ dest = mod->module_core_rw + shdr->sh_entsize;
64155+ else
64156+ dest = mod->module_core_rx + shdr->sh_entsize;
64157+ }
64158+
64159+ if (shdr->sh_type != SHT_NOBITS) {
64160+
64161+#ifdef CONFIG_PAX_KERNEXEC
64162+#ifdef CONFIG_X86_64
64163+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
64164+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
64165+#endif
64166+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
64167+ pax_open_kernel();
64168+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64169+ pax_close_kernel();
64170+ } else
64171+#endif
64172
64173- if (shdr->sh_type != SHT_NOBITS)
64174 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64175+ }
64176 /* Update sh_addr to point to copy in image. */
64177- shdr->sh_addr = (unsigned long)dest;
64178+
64179+#ifdef CONFIG_PAX_KERNEXEC
64180+ if (shdr->sh_flags & SHF_EXECINSTR)
64181+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
64182+ else
64183+#endif
64184+
64185+ shdr->sh_addr = (unsigned long)dest;
64186 DEBUGP("\t0x%lx %s\n",
64187 shdr->sh_addr, info->secstrings + shdr->sh_name);
64188 }
64189@@ -2686,12 +2779,12 @@ static void flush_module_icache(const st
64190 * Do it before processing of module parameters, so the module
64191 * can provide parameter accessor functions of its own.
64192 */
64193- if (mod->module_init)
64194- flush_icache_range((unsigned long)mod->module_init,
64195- (unsigned long)mod->module_init
64196- + mod->init_size);
64197- flush_icache_range((unsigned long)mod->module_core,
64198- (unsigned long)mod->module_core + mod->core_size);
64199+ if (mod->module_init_rx)
64200+ flush_icache_range((unsigned long)mod->module_init_rx,
64201+ (unsigned long)mod->module_init_rx
64202+ + mod->init_size_rx);
64203+ flush_icache_range((unsigned long)mod->module_core_rx,
64204+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
64205
64206 set_fs(old_fs);
64207 }
64208@@ -2771,8 +2864,10 @@ static void module_deallocate(struct mod
64209 {
64210 kfree(info->strmap);
64211 percpu_modfree(mod);
64212- module_free(mod, mod->module_init);
64213- module_free(mod, mod->module_core);
64214+ module_free_exec(mod, mod->module_init_rx);
64215+ module_free_exec(mod, mod->module_core_rx);
64216+ module_free(mod, mod->module_init_rw);
64217+ module_free(mod, mod->module_core_rw);
64218 }
64219
64220 int __weak module_finalize(const Elf_Ehdr *hdr,
64221@@ -2836,9 +2931,38 @@ static struct module *load_module(void _
64222 if (err)
64223 goto free_unload;
64224
64225+ /* Now copy in args */
64226+ mod->args = strndup_user(uargs, ~0UL >> 1);
64227+ if (IS_ERR(mod->args)) {
64228+ err = PTR_ERR(mod->args);
64229+ goto free_unload;
64230+ }
64231+
64232 /* Set up MODINFO_ATTR fields */
64233 setup_modinfo(mod, &info);
64234
64235+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64236+ {
64237+ char *p, *p2;
64238+
64239+ if (strstr(mod->args, "grsec_modharden_netdev")) {
64240+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
64241+ err = -EPERM;
64242+ goto free_modinfo;
64243+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
64244+ p += strlen("grsec_modharden_normal");
64245+ p2 = strstr(p, "_");
64246+ if (p2) {
64247+ *p2 = '\0';
64248+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
64249+ *p2 = '_';
64250+ }
64251+ err = -EPERM;
64252+ goto free_modinfo;
64253+ }
64254+ }
64255+#endif
64256+
64257 /* Fix up syms, so that st_value is a pointer to location. */
64258 err = simplify_symbols(mod, &info);
64259 if (err < 0)
64260@@ -2854,13 +2978,6 @@ static struct module *load_module(void _
64261
64262 flush_module_icache(mod);
64263
64264- /* Now copy in args */
64265- mod->args = strndup_user(uargs, ~0UL >> 1);
64266- if (IS_ERR(mod->args)) {
64267- err = PTR_ERR(mod->args);
64268- goto free_arch_cleanup;
64269- }
64270-
64271 /* Mark state as coming so strong_try_module_get() ignores us. */
64272 mod->state = MODULE_STATE_COMING;
64273
64274@@ -2920,11 +3037,10 @@ static struct module *load_module(void _
64275 unlock:
64276 mutex_unlock(&module_mutex);
64277 synchronize_sched();
64278- kfree(mod->args);
64279- free_arch_cleanup:
64280 module_arch_cleanup(mod);
64281 free_modinfo:
64282 free_modinfo(mod);
64283+ kfree(mod->args);
64284 free_unload:
64285 module_unload_free(mod);
64286 free_module:
64287@@ -2965,16 +3081,16 @@ SYSCALL_DEFINE3(init_module, void __user
64288 MODULE_STATE_COMING, mod);
64289
64290 /* Set RO and NX regions for core */
64291- set_section_ro_nx(mod->module_core,
64292- mod->core_text_size,
64293- mod->core_ro_size,
64294- mod->core_size);
64295+ set_section_ro_nx(mod->module_core_rx,
64296+ mod->core_size_rx,
64297+ mod->core_size_rx,
64298+ mod->core_size_rx);
64299
64300 /* Set RO and NX regions for init */
64301- set_section_ro_nx(mod->module_init,
64302- mod->init_text_size,
64303- mod->init_ro_size,
64304- mod->init_size);
64305+ set_section_ro_nx(mod->module_init_rx,
64306+ mod->init_size_rx,
64307+ mod->init_size_rx,
64308+ mod->init_size_rx);
64309
64310 do_mod_ctors(mod);
64311 /* Start the module */
64312@@ -3020,11 +3136,12 @@ SYSCALL_DEFINE3(init_module, void __user
64313 mod->strtab = mod->core_strtab;
64314 #endif
64315 unset_module_init_ro_nx(mod);
64316- module_free(mod, mod->module_init);
64317- mod->module_init = NULL;
64318- mod->init_size = 0;
64319- mod->init_ro_size = 0;
64320- mod->init_text_size = 0;
64321+ module_free(mod, mod->module_init_rw);
64322+ module_free_exec(mod, mod->module_init_rx);
64323+ mod->module_init_rw = NULL;
64324+ mod->module_init_rx = NULL;
64325+ mod->init_size_rw = 0;
64326+ mod->init_size_rx = 0;
64327 mutex_unlock(&module_mutex);
64328
64329 return 0;
64330@@ -3055,10 +3172,16 @@ static const char *get_ksymbol(struct mo
64331 unsigned long nextval;
64332
64333 /* At worse, next value is at end of module */
64334- if (within_module_init(addr, mod))
64335- nextval = (unsigned long)mod->module_init+mod->init_text_size;
64336+ if (within_module_init_rx(addr, mod))
64337+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
64338+ else if (within_module_init_rw(addr, mod))
64339+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
64340+ else if (within_module_core_rx(addr, mod))
64341+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
64342+ else if (within_module_core_rw(addr, mod))
64343+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
64344 else
64345- nextval = (unsigned long)mod->module_core+mod->core_text_size;
64346+ return NULL;
64347
64348 /* Scan for closest preceding symbol, and next symbol. (ELF
64349 starts real symbols at 1). */
64350@@ -3304,7 +3427,7 @@ static int m_show(struct seq_file *m, vo
64351 char buf[8];
64352
64353 seq_printf(m, "%s %u",
64354- mod->name, mod->init_size + mod->core_size);
64355+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
64356 print_unload_info(m, mod);
64357
64358 /* Informative for users. */
64359@@ -3313,7 +3436,7 @@ static int m_show(struct seq_file *m, vo
64360 mod->state == MODULE_STATE_COMING ? "Loading":
64361 "Live");
64362 /* Used by oprofile and other similar tools. */
64363- seq_printf(m, " 0x%pK", mod->module_core);
64364+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
64365
64366 /* Taints info */
64367 if (mod->taints)
64368@@ -3349,7 +3472,17 @@ static const struct file_operations proc
64369
64370 static int __init proc_modules_init(void)
64371 {
64372+#ifndef CONFIG_GRKERNSEC_HIDESYM
64373+#ifdef CONFIG_GRKERNSEC_PROC_USER
64374+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64375+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64376+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
64377+#else
64378 proc_create("modules", 0, NULL, &proc_modules_operations);
64379+#endif
64380+#else
64381+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64382+#endif
64383 return 0;
64384 }
64385 module_init(proc_modules_init);
64386@@ -3408,12 +3541,12 @@ struct module *__module_address(unsigned
64387 {
64388 struct module *mod;
64389
64390- if (addr < module_addr_min || addr > module_addr_max)
64391+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
64392+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
64393 return NULL;
64394
64395 list_for_each_entry_rcu(mod, &modules, list)
64396- if (within_module_core(addr, mod)
64397- || within_module_init(addr, mod))
64398+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
64399 return mod;
64400 return NULL;
64401 }
64402@@ -3447,11 +3580,20 @@ bool is_module_text_address(unsigned lon
64403 */
64404 struct module *__module_text_address(unsigned long addr)
64405 {
64406- struct module *mod = __module_address(addr);
64407+ struct module *mod;
64408+
64409+#ifdef CONFIG_X86_32
64410+ addr = ktla_ktva(addr);
64411+#endif
64412+
64413+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
64414+ return NULL;
64415+
64416+ mod = __module_address(addr);
64417+
64418 if (mod) {
64419 /* Make sure it's within the text section. */
64420- if (!within(addr, mod->module_init, mod->init_text_size)
64421- && !within(addr, mod->module_core, mod->core_text_size))
64422+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
64423 mod = NULL;
64424 }
64425 return mod;
64426diff -urNp linux-3.1.1/kernel/mutex.c linux-3.1.1/kernel/mutex.c
64427--- linux-3.1.1/kernel/mutex.c 2011-11-11 15:19:27.000000000 -0500
64428+++ linux-3.1.1/kernel/mutex.c 2011-11-16 18:39:08.000000000 -0500
64429@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock,
64430 spin_lock_mutex(&lock->wait_lock, flags);
64431
64432 debug_mutex_lock_common(lock, &waiter);
64433- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
64434+ debug_mutex_add_waiter(lock, &waiter, task);
64435
64436 /* add waiting tasks to the end of the waitqueue (FIFO): */
64437 list_add_tail(&waiter.list, &lock->wait_list);
64438@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock,
64439 * TASK_UNINTERRUPTIBLE case.)
64440 */
64441 if (unlikely(signal_pending_state(state, task))) {
64442- mutex_remove_waiter(lock, &waiter,
64443- task_thread_info(task));
64444+ mutex_remove_waiter(lock, &waiter, task);
64445 mutex_release(&lock->dep_map, 1, ip);
64446 spin_unlock_mutex(&lock->wait_lock, flags);
64447
64448@@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock,
64449 done:
64450 lock_acquired(&lock->dep_map, ip);
64451 /* got the lock - rejoice! */
64452- mutex_remove_waiter(lock, &waiter, current_thread_info());
64453+ mutex_remove_waiter(lock, &waiter, task);
64454 mutex_set_owner(lock);
64455
64456 /* set it to 0 if there are no waiters left: */
64457diff -urNp linux-3.1.1/kernel/mutex-debug.c linux-3.1.1/kernel/mutex-debug.c
64458--- linux-3.1.1/kernel/mutex-debug.c 2011-11-11 15:19:27.000000000 -0500
64459+++ linux-3.1.1/kernel/mutex-debug.c 2011-11-16 18:39:08.000000000 -0500
64460@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
64461 }
64462
64463 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64464- struct thread_info *ti)
64465+ struct task_struct *task)
64466 {
64467 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
64468
64469 /* Mark the current thread as blocked on the lock: */
64470- ti->task->blocked_on = waiter;
64471+ task->blocked_on = waiter;
64472 }
64473
64474 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64475- struct thread_info *ti)
64476+ struct task_struct *task)
64477 {
64478 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
64479- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
64480- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
64481- ti->task->blocked_on = NULL;
64482+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
64483+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
64484+ task->blocked_on = NULL;
64485
64486 list_del_init(&waiter->list);
64487 waiter->task = NULL;
64488diff -urNp linux-3.1.1/kernel/mutex-debug.h linux-3.1.1/kernel/mutex-debug.h
64489--- linux-3.1.1/kernel/mutex-debug.h 2011-11-11 15:19:27.000000000 -0500
64490+++ linux-3.1.1/kernel/mutex-debug.h 2011-11-16 18:39:08.000000000 -0500
64491@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(stru
64492 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
64493 extern void debug_mutex_add_waiter(struct mutex *lock,
64494 struct mutex_waiter *waiter,
64495- struct thread_info *ti);
64496+ struct task_struct *task);
64497 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64498- struct thread_info *ti);
64499+ struct task_struct *task);
64500 extern void debug_mutex_unlock(struct mutex *lock);
64501 extern void debug_mutex_init(struct mutex *lock, const char *name,
64502 struct lock_class_key *key);
64503diff -urNp linux-3.1.1/kernel/padata.c linux-3.1.1/kernel/padata.c
64504--- linux-3.1.1/kernel/padata.c 2011-11-11 15:19:27.000000000 -0500
64505+++ linux-3.1.1/kernel/padata.c 2011-11-16 18:39:08.000000000 -0500
64506@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
64507 padata->pd = pd;
64508 padata->cb_cpu = cb_cpu;
64509
64510- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
64511- atomic_set(&pd->seq_nr, -1);
64512+ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
64513+ atomic_set_unchecked(&pd->seq_nr, -1);
64514
64515- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
64516+ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
64517
64518 target_cpu = padata_cpu_hash(padata);
64519 queue = per_cpu_ptr(pd->pqueue, target_cpu);
64520@@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
64521 padata_init_pqueues(pd);
64522 padata_init_squeues(pd);
64523 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
64524- atomic_set(&pd->seq_nr, -1);
64525+ atomic_set_unchecked(&pd->seq_nr, -1);
64526 atomic_set(&pd->reorder_objects, 0);
64527 atomic_set(&pd->refcnt, 0);
64528 pd->pinst = pinst;
64529diff -urNp linux-3.1.1/kernel/panic.c linux-3.1.1/kernel/panic.c
64530--- linux-3.1.1/kernel/panic.c 2011-11-11 15:19:27.000000000 -0500
64531+++ linux-3.1.1/kernel/panic.c 2011-11-16 18:40:44.000000000 -0500
64532@@ -371,7 +371,7 @@ static void warn_slowpath_common(const c
64533 const char *board;
64534
64535 printk(KERN_WARNING "------------[ cut here ]------------\n");
64536- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
64537+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
64538 board = dmi_get_system_info(DMI_PRODUCT_NAME);
64539 if (board)
64540 printk(KERN_WARNING "Hardware name: %s\n", board);
64541@@ -426,7 +426,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
64542 */
64543 void __stack_chk_fail(void)
64544 {
64545- panic("stack-protector: Kernel stack is corrupted in: %p\n",
64546+ dump_stack();
64547+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
64548 __builtin_return_address(0));
64549 }
64550 EXPORT_SYMBOL(__stack_chk_fail);
64551diff -urNp linux-3.1.1/kernel/pid.c linux-3.1.1/kernel/pid.c
64552--- linux-3.1.1/kernel/pid.c 2011-11-11 15:19:27.000000000 -0500
64553+++ linux-3.1.1/kernel/pid.c 2011-11-16 18:40:44.000000000 -0500
64554@@ -33,6 +33,7 @@
64555 #include <linux/rculist.h>
64556 #include <linux/bootmem.h>
64557 #include <linux/hash.h>
64558+#include <linux/security.h>
64559 #include <linux/pid_namespace.h>
64560 #include <linux/init_task.h>
64561 #include <linux/syscalls.h>
64562@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
64563
64564 int pid_max = PID_MAX_DEFAULT;
64565
64566-#define RESERVED_PIDS 300
64567+#define RESERVED_PIDS 500
64568
64569 int pid_max_min = RESERVED_PIDS + 1;
64570 int pid_max_max = PID_MAX_LIMIT;
64571@@ -418,8 +419,15 @@ EXPORT_SYMBOL(pid_task);
64572 */
64573 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
64574 {
64575+ struct task_struct *task;
64576+
64577 rcu_lockdep_assert(rcu_read_lock_held());
64578- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64579+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64580+
64581+ if (gr_pid_is_chrooted(task))
64582+ return NULL;
64583+
64584+ return task;
64585 }
64586
64587 struct task_struct *find_task_by_vpid(pid_t vnr)
64588@@ -427,6 +435,12 @@ struct task_struct *find_task_by_vpid(pi
64589 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
64590 }
64591
64592+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
64593+{
64594+ rcu_lockdep_assert(rcu_read_lock_held());
64595+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
64596+}
64597+
64598 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
64599 {
64600 struct pid *pid;
64601diff -urNp linux-3.1.1/kernel/posix-cpu-timers.c linux-3.1.1/kernel/posix-cpu-timers.c
64602--- linux-3.1.1/kernel/posix-cpu-timers.c 2011-11-11 15:19:27.000000000 -0500
64603+++ linux-3.1.1/kernel/posix-cpu-timers.c 2011-11-16 18:40:44.000000000 -0500
64604@@ -6,6 +6,7 @@
64605 #include <linux/posix-timers.h>
64606 #include <linux/errno.h>
64607 #include <linux/math64.h>
64608+#include <linux/security.h>
64609 #include <asm/uaccess.h>
64610 #include <linux/kernel_stat.h>
64611 #include <trace/events/timer.h>
64612@@ -1606,14 +1607,14 @@ struct k_clock clock_posix_cpu = {
64613
64614 static __init int init_posix_cpu_timers(void)
64615 {
64616- struct k_clock process = {
64617+ static struct k_clock process = {
64618 .clock_getres = process_cpu_clock_getres,
64619 .clock_get = process_cpu_clock_get,
64620 .timer_create = process_cpu_timer_create,
64621 .nsleep = process_cpu_nsleep,
64622 .nsleep_restart = process_cpu_nsleep_restart,
64623 };
64624- struct k_clock thread = {
64625+ static struct k_clock thread = {
64626 .clock_getres = thread_cpu_clock_getres,
64627 .clock_get = thread_cpu_clock_get,
64628 .timer_create = thread_cpu_timer_create,
64629diff -urNp linux-3.1.1/kernel/posix-timers.c linux-3.1.1/kernel/posix-timers.c
64630--- linux-3.1.1/kernel/posix-timers.c 2011-11-11 15:19:27.000000000 -0500
64631+++ linux-3.1.1/kernel/posix-timers.c 2011-11-16 18:40:44.000000000 -0500
64632@@ -43,6 +43,7 @@
64633 #include <linux/idr.h>
64634 #include <linux/posix-clock.h>
64635 #include <linux/posix-timers.h>
64636+#include <linux/grsecurity.h>
64637 #include <linux/syscalls.h>
64638 #include <linux/wait.h>
64639 #include <linux/workqueue.h>
64640@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
64641 * which we beg off on and pass to do_sys_settimeofday().
64642 */
64643
64644-static struct k_clock posix_clocks[MAX_CLOCKS];
64645+static struct k_clock *posix_clocks[MAX_CLOCKS];
64646
64647 /*
64648 * These ones are defined below.
64649@@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
64650 */
64651 static __init int init_posix_timers(void)
64652 {
64653- struct k_clock clock_realtime = {
64654+ static struct k_clock clock_realtime = {
64655 .clock_getres = hrtimer_get_res,
64656 .clock_get = posix_clock_realtime_get,
64657 .clock_set = posix_clock_realtime_set,
64658@@ -239,7 +240,7 @@ static __init int init_posix_timers(void
64659 .timer_get = common_timer_get,
64660 .timer_del = common_timer_del,
64661 };
64662- struct k_clock clock_monotonic = {
64663+ static struct k_clock clock_monotonic = {
64664 .clock_getres = hrtimer_get_res,
64665 .clock_get = posix_ktime_get_ts,
64666 .nsleep = common_nsleep,
64667@@ -249,19 +250,19 @@ static __init int init_posix_timers(void
64668 .timer_get = common_timer_get,
64669 .timer_del = common_timer_del,
64670 };
64671- struct k_clock clock_monotonic_raw = {
64672+ static struct k_clock clock_monotonic_raw = {
64673 .clock_getres = hrtimer_get_res,
64674 .clock_get = posix_get_monotonic_raw,
64675 };
64676- struct k_clock clock_realtime_coarse = {
64677+ static struct k_clock clock_realtime_coarse = {
64678 .clock_getres = posix_get_coarse_res,
64679 .clock_get = posix_get_realtime_coarse,
64680 };
64681- struct k_clock clock_monotonic_coarse = {
64682+ static struct k_clock clock_monotonic_coarse = {
64683 .clock_getres = posix_get_coarse_res,
64684 .clock_get = posix_get_monotonic_coarse,
64685 };
64686- struct k_clock clock_boottime = {
64687+ static struct k_clock clock_boottime = {
64688 .clock_getres = hrtimer_get_res,
64689 .clock_get = posix_get_boottime,
64690 .nsleep = common_nsleep,
64691@@ -272,6 +273,8 @@ static __init int init_posix_timers(void
64692 .timer_del = common_timer_del,
64693 };
64694
64695+ pax_track_stack();
64696+
64697 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
64698 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
64699 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
64700@@ -473,7 +476,7 @@ void posix_timers_register_clock(const c
64701 return;
64702 }
64703
64704- posix_clocks[clock_id] = *new_clock;
64705+ posix_clocks[clock_id] = new_clock;
64706 }
64707 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
64708
64709@@ -519,9 +522,9 @@ static struct k_clock *clockid_to_kclock
64710 return (id & CLOCKFD_MASK) == CLOCKFD ?
64711 &clock_posix_dynamic : &clock_posix_cpu;
64712
64713- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
64714+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
64715 return NULL;
64716- return &posix_clocks[id];
64717+ return posix_clocks[id];
64718 }
64719
64720 static int common_timer_create(struct k_itimer *new_timer)
64721@@ -959,6 +962,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
64722 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
64723 return -EFAULT;
64724
64725+ /* only the CLOCK_REALTIME clock can be set, all other clocks
64726+ have their clock_set fptr set to a nosettime dummy function
64727+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
64728+ call common_clock_set, which calls do_sys_settimeofday, which
64729+ we hook
64730+ */
64731+
64732 return kc->clock_set(which_clock, &new_tp);
64733 }
64734
64735diff -urNp linux-3.1.1/kernel/power/poweroff.c linux-3.1.1/kernel/power/poweroff.c
64736--- linux-3.1.1/kernel/power/poweroff.c 2011-11-11 15:19:27.000000000 -0500
64737+++ linux-3.1.1/kernel/power/poweroff.c 2011-11-16 18:39:08.000000000 -0500
64738@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
64739 .enable_mask = SYSRQ_ENABLE_BOOT,
64740 };
64741
64742-static int pm_sysrq_init(void)
64743+static int __init pm_sysrq_init(void)
64744 {
64745 register_sysrq_key('o', &sysrq_poweroff_op);
64746 return 0;
64747diff -urNp linux-3.1.1/kernel/power/process.c linux-3.1.1/kernel/power/process.c
64748--- linux-3.1.1/kernel/power/process.c 2011-11-11 15:19:27.000000000 -0500
64749+++ linux-3.1.1/kernel/power/process.c 2011-11-16 18:39:08.000000000 -0500
64750@@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
64751 u64 elapsed_csecs64;
64752 unsigned int elapsed_csecs;
64753 bool wakeup = false;
64754+ bool timedout = false;
64755
64756 do_gettimeofday(&start);
64757
64758@@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
64759
64760 while (true) {
64761 todo = 0;
64762+ if (time_after(jiffies, end_time))
64763+ timedout = true;
64764 read_lock(&tasklist_lock);
64765 do_each_thread(g, p) {
64766 if (frozen(p) || !freezable(p))
64767@@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
64768 * try_to_stop() after schedule() in ptrace/signal
64769 * stop sees TIF_FREEZE.
64770 */
64771- if (!task_is_stopped_or_traced(p) &&
64772- !freezer_should_skip(p))
64773+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
64774 todo++;
64775+ if (timedout) {
64776+ printk(KERN_ERR "Task refusing to freeze:\n");
64777+ sched_show_task(p);
64778+ }
64779+ }
64780 } while_each_thread(g, p);
64781 read_unlock(&tasklist_lock);
64782
64783@@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
64784 todo += wq_busy;
64785 }
64786
64787- if (!todo || time_after(jiffies, end_time))
64788+ if (!todo || timedout)
64789 break;
64790
64791 if (pm_wakeup_pending()) {
64792diff -urNp linux-3.1.1/kernel/printk.c linux-3.1.1/kernel/printk.c
64793--- linux-3.1.1/kernel/printk.c 2011-11-11 15:19:27.000000000 -0500
64794+++ linux-3.1.1/kernel/printk.c 2011-11-16 19:38:11.000000000 -0500
64795@@ -313,6 +313,11 @@ static int check_syslog_permissions(int
64796 if (from_file && type != SYSLOG_ACTION_OPEN)
64797 return 0;
64798
64799+#ifdef CONFIG_GRKERNSEC_DMESG
64800+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
64801+ return -EPERM;
64802+#endif
64803+
64804 if (syslog_action_restricted(type)) {
64805 if (capable(CAP_SYSLOG))
64806 return 0;
64807diff -urNp linux-3.1.1/kernel/profile.c linux-3.1.1/kernel/profile.c
64808--- linux-3.1.1/kernel/profile.c 2011-11-11 15:19:27.000000000 -0500
64809+++ linux-3.1.1/kernel/profile.c 2011-11-16 18:39:08.000000000 -0500
64810@@ -39,7 +39,7 @@ struct profile_hit {
64811 /* Oprofile timer tick hook */
64812 static int (*timer_hook)(struct pt_regs *) __read_mostly;
64813
64814-static atomic_t *prof_buffer;
64815+static atomic_unchecked_t *prof_buffer;
64816 static unsigned long prof_len, prof_shift;
64817
64818 int prof_on __read_mostly;
64819@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
64820 hits[i].pc = 0;
64821 continue;
64822 }
64823- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64824+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64825 hits[i].hits = hits[i].pc = 0;
64826 }
64827 }
64828@@ -342,9 +342,9 @@ static void do_profile_hits(int type, vo
64829 * Add the current hit(s) and flush the write-queue out
64830 * to the global buffer:
64831 */
64832- atomic_add(nr_hits, &prof_buffer[pc]);
64833+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
64834 for (i = 0; i < NR_PROFILE_HIT; ++i) {
64835- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64836+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64837 hits[i].pc = hits[i].hits = 0;
64838 }
64839 out:
64840@@ -419,7 +419,7 @@ static void do_profile_hits(int type, vo
64841 {
64842 unsigned long pc;
64843 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
64844- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64845+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64846 }
64847 #endif /* !CONFIG_SMP */
64848
64849@@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
64850 return -EFAULT;
64851 buf++; p++; count--; read++;
64852 }
64853- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
64854+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
64855 if (copy_to_user(buf, (void *)pnt, count))
64856 return -EFAULT;
64857 read += count;
64858@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
64859 }
64860 #endif
64861 profile_discard_flip_buffers();
64862- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
64863+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
64864 return count;
64865 }
64866
64867diff -urNp linux-3.1.1/kernel/ptrace.c linux-3.1.1/kernel/ptrace.c
64868--- linux-3.1.1/kernel/ptrace.c 2011-11-11 15:19:27.000000000 -0500
64869+++ linux-3.1.1/kernel/ptrace.c 2011-11-16 19:50:22.000000000 -0500
64870@@ -161,7 +161,8 @@ int ptrace_check_attach(struct task_stru
64871 return ret;
64872 }
64873
64874-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
64875+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
64876+ unsigned int log)
64877 {
64878 const struct cred *cred = current_cred(), *tcred;
64879
64880@@ -187,7 +188,8 @@ int __ptrace_may_access(struct task_stru
64881 cred->gid == tcred->sgid &&
64882 cred->gid == tcred->gid))
64883 goto ok;
64884- if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
64885+ if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
64886+ (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
64887 goto ok;
64888 rcu_read_unlock();
64889 return -EPERM;
64890@@ -196,7 +198,9 @@ ok:
64891 smp_rmb();
64892 if (task->mm)
64893 dumpable = get_dumpable(task->mm);
64894- if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
64895+ if (!dumpable &&
64896+ ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
64897+ (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
64898 return -EPERM;
64899
64900 return security_ptrace_access_check(task, mode);
64901@@ -206,7 +210,16 @@ bool ptrace_may_access(struct task_struc
64902 {
64903 int err;
64904 task_lock(task);
64905- err = __ptrace_may_access(task, mode);
64906+ err = __ptrace_may_access(task, mode, 0);
64907+ task_unlock(task);
64908+ return !err;
64909+}
64910+
64911+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
64912+{
64913+ int err;
64914+ task_lock(task);
64915+ err = __ptrace_may_access(task, mode, 1);
64916 task_unlock(task);
64917 return !err;
64918 }
64919@@ -251,7 +264,7 @@ static int ptrace_attach(struct task_str
64920 goto out;
64921
64922 task_lock(task);
64923- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
64924+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
64925 task_unlock(task);
64926 if (retval)
64927 goto unlock_creds;
64928@@ -266,7 +279,7 @@ static int ptrace_attach(struct task_str
64929 task->ptrace = PT_PTRACED;
64930 if (seize)
64931 task->ptrace |= PT_SEIZED;
64932- if (task_ns_capable(task, CAP_SYS_PTRACE))
64933+ if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
64934 task->ptrace |= PT_PTRACE_CAP;
64935
64936 __ptrace_link(task, current);
64937@@ -461,6 +474,8 @@ int ptrace_readdata(struct task_struct *
64938 {
64939 int copied = 0;
64940
64941+ pax_track_stack();
64942+
64943 while (len > 0) {
64944 char buf[128];
64945 int this_len, retval;
64946@@ -472,7 +487,7 @@ int ptrace_readdata(struct task_struct *
64947 break;
64948 return -EIO;
64949 }
64950- if (copy_to_user(dst, buf, retval))
64951+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
64952 return -EFAULT;
64953 copied += retval;
64954 src += retval;
64955@@ -486,6 +501,8 @@ int ptrace_writedata(struct task_struct
64956 {
64957 int copied = 0;
64958
64959+ pax_track_stack();
64960+
64961 while (len > 0) {
64962 char buf[128];
64963 int this_len, retval;
64964@@ -669,10 +686,12 @@ int ptrace_request(struct task_struct *c
64965 bool seized = child->ptrace & PT_SEIZED;
64966 int ret = -EIO;
64967 siginfo_t siginfo, *si;
64968- void __user *datavp = (void __user *) data;
64969+ void __user *datavp = (__force void __user *) data;
64970 unsigned long __user *datalp = datavp;
64971 unsigned long flags;
64972
64973+ pax_track_stack();
64974+
64975 switch (request) {
64976 case PTRACE_PEEKTEXT:
64977 case PTRACE_PEEKDATA:
64978@@ -871,14 +890,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
64979 goto out;
64980 }
64981
64982+ if (gr_handle_ptrace(child, request)) {
64983+ ret = -EPERM;
64984+ goto out_put_task_struct;
64985+ }
64986+
64987 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
64988 ret = ptrace_attach(child, request, data);
64989 /*
64990 * Some architectures need to do book-keeping after
64991 * a ptrace attach.
64992 */
64993- if (!ret)
64994+ if (!ret) {
64995 arch_ptrace_attach(child);
64996+ gr_audit_ptrace(child);
64997+ }
64998 goto out_put_task_struct;
64999 }
65000
65001@@ -904,7 +930,7 @@ int generic_ptrace_peekdata(struct task_
65002 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
65003 if (copied != sizeof(tmp))
65004 return -EIO;
65005- return put_user(tmp, (unsigned long __user *)data);
65006+ return put_user(tmp, (__force unsigned long __user *)data);
65007 }
65008
65009 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
65010@@ -927,6 +953,8 @@ int compat_ptrace_request(struct task_st
65011 siginfo_t siginfo;
65012 int ret;
65013
65014+ pax_track_stack();
65015+
65016 switch (request) {
65017 case PTRACE_PEEKTEXT:
65018 case PTRACE_PEEKDATA:
65019@@ -1014,14 +1042,21 @@ asmlinkage long compat_sys_ptrace(compat
65020 goto out;
65021 }
65022
65023+ if (gr_handle_ptrace(child, request)) {
65024+ ret = -EPERM;
65025+ goto out_put_task_struct;
65026+ }
65027+
65028 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65029 ret = ptrace_attach(child, request, data);
65030 /*
65031 * Some architectures need to do book-keeping after
65032 * a ptrace attach.
65033 */
65034- if (!ret)
65035+ if (!ret) {
65036 arch_ptrace_attach(child);
65037+ gr_audit_ptrace(child);
65038+ }
65039 goto out_put_task_struct;
65040 }
65041
65042diff -urNp linux-3.1.1/kernel/rcutorture.c linux-3.1.1/kernel/rcutorture.c
65043--- linux-3.1.1/kernel/rcutorture.c 2011-11-11 15:19:27.000000000 -0500
65044+++ linux-3.1.1/kernel/rcutorture.c 2011-11-16 18:39:08.000000000 -0500
65045@@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
65046 { 0 };
65047 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
65048 { 0 };
65049-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65050-static atomic_t n_rcu_torture_alloc;
65051-static atomic_t n_rcu_torture_alloc_fail;
65052-static atomic_t n_rcu_torture_free;
65053-static atomic_t n_rcu_torture_mberror;
65054-static atomic_t n_rcu_torture_error;
65055+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65056+static atomic_unchecked_t n_rcu_torture_alloc;
65057+static atomic_unchecked_t n_rcu_torture_alloc_fail;
65058+static atomic_unchecked_t n_rcu_torture_free;
65059+static atomic_unchecked_t n_rcu_torture_mberror;
65060+static atomic_unchecked_t n_rcu_torture_error;
65061 static long n_rcu_torture_boost_ktrerror;
65062 static long n_rcu_torture_boost_rterror;
65063 static long n_rcu_torture_boost_failure;
65064@@ -223,11 +223,11 @@ rcu_torture_alloc(void)
65065
65066 spin_lock_bh(&rcu_torture_lock);
65067 if (list_empty(&rcu_torture_freelist)) {
65068- atomic_inc(&n_rcu_torture_alloc_fail);
65069+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
65070 spin_unlock_bh(&rcu_torture_lock);
65071 return NULL;
65072 }
65073- atomic_inc(&n_rcu_torture_alloc);
65074+ atomic_inc_unchecked(&n_rcu_torture_alloc);
65075 p = rcu_torture_freelist.next;
65076 list_del_init(p);
65077 spin_unlock_bh(&rcu_torture_lock);
65078@@ -240,7 +240,7 @@ rcu_torture_alloc(void)
65079 static void
65080 rcu_torture_free(struct rcu_torture *p)
65081 {
65082- atomic_inc(&n_rcu_torture_free);
65083+ atomic_inc_unchecked(&n_rcu_torture_free);
65084 spin_lock_bh(&rcu_torture_lock);
65085 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
65086 spin_unlock_bh(&rcu_torture_lock);
65087@@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
65088 i = rp->rtort_pipe_count;
65089 if (i > RCU_TORTURE_PIPE_LEN)
65090 i = RCU_TORTURE_PIPE_LEN;
65091- atomic_inc(&rcu_torture_wcount[i]);
65092+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65093 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65094 rp->rtort_mbtest = 0;
65095 rcu_torture_free(rp);
65096@@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_fr
65097 i = rp->rtort_pipe_count;
65098 if (i > RCU_TORTURE_PIPE_LEN)
65099 i = RCU_TORTURE_PIPE_LEN;
65100- atomic_inc(&rcu_torture_wcount[i]);
65101+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65102 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65103 rp->rtort_mbtest = 0;
65104 list_del(&rp->rtort_free);
65105@@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
65106 i = old_rp->rtort_pipe_count;
65107 if (i > RCU_TORTURE_PIPE_LEN)
65108 i = RCU_TORTURE_PIPE_LEN;
65109- atomic_inc(&rcu_torture_wcount[i]);
65110+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65111 old_rp->rtort_pipe_count++;
65112 cur_ops->deferred_free(old_rp);
65113 }
65114@@ -950,7 +950,7 @@ static void rcu_torture_timer(unsigned l
65115 return;
65116 }
65117 if (p->rtort_mbtest == 0)
65118- atomic_inc(&n_rcu_torture_mberror);
65119+ atomic_inc_unchecked(&n_rcu_torture_mberror);
65120 spin_lock(&rand_lock);
65121 cur_ops->read_delay(&rand);
65122 n_rcu_torture_timers++;
65123@@ -1011,7 +1011,7 @@ rcu_torture_reader(void *arg)
65124 continue;
65125 }
65126 if (p->rtort_mbtest == 0)
65127- atomic_inc(&n_rcu_torture_mberror);
65128+ atomic_inc_unchecked(&n_rcu_torture_mberror);
65129 cur_ops->read_delay(&rand);
65130 preempt_disable();
65131 pipe_count = p->rtort_pipe_count;
65132@@ -1070,16 +1070,16 @@ rcu_torture_printk(char *page)
65133 rcu_torture_current,
65134 rcu_torture_current_version,
65135 list_empty(&rcu_torture_freelist),
65136- atomic_read(&n_rcu_torture_alloc),
65137- atomic_read(&n_rcu_torture_alloc_fail),
65138- atomic_read(&n_rcu_torture_free),
65139- atomic_read(&n_rcu_torture_mberror),
65140+ atomic_read_unchecked(&n_rcu_torture_alloc),
65141+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
65142+ atomic_read_unchecked(&n_rcu_torture_free),
65143+ atomic_read_unchecked(&n_rcu_torture_mberror),
65144 n_rcu_torture_boost_ktrerror,
65145 n_rcu_torture_boost_rterror,
65146 n_rcu_torture_boost_failure,
65147 n_rcu_torture_boosts,
65148 n_rcu_torture_timers);
65149- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
65150+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
65151 n_rcu_torture_boost_ktrerror != 0 ||
65152 n_rcu_torture_boost_rterror != 0 ||
65153 n_rcu_torture_boost_failure != 0)
65154@@ -1087,7 +1087,7 @@ rcu_torture_printk(char *page)
65155 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
65156 if (i > 1) {
65157 cnt += sprintf(&page[cnt], "!!! ");
65158- atomic_inc(&n_rcu_torture_error);
65159+ atomic_inc_unchecked(&n_rcu_torture_error);
65160 WARN_ON_ONCE(1);
65161 }
65162 cnt += sprintf(&page[cnt], "Reader Pipe: ");
65163@@ -1101,7 +1101,7 @@ rcu_torture_printk(char *page)
65164 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
65165 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65166 cnt += sprintf(&page[cnt], " %d",
65167- atomic_read(&rcu_torture_wcount[i]));
65168+ atomic_read_unchecked(&rcu_torture_wcount[i]));
65169 }
65170 cnt += sprintf(&page[cnt], "\n");
65171 if (cur_ops->stats)
65172@@ -1410,7 +1410,7 @@ rcu_torture_cleanup(void)
65173
65174 if (cur_ops->cleanup)
65175 cur_ops->cleanup();
65176- if (atomic_read(&n_rcu_torture_error))
65177+ if (atomic_read_unchecked(&n_rcu_torture_error))
65178 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
65179 else
65180 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
65181@@ -1474,17 +1474,17 @@ rcu_torture_init(void)
65182
65183 rcu_torture_current = NULL;
65184 rcu_torture_current_version = 0;
65185- atomic_set(&n_rcu_torture_alloc, 0);
65186- atomic_set(&n_rcu_torture_alloc_fail, 0);
65187- atomic_set(&n_rcu_torture_free, 0);
65188- atomic_set(&n_rcu_torture_mberror, 0);
65189- atomic_set(&n_rcu_torture_error, 0);
65190+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
65191+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
65192+ atomic_set_unchecked(&n_rcu_torture_free, 0);
65193+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
65194+ atomic_set_unchecked(&n_rcu_torture_error, 0);
65195 n_rcu_torture_boost_ktrerror = 0;
65196 n_rcu_torture_boost_rterror = 0;
65197 n_rcu_torture_boost_failure = 0;
65198 n_rcu_torture_boosts = 0;
65199 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
65200- atomic_set(&rcu_torture_wcount[i], 0);
65201+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
65202 for_each_possible_cpu(cpu) {
65203 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65204 per_cpu(rcu_torture_count, cpu)[i] = 0;
65205diff -urNp linux-3.1.1/kernel/rcutree.c linux-3.1.1/kernel/rcutree.c
65206--- linux-3.1.1/kernel/rcutree.c 2011-11-11 15:19:27.000000000 -0500
65207+++ linux-3.1.1/kernel/rcutree.c 2011-11-16 18:39:08.000000000 -0500
65208@@ -356,9 +356,9 @@ void rcu_enter_nohz(void)
65209 }
65210 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65211 smp_mb__before_atomic_inc(); /* See above. */
65212- atomic_inc(&rdtp->dynticks);
65213+ atomic_inc_unchecked(&rdtp->dynticks);
65214 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
65215- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65216+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65217 local_irq_restore(flags);
65218
65219 /* If the interrupt queued a callback, get out of dyntick mode. */
65220@@ -387,10 +387,10 @@ void rcu_exit_nohz(void)
65221 return;
65222 }
65223 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
65224- atomic_inc(&rdtp->dynticks);
65225+ atomic_inc_unchecked(&rdtp->dynticks);
65226 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65227 smp_mb__after_atomic_inc(); /* See above. */
65228- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65229+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65230 local_irq_restore(flags);
65231 }
65232
65233@@ -406,14 +406,14 @@ void rcu_nmi_enter(void)
65234 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
65235
65236 if (rdtp->dynticks_nmi_nesting == 0 &&
65237- (atomic_read(&rdtp->dynticks) & 0x1))
65238+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
65239 return;
65240 rdtp->dynticks_nmi_nesting++;
65241 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
65242- atomic_inc(&rdtp->dynticks);
65243+ atomic_inc_unchecked(&rdtp->dynticks);
65244 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65245 smp_mb__after_atomic_inc(); /* See above. */
65246- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65247+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65248 }
65249
65250 /**
65251@@ -432,9 +432,9 @@ void rcu_nmi_exit(void)
65252 return;
65253 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65254 smp_mb__before_atomic_inc(); /* See above. */
65255- atomic_inc(&rdtp->dynticks);
65256+ atomic_inc_unchecked(&rdtp->dynticks);
65257 smp_mb__after_atomic_inc(); /* Force delay to next write. */
65258- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65259+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65260 }
65261
65262 /**
65263@@ -469,7 +469,7 @@ void rcu_irq_exit(void)
65264 */
65265 static int dyntick_save_progress_counter(struct rcu_data *rdp)
65266 {
65267- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
65268+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
65269 return 0;
65270 }
65271
65272@@ -484,7 +484,7 @@ static int rcu_implicit_dynticks_qs(stru
65273 unsigned long curr;
65274 unsigned long snap;
65275
65276- curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
65277+ curr = (unsigned long)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
65278 snap = (unsigned long)rdp->dynticks_snap;
65279
65280 /*
65281@@ -1470,7 +1470,7 @@ __rcu_process_callbacks(struct rcu_state
65282 /*
65283 * Do softirq processing for the current CPU.
65284 */
65285-static void rcu_process_callbacks(struct softirq_action *unused)
65286+static void rcu_process_callbacks(void)
65287 {
65288 __rcu_process_callbacks(&rcu_sched_state,
65289 &__get_cpu_var(rcu_sched_data));
65290diff -urNp linux-3.1.1/kernel/rcutree.h linux-3.1.1/kernel/rcutree.h
65291--- linux-3.1.1/kernel/rcutree.h 2011-11-11 15:19:27.000000000 -0500
65292+++ linux-3.1.1/kernel/rcutree.h 2011-11-16 18:39:08.000000000 -0500
65293@@ -86,7 +86,7 @@
65294 struct rcu_dynticks {
65295 int dynticks_nesting; /* Track irq/process nesting level. */
65296 int dynticks_nmi_nesting; /* Track NMI nesting level. */
65297- atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
65298+ atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
65299 };
65300
65301 /* RCU's kthread states for tracing. */
65302diff -urNp linux-3.1.1/kernel/rcutree_plugin.h linux-3.1.1/kernel/rcutree_plugin.h
65303--- linux-3.1.1/kernel/rcutree_plugin.h 2011-11-11 15:19:27.000000000 -0500
65304+++ linux-3.1.1/kernel/rcutree_plugin.h 2011-11-16 18:39:08.000000000 -0500
65305@@ -822,7 +822,7 @@ void synchronize_rcu_expedited(void)
65306
65307 /* Clean up and exit. */
65308 smp_mb(); /* ensure expedited GP seen before counter increment. */
65309- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
65310+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
65311 unlock_mb_ret:
65312 mutex_unlock(&sync_rcu_preempt_exp_mutex);
65313 mb_ret:
65314@@ -1774,8 +1774,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
65315
65316 #else /* #ifndef CONFIG_SMP */
65317
65318-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
65319-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
65320+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
65321+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
65322
65323 static int synchronize_sched_expedited_cpu_stop(void *data)
65324 {
65325@@ -1830,7 +1830,7 @@ void synchronize_sched_expedited(void)
65326 int firstsnap, s, snap, trycount = 0;
65327
65328 /* Note that atomic_inc_return() implies full memory barrier. */
65329- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
65330+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
65331 get_online_cpus();
65332
65333 /*
65334@@ -1851,7 +1851,7 @@ void synchronize_sched_expedited(void)
65335 }
65336
65337 /* Check to see if someone else did our work for us. */
65338- s = atomic_read(&sync_sched_expedited_done);
65339+ s = atomic_read_unchecked(&sync_sched_expedited_done);
65340 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
65341 smp_mb(); /* ensure test happens before caller kfree */
65342 return;
65343@@ -1866,7 +1866,7 @@ void synchronize_sched_expedited(void)
65344 * grace period works for us.
65345 */
65346 get_online_cpus();
65347- snap = atomic_read(&sync_sched_expedited_started) - 1;
65348+ snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
65349 smp_mb(); /* ensure read is before try_stop_cpus(). */
65350 }
65351
65352@@ -1877,12 +1877,12 @@ void synchronize_sched_expedited(void)
65353 * than we did beat us to the punch.
65354 */
65355 do {
65356- s = atomic_read(&sync_sched_expedited_done);
65357+ s = atomic_read_unchecked(&sync_sched_expedited_done);
65358 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
65359 smp_mb(); /* ensure test happens before caller kfree */
65360 break;
65361 }
65362- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
65363+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
65364
65365 put_online_cpus();
65366 }
65367diff -urNp linux-3.1.1/kernel/relay.c linux-3.1.1/kernel/relay.c
65368--- linux-3.1.1/kernel/relay.c 2011-11-11 15:19:27.000000000 -0500
65369+++ linux-3.1.1/kernel/relay.c 2011-11-16 18:40:44.000000000 -0500
65370@@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
65371 };
65372 ssize_t ret;
65373
65374+ pax_track_stack();
65375+
65376 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
65377 return 0;
65378 if (splice_grow_spd(pipe, &spd))
65379diff -urNp linux-3.1.1/kernel/resource.c linux-3.1.1/kernel/resource.c
65380--- linux-3.1.1/kernel/resource.c 2011-11-11 15:19:27.000000000 -0500
65381+++ linux-3.1.1/kernel/resource.c 2011-11-16 18:40:44.000000000 -0500
65382@@ -141,8 +141,18 @@ static const struct file_operations proc
65383
65384 static int __init ioresources_init(void)
65385 {
65386+#ifdef CONFIG_GRKERNSEC_PROC_ADD
65387+#ifdef CONFIG_GRKERNSEC_PROC_USER
65388+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
65389+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
65390+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65391+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
65392+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
65393+#endif
65394+#else
65395 proc_create("ioports", 0, NULL, &proc_ioports_operations);
65396 proc_create("iomem", 0, NULL, &proc_iomem_operations);
65397+#endif
65398 return 0;
65399 }
65400 __initcall(ioresources_init);
65401diff -urNp linux-3.1.1/kernel/rtmutex-tester.c linux-3.1.1/kernel/rtmutex-tester.c
65402--- linux-3.1.1/kernel/rtmutex-tester.c 2011-11-11 15:19:27.000000000 -0500
65403+++ linux-3.1.1/kernel/rtmutex-tester.c 2011-11-16 18:39:08.000000000 -0500
65404@@ -20,7 +20,7 @@
65405 #define MAX_RT_TEST_MUTEXES 8
65406
65407 static spinlock_t rttest_lock;
65408-static atomic_t rttest_event;
65409+static atomic_unchecked_t rttest_event;
65410
65411 struct test_thread_data {
65412 int opcode;
65413@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
65414
65415 case RTTEST_LOCKCONT:
65416 td->mutexes[td->opdata] = 1;
65417- td->event = atomic_add_return(1, &rttest_event);
65418+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65419 return 0;
65420
65421 case RTTEST_RESET:
65422@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
65423 return 0;
65424
65425 case RTTEST_RESETEVENT:
65426- atomic_set(&rttest_event, 0);
65427+ atomic_set_unchecked(&rttest_event, 0);
65428 return 0;
65429
65430 default:
65431@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
65432 return ret;
65433
65434 td->mutexes[id] = 1;
65435- td->event = atomic_add_return(1, &rttest_event);
65436+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65437 rt_mutex_lock(&mutexes[id]);
65438- td->event = atomic_add_return(1, &rttest_event);
65439+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65440 td->mutexes[id] = 4;
65441 return 0;
65442
65443@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
65444 return ret;
65445
65446 td->mutexes[id] = 1;
65447- td->event = atomic_add_return(1, &rttest_event);
65448+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65449 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
65450- td->event = atomic_add_return(1, &rttest_event);
65451+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65452 td->mutexes[id] = ret ? 0 : 4;
65453 return ret ? -EINTR : 0;
65454
65455@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
65456 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
65457 return ret;
65458
65459- td->event = atomic_add_return(1, &rttest_event);
65460+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65461 rt_mutex_unlock(&mutexes[id]);
65462- td->event = atomic_add_return(1, &rttest_event);
65463+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65464 td->mutexes[id] = 0;
65465 return 0;
65466
65467@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
65468 break;
65469
65470 td->mutexes[dat] = 2;
65471- td->event = atomic_add_return(1, &rttest_event);
65472+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65473 break;
65474
65475 default:
65476@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
65477 return;
65478
65479 td->mutexes[dat] = 3;
65480- td->event = atomic_add_return(1, &rttest_event);
65481+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65482 break;
65483
65484 case RTTEST_LOCKNOWAIT:
65485@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
65486 return;
65487
65488 td->mutexes[dat] = 1;
65489- td->event = atomic_add_return(1, &rttest_event);
65490+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65491 return;
65492
65493 default:
65494diff -urNp linux-3.1.1/kernel/sched_autogroup.c linux-3.1.1/kernel/sched_autogroup.c
65495--- linux-3.1.1/kernel/sched_autogroup.c 2011-11-11 15:19:27.000000000 -0500
65496+++ linux-3.1.1/kernel/sched_autogroup.c 2011-11-16 18:39:08.000000000 -0500
65497@@ -7,7 +7,7 @@
65498
65499 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
65500 static struct autogroup autogroup_default;
65501-static atomic_t autogroup_seq_nr;
65502+static atomic_unchecked_t autogroup_seq_nr;
65503
65504 static void __init autogroup_init(struct task_struct *init_task)
65505 {
65506@@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
65507
65508 kref_init(&ag->kref);
65509 init_rwsem(&ag->lock);
65510- ag->id = atomic_inc_return(&autogroup_seq_nr);
65511+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
65512 ag->tg = tg;
65513 #ifdef CONFIG_RT_GROUP_SCHED
65514 /*
65515diff -urNp linux-3.1.1/kernel/sched.c linux-3.1.1/kernel/sched.c
65516--- linux-3.1.1/kernel/sched.c 2011-11-11 15:19:27.000000000 -0500
65517+++ linux-3.1.1/kernel/sched.c 2011-11-16 18:40:44.000000000 -0500
65518@@ -4264,6 +4264,8 @@ static void __sched __schedule(void)
65519 struct rq *rq;
65520 int cpu;
65521
65522+ pax_track_stack();
65523+
65524 need_resched:
65525 preempt_disable();
65526 cpu = smp_processor_id();
65527@@ -4950,6 +4952,8 @@ int can_nice(const struct task_struct *p
65528 /* convert nice value [19,-20] to rlimit style value [1,40] */
65529 int nice_rlim = 20 - nice;
65530
65531+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
65532+
65533 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
65534 capable(CAP_SYS_NICE));
65535 }
65536@@ -4983,7 +4987,8 @@ SYSCALL_DEFINE1(nice, int, increment)
65537 if (nice > 19)
65538 nice = 19;
65539
65540- if (increment < 0 && !can_nice(current, nice))
65541+ if (increment < 0 && (!can_nice(current, nice) ||
65542+ gr_handle_chroot_nice()))
65543 return -EPERM;
65544
65545 retval = security_task_setnice(current, nice);
65546@@ -5127,6 +5132,7 @@ recheck:
65547 unsigned long rlim_rtprio =
65548 task_rlimit(p, RLIMIT_RTPRIO);
65549
65550+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
65551 /* can't set/change the rt policy */
65552 if (policy != p->policy && !rlim_rtprio)
65553 return -EPERM;
65554diff -urNp linux-3.1.1/kernel/sched_fair.c linux-3.1.1/kernel/sched_fair.c
65555--- linux-3.1.1/kernel/sched_fair.c 2011-11-11 15:19:27.000000000 -0500
65556+++ linux-3.1.1/kernel/sched_fair.c 2011-11-16 18:39:08.000000000 -0500
65557@@ -4062,7 +4062,7 @@ static void nohz_idle_balance(int this_c
65558 * run_rebalance_domains is triggered when needed from the scheduler tick.
65559 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
65560 */
65561-static void run_rebalance_domains(struct softirq_action *h)
65562+static void run_rebalance_domains(void)
65563 {
65564 int this_cpu = smp_processor_id();
65565 struct rq *this_rq = cpu_rq(this_cpu);
65566diff -urNp linux-3.1.1/kernel/signal.c linux-3.1.1/kernel/signal.c
65567--- linux-3.1.1/kernel/signal.c 2011-11-11 15:19:27.000000000 -0500
65568+++ linux-3.1.1/kernel/signal.c 2011-11-16 19:30:04.000000000 -0500
65569@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
65570
65571 int print_fatal_signals __read_mostly;
65572
65573-static void __user *sig_handler(struct task_struct *t, int sig)
65574+static __sighandler_t sig_handler(struct task_struct *t, int sig)
65575 {
65576 return t->sighand->action[sig - 1].sa.sa_handler;
65577 }
65578
65579-static int sig_handler_ignored(void __user *handler, int sig)
65580+static int sig_handler_ignored(__sighandler_t handler, int sig)
65581 {
65582 /* Is it explicitly or implicitly ignored? */
65583 return handler == SIG_IGN ||
65584@@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
65585 static int sig_task_ignored(struct task_struct *t, int sig,
65586 int from_ancestor_ns)
65587 {
65588- void __user *handler;
65589+ __sighandler_t handler;
65590
65591 handler = sig_handler(t, sig);
65592
65593@@ -364,6 +364,9 @@ __sigqueue_alloc(int sig, struct task_st
65594 atomic_inc(&user->sigpending);
65595 rcu_read_unlock();
65596
65597+ if (!override_rlimit)
65598+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
65599+
65600 if (override_rlimit ||
65601 atomic_read(&user->sigpending) <=
65602 task_rlimit(t, RLIMIT_SIGPENDING)) {
65603@@ -488,7 +491,7 @@ flush_signal_handlers(struct task_struct
65604
65605 int unhandled_signal(struct task_struct *tsk, int sig)
65606 {
65607- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
65608+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
65609 if (is_global_init(tsk))
65610 return 1;
65611 if (handler != SIG_IGN && handler != SIG_DFL)
65612@@ -815,6 +818,13 @@ static int check_kill_permission(int sig
65613 }
65614 }
65615
65616+ /* allow glibc communication via tgkill to other threads in our
65617+ thread group */
65618+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
65619+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
65620+ && gr_handle_signal(t, sig))
65621+ return -EPERM;
65622+
65623 return security_task_kill(t, info, sig, 0);
65624 }
65625
65626@@ -1165,7 +1175,7 @@ __group_send_sig_info(int sig, struct si
65627 return send_signal(sig, info, p, 1);
65628 }
65629
65630-static int
65631+int
65632 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
65633 {
65634 return send_signal(sig, info, t, 0);
65635@@ -1202,6 +1212,7 @@ force_sig_info(int sig, struct siginfo *
65636 unsigned long int flags;
65637 int ret, blocked, ignored;
65638 struct k_sigaction *action;
65639+ int is_unhandled = 0;
65640
65641 spin_lock_irqsave(&t->sighand->siglock, flags);
65642 action = &t->sighand->action[sig-1];
65643@@ -1216,9 +1227,18 @@ force_sig_info(int sig, struct siginfo *
65644 }
65645 if (action->sa.sa_handler == SIG_DFL)
65646 t->signal->flags &= ~SIGNAL_UNKILLABLE;
65647+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
65648+ is_unhandled = 1;
65649 ret = specific_send_sig_info(sig, info, t);
65650 spin_unlock_irqrestore(&t->sighand->siglock, flags);
65651
65652+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
65653+ normal operation */
65654+ if (is_unhandled) {
65655+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
65656+ gr_handle_crash(t, sig);
65657+ }
65658+
65659 return ret;
65660 }
65661
65662@@ -1285,8 +1305,11 @@ int group_send_sig_info(int sig, struct
65663 ret = check_kill_permission(sig, info, p);
65664 rcu_read_unlock();
65665
65666- if (!ret && sig)
65667+ if (!ret && sig) {
65668 ret = do_send_sig_info(sig, info, p, true);
65669+ if (!ret)
65670+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
65671+ }
65672
65673 return ret;
65674 }
65675@@ -1909,6 +1932,8 @@ static void ptrace_do_notify(int signr,
65676 {
65677 siginfo_t info;
65678
65679+ pax_track_stack();
65680+
65681 memset(&info, 0, sizeof info);
65682 info.si_signo = signr;
65683 info.si_code = exit_code;
65684@@ -2748,7 +2773,15 @@ do_send_specific(pid_t tgid, pid_t pid,
65685 int error = -ESRCH;
65686
65687 rcu_read_lock();
65688- p = find_task_by_vpid(pid);
65689+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65690+ /* allow glibc communication via tgkill to other threads in our
65691+ thread group */
65692+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
65693+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
65694+ p = find_task_by_vpid_unrestricted(pid);
65695+ else
65696+#endif
65697+ p = find_task_by_vpid(pid);
65698 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
65699 error = check_kill_permission(sig, info, p);
65700 /*
65701diff -urNp linux-3.1.1/kernel/smp.c linux-3.1.1/kernel/smp.c
65702--- linux-3.1.1/kernel/smp.c 2011-11-11 15:19:27.000000000 -0500
65703+++ linux-3.1.1/kernel/smp.c 2011-11-16 18:39:08.000000000 -0500
65704@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t fu
65705 }
65706 EXPORT_SYMBOL(smp_call_function);
65707
65708-void ipi_call_lock(void)
65709+void ipi_call_lock(void) __acquires(call_function.lock)
65710 {
65711 raw_spin_lock(&call_function.lock);
65712 }
65713
65714-void ipi_call_unlock(void)
65715+void ipi_call_unlock(void) __releases(call_function.lock)
65716 {
65717 raw_spin_unlock(&call_function.lock);
65718 }
65719
65720-void ipi_call_lock_irq(void)
65721+void ipi_call_lock_irq(void) __acquires(call_function.lock)
65722 {
65723 raw_spin_lock_irq(&call_function.lock);
65724 }
65725
65726-void ipi_call_unlock_irq(void)
65727+void ipi_call_unlock_irq(void) __releases(call_function.lock)
65728 {
65729 raw_spin_unlock_irq(&call_function.lock);
65730 }
65731diff -urNp linux-3.1.1/kernel/softirq.c linux-3.1.1/kernel/softirq.c
65732--- linux-3.1.1/kernel/softirq.c 2011-11-11 15:19:27.000000000 -0500
65733+++ linux-3.1.1/kernel/softirq.c 2011-11-16 18:39:08.000000000 -0500
65734@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
65735
65736 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
65737
65738-char *softirq_to_name[NR_SOFTIRQS] = {
65739+const char * const softirq_to_name[NR_SOFTIRQS] = {
65740 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
65741 "TASKLET", "SCHED", "HRTIMER", "RCU"
65742 };
65743@@ -235,7 +235,7 @@ restart:
65744 kstat_incr_softirqs_this_cpu(vec_nr);
65745
65746 trace_softirq_entry(vec_nr);
65747- h->action(h);
65748+ h->action();
65749 trace_softirq_exit(vec_nr);
65750 if (unlikely(prev_count != preempt_count())) {
65751 printk(KERN_ERR "huh, entered softirq %u %s %p"
65752@@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
65753 local_irq_restore(flags);
65754 }
65755
65756-void open_softirq(int nr, void (*action)(struct softirq_action *))
65757+void open_softirq(int nr, void (*action)(void))
65758 {
65759- softirq_vec[nr].action = action;
65760+ pax_open_kernel();
65761+ *(void **)&softirq_vec[nr].action = action;
65762+ pax_close_kernel();
65763 }
65764
65765 /*
65766@@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct
65767
65768 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
65769
65770-static void tasklet_action(struct softirq_action *a)
65771+static void tasklet_action(void)
65772 {
65773 struct tasklet_struct *list;
65774
65775@@ -476,7 +478,7 @@ static void tasklet_action(struct softir
65776 }
65777 }
65778
65779-static void tasklet_hi_action(struct softirq_action *a)
65780+static void tasklet_hi_action(void)
65781 {
65782 struct tasklet_struct *list;
65783
65784diff -urNp linux-3.1.1/kernel/sys.c linux-3.1.1/kernel/sys.c
65785--- linux-3.1.1/kernel/sys.c 2011-11-11 15:19:27.000000000 -0500
65786+++ linux-3.1.1/kernel/sys.c 2011-11-16 18:40:44.000000000 -0500
65787@@ -157,6 +157,12 @@ static int set_one_prio(struct task_stru
65788 error = -EACCES;
65789 goto out;
65790 }
65791+
65792+ if (gr_handle_chroot_setpriority(p, niceval)) {
65793+ error = -EACCES;
65794+ goto out;
65795+ }
65796+
65797 no_nice = security_task_setnice(p, niceval);
65798 if (no_nice) {
65799 error = no_nice;
65800@@ -571,6 +577,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
65801 goto error;
65802 }
65803
65804+ if (gr_check_group_change(new->gid, new->egid, -1))
65805+ goto error;
65806+
65807 if (rgid != (gid_t) -1 ||
65808 (egid != (gid_t) -1 && egid != old->gid))
65809 new->sgid = new->egid;
65810@@ -600,6 +609,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
65811 old = current_cred();
65812
65813 retval = -EPERM;
65814+
65815+ if (gr_check_group_change(gid, gid, gid))
65816+ goto error;
65817+
65818 if (nsown_capable(CAP_SETGID))
65819 new->gid = new->egid = new->sgid = new->fsgid = gid;
65820 else if (gid == old->gid || gid == old->sgid)
65821@@ -687,6 +700,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
65822 goto error;
65823 }
65824
65825+ if (gr_check_user_change(new->uid, new->euid, -1))
65826+ goto error;
65827+
65828 if (new->uid != old->uid) {
65829 retval = set_user(new);
65830 if (retval < 0)
65831@@ -731,6 +747,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
65832 old = current_cred();
65833
65834 retval = -EPERM;
65835+
65836+ if (gr_check_crash_uid(uid))
65837+ goto error;
65838+ if (gr_check_user_change(uid, uid, uid))
65839+ goto error;
65840+
65841 if (nsown_capable(CAP_SETUID)) {
65842 new->suid = new->uid = uid;
65843 if (uid != old->uid) {
65844@@ -785,6 +807,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
65845 goto error;
65846 }
65847
65848+ if (gr_check_user_change(ruid, euid, -1))
65849+ goto error;
65850+
65851 if (ruid != (uid_t) -1) {
65852 new->uid = ruid;
65853 if (ruid != old->uid) {
65854@@ -849,6 +874,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
65855 goto error;
65856 }
65857
65858+ if (gr_check_group_change(rgid, egid, -1))
65859+ goto error;
65860+
65861 if (rgid != (gid_t) -1)
65862 new->gid = rgid;
65863 if (egid != (gid_t) -1)
65864@@ -895,6 +923,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
65865 old = current_cred();
65866 old_fsuid = old->fsuid;
65867
65868+ if (gr_check_user_change(-1, -1, uid))
65869+ goto error;
65870+
65871 if (uid == old->uid || uid == old->euid ||
65872 uid == old->suid || uid == old->fsuid ||
65873 nsown_capable(CAP_SETUID)) {
65874@@ -905,6 +936,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
65875 }
65876 }
65877
65878+error:
65879 abort_creds(new);
65880 return old_fsuid;
65881
65882@@ -931,12 +963,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
65883 if (gid == old->gid || gid == old->egid ||
65884 gid == old->sgid || gid == old->fsgid ||
65885 nsown_capable(CAP_SETGID)) {
65886+ if (gr_check_group_change(-1, -1, gid))
65887+ goto error;
65888+
65889 if (gid != old_fsgid) {
65890 new->fsgid = gid;
65891 goto change_okay;
65892 }
65893 }
65894
65895+error:
65896 abort_creds(new);
65897 return old_fsgid;
65898
65899@@ -1242,19 +1278,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_
65900 return -EFAULT;
65901
65902 down_read(&uts_sem);
65903- error = __copy_to_user(&name->sysname, &utsname()->sysname,
65904+ error = __copy_to_user(name->sysname, &utsname()->sysname,
65905 __OLD_UTS_LEN);
65906 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
65907- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
65908+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
65909 __OLD_UTS_LEN);
65910 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
65911- error |= __copy_to_user(&name->release, &utsname()->release,
65912+ error |= __copy_to_user(name->release, &utsname()->release,
65913 __OLD_UTS_LEN);
65914 error |= __put_user(0, name->release + __OLD_UTS_LEN);
65915- error |= __copy_to_user(&name->version, &utsname()->version,
65916+ error |= __copy_to_user(name->version, &utsname()->version,
65917 __OLD_UTS_LEN);
65918 error |= __put_user(0, name->version + __OLD_UTS_LEN);
65919- error |= __copy_to_user(&name->machine, &utsname()->machine,
65920+ error |= __copy_to_user(name->machine, &utsname()->machine,
65921 __OLD_UTS_LEN);
65922 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
65923 up_read(&uts_sem);
65924@@ -1717,7 +1753,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
65925 error = get_dumpable(me->mm);
65926 break;
65927 case PR_SET_DUMPABLE:
65928- if (arg2 < 0 || arg2 > 1) {
65929+ if (arg2 > 1) {
65930 error = -EINVAL;
65931 break;
65932 }
65933diff -urNp linux-3.1.1/kernel/sysctl_binary.c linux-3.1.1/kernel/sysctl_binary.c
65934--- linux-3.1.1/kernel/sysctl_binary.c 2011-11-11 15:19:27.000000000 -0500
65935+++ linux-3.1.1/kernel/sysctl_binary.c 2011-11-16 18:39:08.000000000 -0500
65936@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *f
65937 int i;
65938
65939 set_fs(KERNEL_DS);
65940- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
65941+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
65942 set_fs(old_fs);
65943 if (result < 0)
65944 goto out_kfree;
65945@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *f
65946 }
65947
65948 set_fs(KERNEL_DS);
65949- result = vfs_write(file, buffer, str - buffer, &pos);
65950+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
65951 set_fs(old_fs);
65952 if (result < 0)
65953 goto out_kfree;
65954@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file
65955 int i;
65956
65957 set_fs(KERNEL_DS);
65958- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
65959+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
65960 set_fs(old_fs);
65961 if (result < 0)
65962 goto out_kfree;
65963@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file
65964 }
65965
65966 set_fs(KERNEL_DS);
65967- result = vfs_write(file, buffer, str - buffer, &pos);
65968+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
65969 set_fs(old_fs);
65970 if (result < 0)
65971 goto out_kfree;
65972@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *fil
65973 int i;
65974
65975 set_fs(KERNEL_DS);
65976- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
65977+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
65978 set_fs(old_fs);
65979 if (result < 0)
65980 goto out;
65981@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struc
65982 __le16 dnaddr;
65983
65984 set_fs(KERNEL_DS);
65985- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
65986+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
65987 set_fs(old_fs);
65988 if (result < 0)
65989 goto out;
65990@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struc
65991 le16_to_cpu(dnaddr) & 0x3ff);
65992
65993 set_fs(KERNEL_DS);
65994- result = vfs_write(file, buf, len, &pos);
65995+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
65996 set_fs(old_fs);
65997 if (result < 0)
65998 goto out;
65999diff -urNp linux-3.1.1/kernel/sysctl.c linux-3.1.1/kernel/sysctl.c
66000--- linux-3.1.1/kernel/sysctl.c 2011-11-11 15:19:27.000000000 -0500
66001+++ linux-3.1.1/kernel/sysctl.c 2011-11-16 18:40:44.000000000 -0500
66002@@ -85,6 +85,13 @@
66003
66004
66005 #if defined(CONFIG_SYSCTL)
66006+#include <linux/grsecurity.h>
66007+#include <linux/grinternal.h>
66008+
66009+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
66010+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
66011+ const int op);
66012+extern int gr_handle_chroot_sysctl(const int op);
66013
66014 /* External variables not in a header file. */
66015 extern int sysctl_overcommit_memory;
66016@@ -197,6 +204,7 @@ static int sysrq_sysctl_handler(ctl_tabl
66017 }
66018
66019 #endif
66020+extern struct ctl_table grsecurity_table[];
66021
66022 static struct ctl_table root_table[];
66023 static struct ctl_table_root sysctl_table_root;
66024@@ -226,6 +234,20 @@ extern struct ctl_table epoll_table[];
66025 int sysctl_legacy_va_layout;
66026 #endif
66027
66028+#ifdef CONFIG_PAX_SOFTMODE
66029+static ctl_table pax_table[] = {
66030+ {
66031+ .procname = "softmode",
66032+ .data = &pax_softmode,
66033+ .maxlen = sizeof(unsigned int),
66034+ .mode = 0600,
66035+ .proc_handler = &proc_dointvec,
66036+ },
66037+
66038+ { }
66039+};
66040+#endif
66041+
66042 /* The default sysctl tables: */
66043
66044 static struct ctl_table root_table[] = {
66045@@ -272,6 +294,22 @@ static int max_extfrag_threshold = 1000;
66046 #endif
66047
66048 static struct ctl_table kern_table[] = {
66049+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
66050+ {
66051+ .procname = "grsecurity",
66052+ .mode = 0500,
66053+ .child = grsecurity_table,
66054+ },
66055+#endif
66056+
66057+#ifdef CONFIG_PAX_SOFTMODE
66058+ {
66059+ .procname = "pax",
66060+ .mode = 0500,
66061+ .child = pax_table,
66062+ },
66063+#endif
66064+
66065 {
66066 .procname = "sched_child_runs_first",
66067 .data = &sysctl_sched_child_runs_first,
66068@@ -546,7 +584,7 @@ static struct ctl_table kern_table[] = {
66069 .data = &modprobe_path,
66070 .maxlen = KMOD_PATH_LEN,
66071 .mode = 0644,
66072- .proc_handler = proc_dostring,
66073+ .proc_handler = proc_dostring_modpriv,
66074 },
66075 {
66076 .procname = "modules_disabled",
66077@@ -713,16 +751,20 @@ static struct ctl_table kern_table[] = {
66078 .extra1 = &zero,
66079 .extra2 = &one,
66080 },
66081+#endif
66082 {
66083 .procname = "kptr_restrict",
66084 .data = &kptr_restrict,
66085 .maxlen = sizeof(int),
66086 .mode = 0644,
66087 .proc_handler = proc_dmesg_restrict,
66088+#ifdef CONFIG_GRKERNSEC_HIDESYM
66089+ .extra1 = &two,
66090+#else
66091 .extra1 = &zero,
66092+#endif
66093 .extra2 = &two,
66094 },
66095-#endif
66096 {
66097 .procname = "ngroups_max",
66098 .data = &ngroups_max,
66099@@ -1205,6 +1247,13 @@ static struct ctl_table vm_table[] = {
66100 .proc_handler = proc_dointvec_minmax,
66101 .extra1 = &zero,
66102 },
66103+ {
66104+ .procname = "heap_stack_gap",
66105+ .data = &sysctl_heap_stack_gap,
66106+ .maxlen = sizeof(sysctl_heap_stack_gap),
66107+ .mode = 0644,
66108+ .proc_handler = proc_doulongvec_minmax,
66109+ },
66110 #else
66111 {
66112 .procname = "nr_trim_pages",
66113@@ -1709,6 +1758,17 @@ static int test_perm(int mode, int op)
66114 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
66115 {
66116 int mode;
66117+ int error;
66118+
66119+ if (table->parent != NULL && table->parent->procname != NULL &&
66120+ table->procname != NULL &&
66121+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
66122+ return -EACCES;
66123+ if (gr_handle_chroot_sysctl(op))
66124+ return -EACCES;
66125+ error = gr_handle_sysctl(table, op);
66126+ if (error)
66127+ return error;
66128
66129 if (root->permissions)
66130 mode = root->permissions(root, current->nsproxy, table);
66131@@ -2113,6 +2173,16 @@ int proc_dostring(struct ctl_table *tabl
66132 buffer, lenp, ppos);
66133 }
66134
66135+int proc_dostring_modpriv(struct ctl_table *table, int write,
66136+ void __user *buffer, size_t *lenp, loff_t *ppos)
66137+{
66138+ if (write && !capable(CAP_SYS_MODULE))
66139+ return -EPERM;
66140+
66141+ return _proc_do_string(table->data, table->maxlen, write,
66142+ buffer, lenp, ppos);
66143+}
66144+
66145 static size_t proc_skip_spaces(char **buf)
66146 {
66147 size_t ret;
66148@@ -2218,6 +2288,8 @@ static int proc_put_long(void __user **b
66149 len = strlen(tmp);
66150 if (len > *size)
66151 len = *size;
66152+ if (len > sizeof(tmp))
66153+ len = sizeof(tmp);
66154 if (copy_to_user(*buf, tmp, len))
66155 return -EFAULT;
66156 *size -= len;
66157@@ -2534,8 +2606,11 @@ static int __do_proc_doulongvec_minmax(v
66158 *i = val;
66159 } else {
66160 val = convdiv * (*i) / convmul;
66161- if (!first)
66162+ if (!first) {
66163 err = proc_put_char(&buffer, &left, '\t');
66164+ if (err)
66165+ break;
66166+ }
66167 err = proc_put_long(&buffer, &left, val, false);
66168 if (err)
66169 break;
66170@@ -2930,6 +3005,12 @@ int proc_dostring(struct ctl_table *tabl
66171 return -ENOSYS;
66172 }
66173
66174+int proc_dostring_modpriv(struct ctl_table *table, int write,
66175+ void __user *buffer, size_t *lenp, loff_t *ppos)
66176+{
66177+ return -ENOSYS;
66178+}
66179+
66180 int proc_dointvec(struct ctl_table *table, int write,
66181 void __user *buffer, size_t *lenp, loff_t *ppos)
66182 {
66183@@ -2986,6 +3067,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
66184 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
66185 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
66186 EXPORT_SYMBOL(proc_dostring);
66187+EXPORT_SYMBOL(proc_dostring_modpriv);
66188 EXPORT_SYMBOL(proc_doulongvec_minmax);
66189 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
66190 EXPORT_SYMBOL(register_sysctl_table);
66191diff -urNp linux-3.1.1/kernel/sysctl_check.c linux-3.1.1/kernel/sysctl_check.c
66192--- linux-3.1.1/kernel/sysctl_check.c 2011-11-11 15:19:27.000000000 -0500
66193+++ linux-3.1.1/kernel/sysctl_check.c 2011-11-16 18:40:44.000000000 -0500
66194@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
66195 set_fail(&fail, table, "Directory with extra2");
66196 } else {
66197 if ((table->proc_handler == proc_dostring) ||
66198+ (table->proc_handler == proc_dostring_modpriv) ||
66199 (table->proc_handler == proc_dointvec) ||
66200 (table->proc_handler == proc_dointvec_minmax) ||
66201 (table->proc_handler == proc_dointvec_jiffies) ||
66202diff -urNp linux-3.1.1/kernel/taskstats.c linux-3.1.1/kernel/taskstats.c
66203--- linux-3.1.1/kernel/taskstats.c 2011-11-11 15:19:27.000000000 -0500
66204+++ linux-3.1.1/kernel/taskstats.c 2011-11-16 19:35:09.000000000 -0500
66205@@ -27,9 +27,12 @@
66206 #include <linux/cgroup.h>
66207 #include <linux/fs.h>
66208 #include <linux/file.h>
66209+#include <linux/grsecurity.h>
66210 #include <net/genetlink.h>
66211 #include <linux/atomic.h>
66212
66213+extern int gr_is_taskstats_denied(int pid);
66214+
66215 /*
66216 * Maximum length of a cpumask that can be specified in
66217 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
66218@@ -556,6 +559,9 @@ err:
66219
66220 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
66221 {
66222+ if (gr_is_taskstats_denied(current->pid))
66223+ return -EACCES;
66224+
66225 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
66226 return cmd_attr_register_cpumask(info);
66227 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
66228diff -urNp linux-3.1.1/kernel/time/alarmtimer.c linux-3.1.1/kernel/time/alarmtimer.c
66229--- linux-3.1.1/kernel/time/alarmtimer.c 2011-11-11 15:19:27.000000000 -0500
66230+++ linux-3.1.1/kernel/time/alarmtimer.c 2011-11-16 18:39:08.000000000 -0500
66231@@ -693,7 +693,7 @@ static int __init alarmtimer_init(void)
66232 {
66233 int error = 0;
66234 int i;
66235- struct k_clock alarm_clock = {
66236+ static struct k_clock alarm_clock = {
66237 .clock_getres = alarm_clock_getres,
66238 .clock_get = alarm_clock_get,
66239 .timer_create = alarm_timer_create,
66240diff -urNp linux-3.1.1/kernel/time/tick-broadcast.c linux-3.1.1/kernel/time/tick-broadcast.c
66241--- linux-3.1.1/kernel/time/tick-broadcast.c 2011-11-11 15:19:27.000000000 -0500
66242+++ linux-3.1.1/kernel/time/tick-broadcast.c 2011-11-16 18:39:08.000000000 -0500
66243@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
66244 * then clear the broadcast bit.
66245 */
66246 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
66247- int cpu = smp_processor_id();
66248+ cpu = smp_processor_id();
66249
66250 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
66251 tick_broadcast_clear_oneshot(cpu);
66252diff -urNp linux-3.1.1/kernel/time/timekeeping.c linux-3.1.1/kernel/time/timekeeping.c
66253--- linux-3.1.1/kernel/time/timekeeping.c 2011-11-11 15:19:27.000000000 -0500
66254+++ linux-3.1.1/kernel/time/timekeeping.c 2011-11-16 18:40:44.000000000 -0500
66255@@ -14,6 +14,7 @@
66256 #include <linux/init.h>
66257 #include <linux/mm.h>
66258 #include <linux/sched.h>
66259+#include <linux/grsecurity.h>
66260 #include <linux/syscore_ops.h>
66261 #include <linux/clocksource.h>
66262 #include <linux/jiffies.h>
66263@@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
66264 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
66265 return -EINVAL;
66266
66267+ gr_log_timechange();
66268+
66269 write_seqlock_irqsave(&xtime_lock, flags);
66270
66271 timekeeping_forward_now();
66272diff -urNp linux-3.1.1/kernel/time/timer_list.c linux-3.1.1/kernel/time/timer_list.c
66273--- linux-3.1.1/kernel/time/timer_list.c 2011-11-11 15:19:27.000000000 -0500
66274+++ linux-3.1.1/kernel/time/timer_list.c 2011-11-16 18:40:44.000000000 -0500
66275@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
66276
66277 static void print_name_offset(struct seq_file *m, void *sym)
66278 {
66279+#ifdef CONFIG_GRKERNSEC_HIDESYM
66280+ SEQ_printf(m, "<%p>", NULL);
66281+#else
66282 char symname[KSYM_NAME_LEN];
66283
66284 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
66285 SEQ_printf(m, "<%pK>", sym);
66286 else
66287 SEQ_printf(m, "%s", symname);
66288+#endif
66289 }
66290
66291 static void
66292@@ -112,7 +116,11 @@ next_one:
66293 static void
66294 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
66295 {
66296+#ifdef CONFIG_GRKERNSEC_HIDESYM
66297+ SEQ_printf(m, " .base: %p\n", NULL);
66298+#else
66299 SEQ_printf(m, " .base: %pK\n", base);
66300+#endif
66301 SEQ_printf(m, " .index: %d\n",
66302 base->index);
66303 SEQ_printf(m, " .resolution: %Lu nsecs\n",
66304@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
66305 {
66306 struct proc_dir_entry *pe;
66307
66308+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66309+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
66310+#else
66311 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
66312+#endif
66313 if (!pe)
66314 return -ENOMEM;
66315 return 0;
66316diff -urNp linux-3.1.1/kernel/time/timer_stats.c linux-3.1.1/kernel/time/timer_stats.c
66317--- linux-3.1.1/kernel/time/timer_stats.c 2011-11-11 15:19:27.000000000 -0500
66318+++ linux-3.1.1/kernel/time/timer_stats.c 2011-11-16 18:40:44.000000000 -0500
66319@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
66320 static unsigned long nr_entries;
66321 static struct entry entries[MAX_ENTRIES];
66322
66323-static atomic_t overflow_count;
66324+static atomic_unchecked_t overflow_count;
66325
66326 /*
66327 * The entries are in a hash-table, for fast lookup:
66328@@ -140,7 +140,7 @@ static void reset_entries(void)
66329 nr_entries = 0;
66330 memset(entries, 0, sizeof(entries));
66331 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
66332- atomic_set(&overflow_count, 0);
66333+ atomic_set_unchecked(&overflow_count, 0);
66334 }
66335
66336 static struct entry *alloc_entry(void)
66337@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
66338 if (likely(entry))
66339 entry->count++;
66340 else
66341- atomic_inc(&overflow_count);
66342+ atomic_inc_unchecked(&overflow_count);
66343
66344 out_unlock:
66345 raw_spin_unlock_irqrestore(lock, flags);
66346@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
66347
66348 static void print_name_offset(struct seq_file *m, unsigned long addr)
66349 {
66350+#ifdef CONFIG_GRKERNSEC_HIDESYM
66351+ seq_printf(m, "<%p>", NULL);
66352+#else
66353 char symname[KSYM_NAME_LEN];
66354
66355 if (lookup_symbol_name(addr, symname) < 0)
66356 seq_printf(m, "<%p>", (void *)addr);
66357 else
66358 seq_printf(m, "%s", symname);
66359+#endif
66360 }
66361
66362 static int tstats_show(struct seq_file *m, void *v)
66363@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
66364
66365 seq_puts(m, "Timer Stats Version: v0.2\n");
66366 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
66367- if (atomic_read(&overflow_count))
66368+ if (atomic_read_unchecked(&overflow_count))
66369 seq_printf(m, "Overflow: %d entries\n",
66370- atomic_read(&overflow_count));
66371+ atomic_read_unchecked(&overflow_count));
66372
66373 for (i = 0; i < nr_entries; i++) {
66374 entry = entries + i;
66375@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
66376 {
66377 struct proc_dir_entry *pe;
66378
66379+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66380+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
66381+#else
66382 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
66383+#endif
66384 if (!pe)
66385 return -ENOMEM;
66386 return 0;
66387diff -urNp linux-3.1.1/kernel/time.c linux-3.1.1/kernel/time.c
66388--- linux-3.1.1/kernel/time.c 2011-11-11 15:19:27.000000000 -0500
66389+++ linux-3.1.1/kernel/time.c 2011-11-16 18:40:44.000000000 -0500
66390@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
66391 return error;
66392
66393 if (tz) {
66394+ /* we log in do_settimeofday called below, so don't log twice
66395+ */
66396+ if (!tv)
66397+ gr_log_timechange();
66398+
66399 /* SMP safe, global irq locking makes it work. */
66400 sys_tz = *tz;
66401 update_vsyscall_tz();
66402diff -urNp linux-3.1.1/kernel/timer.c linux-3.1.1/kernel/timer.c
66403--- linux-3.1.1/kernel/timer.c 2011-11-11 15:19:27.000000000 -0500
66404+++ linux-3.1.1/kernel/timer.c 2011-11-16 18:39:08.000000000 -0500
66405@@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
66406 /*
66407 * This function runs timers and the timer-tq in bottom half context.
66408 */
66409-static void run_timer_softirq(struct softirq_action *h)
66410+static void run_timer_softirq(void)
66411 {
66412 struct tvec_base *base = __this_cpu_read(tvec_bases);
66413
66414diff -urNp linux-3.1.1/kernel/trace/blktrace.c linux-3.1.1/kernel/trace/blktrace.c
66415--- linux-3.1.1/kernel/trace/blktrace.c 2011-11-11 15:19:27.000000000 -0500
66416+++ linux-3.1.1/kernel/trace/blktrace.c 2011-11-16 18:39:08.000000000 -0500
66417@@ -323,7 +323,7 @@ static ssize_t blk_dropped_read(struct f
66418 struct blk_trace *bt = filp->private_data;
66419 char buf[16];
66420
66421- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
66422+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
66423
66424 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
66425 }
66426@@ -388,7 +388,7 @@ static int blk_subbuf_start_callback(str
66427 return 1;
66428
66429 bt = buf->chan->private_data;
66430- atomic_inc(&bt->dropped);
66431+ atomic_inc_unchecked(&bt->dropped);
66432 return 0;
66433 }
66434
66435@@ -489,7 +489,7 @@ int do_blk_trace_setup(struct request_qu
66436
66437 bt->dir = dir;
66438 bt->dev = dev;
66439- atomic_set(&bt->dropped, 0);
66440+ atomic_set_unchecked(&bt->dropped, 0);
66441
66442 ret = -EIO;
66443 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
66444diff -urNp linux-3.1.1/kernel/trace/ftrace.c linux-3.1.1/kernel/trace/ftrace.c
66445--- linux-3.1.1/kernel/trace/ftrace.c 2011-11-11 15:19:27.000000000 -0500
66446+++ linux-3.1.1/kernel/trace/ftrace.c 2011-11-16 18:39:08.000000000 -0500
66447@@ -1585,12 +1585,17 @@ ftrace_code_disable(struct module *mod,
66448 if (unlikely(ftrace_disabled))
66449 return 0;
66450
66451+ ret = ftrace_arch_code_modify_prepare();
66452+ FTRACE_WARN_ON(ret);
66453+ if (ret)
66454+ return 0;
66455+
66456 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
66457+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
66458 if (ret) {
66459 ftrace_bug(ret, ip);
66460- return 0;
66461 }
66462- return 1;
66463+ return ret ? 0 : 1;
66464 }
66465
66466 /*
66467@@ -2607,7 +2612,7 @@ static void ftrace_free_entry_rcu(struct
66468
66469 int
66470 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
66471- void *data)
66472+ void *data)
66473 {
66474 struct ftrace_func_probe *entry;
66475 struct ftrace_page *pg;
66476diff -urNp linux-3.1.1/kernel/trace/trace.c linux-3.1.1/kernel/trace/trace.c
66477--- linux-3.1.1/kernel/trace/trace.c 2011-11-11 15:19:27.000000000 -0500
66478+++ linux-3.1.1/kernel/trace/trace.c 2011-11-16 18:40:44.000000000 -0500
66479@@ -3451,6 +3451,8 @@ static ssize_t tracing_splice_read_pipe(
66480 size_t rem;
66481 unsigned int i;
66482
66483+ pax_track_stack();
66484+
66485 if (splice_grow_spd(pipe, &spd))
66486 return -ENOMEM;
66487
66488@@ -3926,6 +3928,8 @@ tracing_buffers_splice_read(struct file
66489 int entries, size, i;
66490 size_t ret;
66491
66492+ pax_track_stack();
66493+
66494 if (splice_grow_spd(pipe, &spd))
66495 return -ENOMEM;
66496
66497@@ -4093,10 +4097,9 @@ static const struct file_operations trac
66498 };
66499 #endif
66500
66501-static struct dentry *d_tracer;
66502-
66503 struct dentry *tracing_init_dentry(void)
66504 {
66505+ static struct dentry *d_tracer;
66506 static int once;
66507
66508 if (d_tracer)
66509@@ -4116,10 +4119,9 @@ struct dentry *tracing_init_dentry(void)
66510 return d_tracer;
66511 }
66512
66513-static struct dentry *d_percpu;
66514-
66515 struct dentry *tracing_dentry_percpu(void)
66516 {
66517+ static struct dentry *d_percpu;
66518 static int once;
66519 struct dentry *d_tracer;
66520
66521diff -urNp linux-3.1.1/kernel/trace/trace_events.c linux-3.1.1/kernel/trace/trace_events.c
66522--- linux-3.1.1/kernel/trace/trace_events.c 2011-11-11 15:19:27.000000000 -0500
66523+++ linux-3.1.1/kernel/trace/trace_events.c 2011-11-16 18:39:08.000000000 -0500
66524@@ -1300,10 +1300,6 @@ static LIST_HEAD(ftrace_module_file_list
66525 struct ftrace_module_file_ops {
66526 struct list_head list;
66527 struct module *mod;
66528- struct file_operations id;
66529- struct file_operations enable;
66530- struct file_operations format;
66531- struct file_operations filter;
66532 };
66533
66534 static struct ftrace_module_file_ops *
66535@@ -1324,17 +1320,12 @@ trace_create_file_ops(struct module *mod
66536
66537 file_ops->mod = mod;
66538
66539- file_ops->id = ftrace_event_id_fops;
66540- file_ops->id.owner = mod;
66541-
66542- file_ops->enable = ftrace_enable_fops;
66543- file_ops->enable.owner = mod;
66544-
66545- file_ops->filter = ftrace_event_filter_fops;
66546- file_ops->filter.owner = mod;
66547-
66548- file_ops->format = ftrace_event_format_fops;
66549- file_ops->format.owner = mod;
66550+ pax_open_kernel();
66551+ *(void **)&mod->trace_id.owner = mod;
66552+ *(void **)&mod->trace_enable.owner = mod;
66553+ *(void **)&mod->trace_filter.owner = mod;
66554+ *(void **)&mod->trace_format.owner = mod;
66555+ pax_close_kernel();
66556
66557 list_add(&file_ops->list, &ftrace_module_file_list);
66558
66559@@ -1358,8 +1349,8 @@ static void trace_module_add_events(stru
66560
66561 for_each_event(call, start, end) {
66562 __trace_add_event_call(*call, mod,
66563- &file_ops->id, &file_ops->enable,
66564- &file_ops->filter, &file_ops->format);
66565+ &mod->trace_id, &mod->trace_enable,
66566+ &mod->trace_filter, &mod->trace_format);
66567 }
66568 }
66569
66570diff -urNp linux-3.1.1/kernel/trace/trace_kprobe.c linux-3.1.1/kernel/trace/trace_kprobe.c
66571--- linux-3.1.1/kernel/trace/trace_kprobe.c 2011-11-11 15:19:27.000000000 -0500
66572+++ linux-3.1.1/kernel/trace/trace_kprobe.c 2011-11-16 18:39:08.000000000 -0500
66573@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66574 long ret;
66575 int maxlen = get_rloc_len(*(u32 *)dest);
66576 u8 *dst = get_rloc_data(dest);
66577- u8 *src = addr;
66578+ const u8 __user *src = (const u8 __force_user *)addr;
66579 mm_segment_t old_fs = get_fs();
66580 if (!maxlen)
66581 return;
66582@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66583 pagefault_disable();
66584 do
66585 ret = __copy_from_user_inatomic(dst++, src++, 1);
66586- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
66587+ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
66588 dst[-1] = '\0';
66589 pagefault_enable();
66590 set_fs(old_fs);
66591@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66592 ((u8 *)get_rloc_data(dest))[0] = '\0';
66593 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
66594 } else
66595- *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
66596+ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
66597 get_rloc_offs(*(u32 *)dest));
66598 }
66599 /* Return the length of string -- including null terminal byte */
66600@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66601 set_fs(KERNEL_DS);
66602 pagefault_disable();
66603 do {
66604- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
66605+ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
66606 len++;
66607 } while (c && ret == 0 && len < MAX_STRING_SIZE);
66608 pagefault_enable();
66609diff -urNp linux-3.1.1/kernel/trace/trace_mmiotrace.c linux-3.1.1/kernel/trace/trace_mmiotrace.c
66610--- linux-3.1.1/kernel/trace/trace_mmiotrace.c 2011-11-11 15:19:27.000000000 -0500
66611+++ linux-3.1.1/kernel/trace/trace_mmiotrace.c 2011-11-16 18:39:08.000000000 -0500
66612@@ -24,7 +24,7 @@ struct header_iter {
66613 static struct trace_array *mmio_trace_array;
66614 static bool overrun_detected;
66615 static unsigned long prev_overruns;
66616-static atomic_t dropped_count;
66617+static atomic_unchecked_t dropped_count;
66618
66619 static void mmio_reset_data(struct trace_array *tr)
66620 {
66621@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
66622
66623 static unsigned long count_overruns(struct trace_iterator *iter)
66624 {
66625- unsigned long cnt = atomic_xchg(&dropped_count, 0);
66626+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
66627 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
66628
66629 if (over > prev_overruns)
66630@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
66631 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
66632 sizeof(*entry), 0, pc);
66633 if (!event) {
66634- atomic_inc(&dropped_count);
66635+ atomic_inc_unchecked(&dropped_count);
66636 return;
66637 }
66638 entry = ring_buffer_event_data(event);
66639@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
66640 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
66641 sizeof(*entry), 0, pc);
66642 if (!event) {
66643- atomic_inc(&dropped_count);
66644+ atomic_inc_unchecked(&dropped_count);
66645 return;
66646 }
66647 entry = ring_buffer_event_data(event);
66648diff -urNp linux-3.1.1/kernel/trace/trace_output.c linux-3.1.1/kernel/trace/trace_output.c
66649--- linux-3.1.1/kernel/trace/trace_output.c 2011-11-11 15:19:27.000000000 -0500
66650+++ linux-3.1.1/kernel/trace/trace_output.c 2011-11-16 18:39:08.000000000 -0500
66651@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
66652
66653 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
66654 if (!IS_ERR(p)) {
66655- p = mangle_path(s->buffer + s->len, p, "\n");
66656+ p = mangle_path(s->buffer + s->len, p, "\n\\");
66657 if (p) {
66658 s->len = p - s->buffer;
66659 return 1;
66660diff -urNp linux-3.1.1/kernel/trace/trace_stack.c linux-3.1.1/kernel/trace/trace_stack.c
66661--- linux-3.1.1/kernel/trace/trace_stack.c 2011-11-11 15:19:27.000000000 -0500
66662+++ linux-3.1.1/kernel/trace/trace_stack.c 2011-11-16 18:39:08.000000000 -0500
66663@@ -50,7 +50,7 @@ static inline void check_stack(void)
66664 return;
66665
66666 /* we do not handle interrupt stacks yet */
66667- if (!object_is_on_stack(&this_size))
66668+ if (!object_starts_on_stack(&this_size))
66669 return;
66670
66671 local_irq_save(flags);
66672diff -urNp linux-3.1.1/kernel/trace/trace_workqueue.c linux-3.1.1/kernel/trace/trace_workqueue.c
66673--- linux-3.1.1/kernel/trace/trace_workqueue.c 2011-11-11 15:19:27.000000000 -0500
66674+++ linux-3.1.1/kernel/trace/trace_workqueue.c 2011-11-16 18:39:08.000000000 -0500
66675@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
66676 int cpu;
66677 pid_t pid;
66678 /* Can be inserted from interrupt or user context, need to be atomic */
66679- atomic_t inserted;
66680+ atomic_unchecked_t inserted;
66681 /*
66682 * Don't need to be atomic, works are serialized in a single workqueue thread
66683 * on a single CPU.
66684@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
66685 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
66686 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
66687 if (node->pid == wq_thread->pid) {
66688- atomic_inc(&node->inserted);
66689+ atomic_inc_unchecked(&node->inserted);
66690 goto found;
66691 }
66692 }
66693@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
66694 tsk = get_pid_task(pid, PIDTYPE_PID);
66695 if (tsk) {
66696 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
66697- atomic_read(&cws->inserted), cws->executed,
66698+ atomic_read_unchecked(&cws->inserted), cws->executed,
66699 tsk->comm);
66700 put_task_struct(tsk);
66701 }
66702diff -urNp linux-3.1.1/lib/bitmap.c linux-3.1.1/lib/bitmap.c
66703--- linux-3.1.1/lib/bitmap.c 2011-11-11 15:19:27.000000000 -0500
66704+++ linux-3.1.1/lib/bitmap.c 2011-11-16 18:39:08.000000000 -0500
66705@@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsi
66706 {
66707 int c, old_c, totaldigits, ndigits, nchunks, nbits;
66708 u32 chunk;
66709- const char __user *ubuf = buf;
66710+ const char __user *ubuf = (const char __force_user *)buf;
66711
66712 bitmap_zero(maskp, nmaskbits);
66713
66714@@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user
66715 {
66716 if (!access_ok(VERIFY_READ, ubuf, ulen))
66717 return -EFAULT;
66718- return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
66719+ return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
66720 }
66721 EXPORT_SYMBOL(bitmap_parse_user);
66722
66723@@ -594,7 +594,7 @@ static int __bitmap_parselist(const char
66724 {
66725 unsigned a, b;
66726 int c, old_c, totaldigits;
66727- const char __user *ubuf = buf;
66728+ const char __user *ubuf = (const char __force_user *)buf;
66729 int exp_digit, in_range;
66730
66731 totaldigits = c = 0;
66732@@ -694,7 +694,7 @@ int bitmap_parselist_user(const char __u
66733 {
66734 if (!access_ok(VERIFY_READ, ubuf, ulen))
66735 return -EFAULT;
66736- return __bitmap_parselist((const char *)ubuf,
66737+ return __bitmap_parselist((const char __force_kernel *)ubuf,
66738 ulen, 1, maskp, nmaskbits);
66739 }
66740 EXPORT_SYMBOL(bitmap_parselist_user);
66741diff -urNp linux-3.1.1/lib/bug.c linux-3.1.1/lib/bug.c
66742--- linux-3.1.1/lib/bug.c 2011-11-11 15:19:27.000000000 -0500
66743+++ linux-3.1.1/lib/bug.c 2011-11-16 18:39:08.000000000 -0500
66744@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
66745 return BUG_TRAP_TYPE_NONE;
66746
66747 bug = find_bug(bugaddr);
66748+ if (!bug)
66749+ return BUG_TRAP_TYPE_NONE;
66750
66751 file = NULL;
66752 line = 0;
66753diff -urNp linux-3.1.1/lib/debugobjects.c linux-3.1.1/lib/debugobjects.c
66754--- linux-3.1.1/lib/debugobjects.c 2011-11-11 15:19:27.000000000 -0500
66755+++ linux-3.1.1/lib/debugobjects.c 2011-11-16 18:39:08.000000000 -0500
66756@@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
66757 if (limit > 4)
66758 return;
66759
66760- is_on_stack = object_is_on_stack(addr);
66761+ is_on_stack = object_starts_on_stack(addr);
66762 if (is_on_stack == onstack)
66763 return;
66764
66765diff -urNp linux-3.1.1/lib/devres.c linux-3.1.1/lib/devres.c
66766--- linux-3.1.1/lib/devres.c 2011-11-11 15:19:27.000000000 -0500
66767+++ linux-3.1.1/lib/devres.c 2011-11-16 18:39:08.000000000 -0500
66768@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
66769 void devm_iounmap(struct device *dev, void __iomem *addr)
66770 {
66771 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
66772- (void *)addr));
66773+ (void __force *)addr));
66774 iounmap(addr);
66775 }
66776 EXPORT_SYMBOL(devm_iounmap);
66777@@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *de
66778 {
66779 ioport_unmap(addr);
66780 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
66781- devm_ioport_map_match, (void *)addr));
66782+ devm_ioport_map_match, (void __force *)addr));
66783 }
66784 EXPORT_SYMBOL(devm_ioport_unmap);
66785
66786diff -urNp linux-3.1.1/lib/dma-debug.c linux-3.1.1/lib/dma-debug.c
66787--- linux-3.1.1/lib/dma-debug.c 2011-11-11 15:19:27.000000000 -0500
66788+++ linux-3.1.1/lib/dma-debug.c 2011-11-16 18:39:08.000000000 -0500
66789@@ -870,7 +870,7 @@ out:
66790
66791 static void check_for_stack(struct device *dev, void *addr)
66792 {
66793- if (object_is_on_stack(addr))
66794+ if (object_starts_on_stack(addr))
66795 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
66796 "stack [addr=%p]\n", addr);
66797 }
66798diff -urNp linux-3.1.1/lib/extable.c linux-3.1.1/lib/extable.c
66799--- linux-3.1.1/lib/extable.c 2011-11-11 15:19:27.000000000 -0500
66800+++ linux-3.1.1/lib/extable.c 2011-11-16 18:39:08.000000000 -0500
66801@@ -13,6 +13,7 @@
66802 #include <linux/init.h>
66803 #include <linux/sort.h>
66804 #include <asm/uaccess.h>
66805+#include <asm/pgtable.h>
66806
66807 #ifndef ARCH_HAS_SORT_EXTABLE
66808 /*
66809@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const v
66810 void sort_extable(struct exception_table_entry *start,
66811 struct exception_table_entry *finish)
66812 {
66813+ pax_open_kernel();
66814 sort(start, finish - start, sizeof(struct exception_table_entry),
66815 cmp_ex, NULL);
66816+ pax_close_kernel();
66817 }
66818
66819 #ifdef CONFIG_MODULES
66820diff -urNp linux-3.1.1/lib/inflate.c linux-3.1.1/lib/inflate.c
66821--- linux-3.1.1/lib/inflate.c 2011-11-11 15:19:27.000000000 -0500
66822+++ linux-3.1.1/lib/inflate.c 2011-11-16 18:39:08.000000000 -0500
66823@@ -269,7 +269,7 @@ static void free(void *where)
66824 malloc_ptr = free_mem_ptr;
66825 }
66826 #else
66827-#define malloc(a) kmalloc(a, GFP_KERNEL)
66828+#define malloc(a) kmalloc((a), GFP_KERNEL)
66829 #define free(a) kfree(a)
66830 #endif
66831
66832diff -urNp linux-3.1.1/lib/Kconfig.debug linux-3.1.1/lib/Kconfig.debug
66833--- linux-3.1.1/lib/Kconfig.debug 2011-11-11 15:19:27.000000000 -0500
66834+++ linux-3.1.1/lib/Kconfig.debug 2011-11-16 18:40:44.000000000 -0500
66835@@ -1091,6 +1091,7 @@ config LATENCYTOP
66836 depends on DEBUG_KERNEL
66837 depends on STACKTRACE_SUPPORT
66838 depends on PROC_FS
66839+ depends on !GRKERNSEC_HIDESYM
66840 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
66841 select KALLSYMS
66842 select KALLSYMS_ALL
66843diff -urNp linux-3.1.1/lib/kref.c linux-3.1.1/lib/kref.c
66844--- linux-3.1.1/lib/kref.c 2011-11-11 15:19:27.000000000 -0500
66845+++ linux-3.1.1/lib/kref.c 2011-11-16 18:39:08.000000000 -0500
66846@@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
66847 */
66848 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
66849 {
66850- WARN_ON(release == NULL);
66851+ BUG_ON(release == NULL);
66852 WARN_ON(release == (void (*)(struct kref *))kfree);
66853
66854 if (atomic_dec_and_test(&kref->refcount)) {
66855diff -urNp linux-3.1.1/lib/radix-tree.c linux-3.1.1/lib/radix-tree.c
66856--- linux-3.1.1/lib/radix-tree.c 2011-11-11 15:19:27.000000000 -0500
66857+++ linux-3.1.1/lib/radix-tree.c 2011-11-16 18:39:08.000000000 -0500
66858@@ -80,7 +80,7 @@ struct radix_tree_preload {
66859 int nr;
66860 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
66861 };
66862-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
66863+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
66864
66865 static inline void *ptr_to_indirect(void *ptr)
66866 {
66867diff -urNp linux-3.1.1/lib/vsprintf.c linux-3.1.1/lib/vsprintf.c
66868--- linux-3.1.1/lib/vsprintf.c 2011-11-11 15:19:27.000000000 -0500
66869+++ linux-3.1.1/lib/vsprintf.c 2011-11-16 18:40:44.000000000 -0500
66870@@ -16,6 +16,9 @@
66871 * - scnprintf and vscnprintf
66872 */
66873
66874+#ifdef CONFIG_GRKERNSEC_HIDESYM
66875+#define __INCLUDED_BY_HIDESYM 1
66876+#endif
66877 #include <stdarg.h>
66878 #include <linux/module.h>
66879 #include <linux/types.h>
66880@@ -432,7 +435,7 @@ char *symbol_string(char *buf, char *end
66881 char sym[KSYM_SYMBOL_LEN];
66882 if (ext == 'B')
66883 sprint_backtrace(sym, value);
66884- else if (ext != 'f' && ext != 's')
66885+ else if (ext != 'f' && ext != 's' && ext != 'a')
66886 sprint_symbol(sym, value);
66887 else
66888 kallsyms_lookup(value, NULL, NULL, NULL, sym);
66889@@ -796,7 +799,11 @@ char *uuid_string(char *buf, char *end,
66890 return string(buf, end, uuid, spec);
66891 }
66892
66893+#ifdef CONFIG_GRKERNSEC_HIDESYM
66894+int kptr_restrict __read_mostly = 2;
66895+#else
66896 int kptr_restrict __read_mostly;
66897+#endif
66898
66899 /*
66900 * Show a '%p' thing. A kernel extension is that the '%p' is followed
66901@@ -810,6 +817,8 @@ int kptr_restrict __read_mostly;
66902 * - 'S' For symbolic direct pointers with offset
66903 * - 's' For symbolic direct pointers without offset
66904 * - 'B' For backtraced symbolic direct pointers with offset
66905+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
66906+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
66907 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
66908 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
66909 * - 'M' For a 6-byte MAC address, it prints the address in the
66910@@ -854,12 +863,12 @@ char *pointer(const char *fmt, char *buf
66911 {
66912 if (!ptr && *fmt != 'K') {
66913 /*
66914- * Print (null) with the same width as a pointer so it makes
66915+ * Print (nil) with the same width as a pointer so it makes
66916 * tabular output look nice.
66917 */
66918 if (spec.field_width == -1)
66919 spec.field_width = 2 * sizeof(void *);
66920- return string(buf, end, "(null)", spec);
66921+ return string(buf, end, "(nil)", spec);
66922 }
66923
66924 switch (*fmt) {
66925@@ -869,6 +878,13 @@ char *pointer(const char *fmt, char *buf
66926 /* Fallthrough */
66927 case 'S':
66928 case 's':
66929+#ifdef CONFIG_GRKERNSEC_HIDESYM
66930+ break;
66931+#else
66932+ return symbol_string(buf, end, ptr, spec, *fmt);
66933+#endif
66934+ case 'A':
66935+ case 'a':
66936 case 'B':
66937 return symbol_string(buf, end, ptr, spec, *fmt);
66938 case 'R':
66939@@ -1627,11 +1643,11 @@ int bstr_printf(char *buf, size_t size,
66940 typeof(type) value; \
66941 if (sizeof(type) == 8) { \
66942 args = PTR_ALIGN(args, sizeof(u32)); \
66943- *(u32 *)&value = *(u32 *)args; \
66944- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
66945+ *(u32 *)&value = *(const u32 *)args; \
66946+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
66947 } else { \
66948 args = PTR_ALIGN(args, sizeof(type)); \
66949- value = *(typeof(type) *)args; \
66950+ value = *(const typeof(type) *)args; \
66951 } \
66952 args += sizeof(type); \
66953 value; \
66954@@ -1694,7 +1710,7 @@ int bstr_printf(char *buf, size_t size,
66955 case FORMAT_TYPE_STR: {
66956 const char *str_arg = args;
66957 args += strlen(str_arg) + 1;
66958- str = string(str, end, (char *)str_arg, spec);
66959+ str = string(str, end, str_arg, spec);
66960 break;
66961 }
66962
66963diff -urNp linux-3.1.1/localversion-grsec linux-3.1.1/localversion-grsec
66964--- linux-3.1.1/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
66965+++ linux-3.1.1/localversion-grsec 2011-11-16 18:40:44.000000000 -0500
66966@@ -0,0 +1 @@
66967+-grsec
66968diff -urNp linux-3.1.1/Makefile linux-3.1.1/Makefile
66969--- linux-3.1.1/Makefile 2011-11-11 15:19:27.000000000 -0500
66970+++ linux-3.1.1/Makefile 2011-11-16 18:45:38.000000000 -0500
66971@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
66972
66973 HOSTCC = gcc
66974 HOSTCXX = g++
66975-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
66976-HOSTCXXFLAGS = -O2
66977+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
66978+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
66979+HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
66980
66981 # Decide whether to build built-in, modular, or both.
66982 # Normally, just do built-in.
66983@@ -365,10 +366,12 @@ LINUXINCLUDE := -I$(srctree)/arch/$(h
66984 KBUILD_CPPFLAGS := -D__KERNEL__
66985
66986 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
66987+ -W -Wno-unused-parameter -Wno-missing-field-initializers \
66988 -fno-strict-aliasing -fno-common \
66989 -Werror-implicit-function-declaration \
66990 -Wno-format-security \
66991 -fno-delete-null-pointer-checks
66992+KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
66993 KBUILD_AFLAGS_KERNEL :=
66994 KBUILD_CFLAGS_KERNEL :=
66995 KBUILD_AFLAGS := -D__ASSEMBLY__
66996@@ -407,8 +410,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
66997 # Rules shared between *config targets and build targets
66998
66999 # Basic helpers built in scripts/
67000-PHONY += scripts_basic
67001-scripts_basic:
67002+PHONY += scripts_basic gcc-plugins
67003+scripts_basic: gcc-plugins
67004 $(Q)$(MAKE) $(build)=scripts/basic
67005 $(Q)rm -f .tmp_quiet_recordmcount
67006
67007@@ -564,6 +567,37 @@ else
67008 KBUILD_CFLAGS += -O2
67009 endif
67010
67011+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
67012+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
67013+ifdef CONFIG_PAX_MEMORY_STACKLEAK
67014+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
67015+STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
67016+endif
67017+ifdef CONFIG_KALLOCSTAT_PLUGIN
67018+KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
67019+endif
67020+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
67021+KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
67022+endif
67023+ifdef CONFIG_CHECKER_PLUGIN
67024+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
67025+CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
67026+endif
67027+endif
67028+GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
67029+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
67030+gcc-plugins:
67031+ $(Q)$(MAKE) $(build)=tools/gcc
67032+else
67033+gcc-plugins:
67034+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
67035+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
67036+else
67037+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
67038+endif
67039+ $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
67040+endif
67041+
67042 include $(srctree)/arch/$(SRCARCH)/Makefile
67043
67044 ifneq ($(CONFIG_FRAME_WARN),0)
67045@@ -708,7 +742,7 @@ export mod_strip_cmd
67046
67047
67048 ifeq ($(KBUILD_EXTMOD),)
67049-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
67050+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
67051
67052 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
67053 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
67054@@ -932,6 +966,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-mai
67055
67056 # The actual objects are generated when descending,
67057 # make sure no implicit rule kicks in
67058+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
67059 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
67060
67061 # Handle descending into subdirectories listed in $(vmlinux-dirs)
67062@@ -941,7 +976,7 @@ $(sort $(vmlinux-init) $(vmlinux-main))
67063 # Error messages still appears in the original language
67064
67065 PHONY += $(vmlinux-dirs)
67066-$(vmlinux-dirs): prepare scripts
67067+$(vmlinux-dirs): gcc-plugins prepare scripts
67068 $(Q)$(MAKE) $(build)=$@
67069
67070 # Store (new) KERNELRELASE string in include/config/kernel.release
67071@@ -986,6 +1021,7 @@ prepare0: archprepare FORCE
67072 $(Q)$(MAKE) $(build)=. missing-syscalls
67073
67074 # All the preparing..
67075+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
67076 prepare: prepare0
67077
67078 # Generate some files
67079@@ -1087,6 +1123,7 @@ all: modules
67080 # using awk while concatenating to the final file.
67081
67082 PHONY += modules
67083+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
67084 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
67085 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
67086 @$(kecho) ' Building modules, stage 2.';
67087@@ -1102,7 +1139,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modu
67088
67089 # Target to prepare building external modules
67090 PHONY += modules_prepare
67091-modules_prepare: prepare scripts
67092+modules_prepare: gcc-plugins prepare scripts
67093
67094 # Target to install modules
67095 PHONY += modules_install
67096@@ -1198,7 +1235,7 @@ distclean: mrproper
67097 @find $(srctree) $(RCS_FIND_IGNORE) \
67098 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
67099 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
67100- -o -name '.*.rej' -o -size 0 \
67101+ -o -name '.*.rej' -o -name '*.so' -o -size 0 \
67102 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
67103 -type f -print | xargs rm -f
67104
67105@@ -1360,6 +1397,7 @@ PHONY += $(module-dirs) modules
67106 $(module-dirs): crmodverdir $(objtree)/Module.symvers
67107 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
67108
67109+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
67110 modules: $(module-dirs)
67111 @$(kecho) ' Building modules, stage 2.';
67112 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
67113@@ -1486,17 +1524,19 @@ else
67114 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
67115 endif
67116
67117-%.s: %.c prepare scripts FORCE
67118+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
67119+%.s: %.c gcc-plugins prepare scripts FORCE
67120 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67121 %.i: %.c prepare scripts FORCE
67122 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67123-%.o: %.c prepare scripts FORCE
67124+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
67125+%.o: %.c gcc-plugins prepare scripts FORCE
67126 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67127 %.lst: %.c prepare scripts FORCE
67128 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67129-%.s: %.S prepare scripts FORCE
67130+%.s: %.S gcc-plugins prepare scripts FORCE
67131 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67132-%.o: %.S prepare scripts FORCE
67133+%.o: %.S gcc-plugins prepare scripts FORCE
67134 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67135 %.symtypes: %.c prepare scripts FORCE
67136 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67137@@ -1506,11 +1546,13 @@ endif
67138 $(cmd_crmodverdir)
67139 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
67140 $(build)=$(build-dir)
67141-%/: prepare scripts FORCE
67142+%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
67143+%/: gcc-plugins prepare scripts FORCE
67144 $(cmd_crmodverdir)
67145 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
67146 $(build)=$(build-dir)
67147-%.ko: prepare scripts FORCE
67148+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
67149+%.ko: gcc-plugins prepare scripts FORCE
67150 $(cmd_crmodverdir)
67151 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
67152 $(build)=$(build-dir) $(@:.ko=.o)
67153diff -urNp linux-3.1.1/mm/filemap.c linux-3.1.1/mm/filemap.c
67154--- linux-3.1.1/mm/filemap.c 2011-11-11 15:19:27.000000000 -0500
67155+++ linux-3.1.1/mm/filemap.c 2011-11-16 18:40:44.000000000 -0500
67156@@ -1784,7 +1784,7 @@ int generic_file_mmap(struct file * file
67157 struct address_space *mapping = file->f_mapping;
67158
67159 if (!mapping->a_ops->readpage)
67160- return -ENOEXEC;
67161+ return -ENODEV;
67162 file_accessed(file);
67163 vma->vm_ops = &generic_file_vm_ops;
67164 vma->vm_flags |= VM_CAN_NONLINEAR;
67165@@ -2190,6 +2190,7 @@ inline int generic_write_checks(struct f
67166 *pos = i_size_read(inode);
67167
67168 if (limit != RLIM_INFINITY) {
67169+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
67170 if (*pos >= limit) {
67171 send_sig(SIGXFSZ, current, 0);
67172 return -EFBIG;
67173diff -urNp linux-3.1.1/mm/fremap.c linux-3.1.1/mm/fremap.c
67174--- linux-3.1.1/mm/fremap.c 2011-11-11 15:19:27.000000000 -0500
67175+++ linux-3.1.1/mm/fremap.c 2011-11-16 18:39:08.000000000 -0500
67176@@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
67177 retry:
67178 vma = find_vma(mm, start);
67179
67180+#ifdef CONFIG_PAX_SEGMEXEC
67181+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
67182+ goto out;
67183+#endif
67184+
67185 /*
67186 * Make sure the vma is shared, that it supports prefaulting,
67187 * and that the remapped range is valid and fully within
67188diff -urNp linux-3.1.1/mm/highmem.c linux-3.1.1/mm/highmem.c
67189--- linux-3.1.1/mm/highmem.c 2011-11-11 15:19:27.000000000 -0500
67190+++ linux-3.1.1/mm/highmem.c 2011-11-16 18:39:08.000000000 -0500
67191@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
67192 * So no dangers, even with speculative execution.
67193 */
67194 page = pte_page(pkmap_page_table[i]);
67195+ pax_open_kernel();
67196 pte_clear(&init_mm, (unsigned long)page_address(page),
67197 &pkmap_page_table[i]);
67198-
67199+ pax_close_kernel();
67200 set_page_address(page, NULL);
67201 need_flush = 1;
67202 }
67203@@ -186,9 +187,11 @@ start:
67204 }
67205 }
67206 vaddr = PKMAP_ADDR(last_pkmap_nr);
67207+
67208+ pax_open_kernel();
67209 set_pte_at(&init_mm, vaddr,
67210 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
67211-
67212+ pax_close_kernel();
67213 pkmap_count[last_pkmap_nr] = 1;
67214 set_page_address(page, (void *)vaddr);
67215
67216diff -urNp linux-3.1.1/mm/huge_memory.c linux-3.1.1/mm/huge_memory.c
67217--- linux-3.1.1/mm/huge_memory.c 2011-11-11 15:19:27.000000000 -0500
67218+++ linux-3.1.1/mm/huge_memory.c 2011-11-16 18:39:08.000000000 -0500
67219@@ -702,7 +702,7 @@ out:
67220 * run pte_offset_map on the pmd, if an huge pmd could
67221 * materialize from under us from a different thread.
67222 */
67223- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
67224+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
67225 return VM_FAULT_OOM;
67226 /* if an huge pmd materialized from under us just retry later */
67227 if (unlikely(pmd_trans_huge(*pmd)))
67228diff -urNp linux-3.1.1/mm/hugetlb.c linux-3.1.1/mm/hugetlb.c
67229--- linux-3.1.1/mm/hugetlb.c 2011-11-11 15:19:27.000000000 -0500
67230+++ linux-3.1.1/mm/hugetlb.c 2011-11-16 18:39:08.000000000 -0500
67231@@ -2346,6 +2346,27 @@ static int unmap_ref_private(struct mm_s
67232 return 1;
67233 }
67234
67235+#ifdef CONFIG_PAX_SEGMEXEC
67236+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
67237+{
67238+ struct mm_struct *mm = vma->vm_mm;
67239+ struct vm_area_struct *vma_m;
67240+ unsigned long address_m;
67241+ pte_t *ptep_m;
67242+
67243+ vma_m = pax_find_mirror_vma(vma);
67244+ if (!vma_m)
67245+ return;
67246+
67247+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67248+ address_m = address + SEGMEXEC_TASK_SIZE;
67249+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
67250+ get_page(page_m);
67251+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
67252+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
67253+}
67254+#endif
67255+
67256 /*
67257 * Hugetlb_cow() should be called with page lock of the original hugepage held.
67258 */
67259@@ -2447,6 +2468,11 @@ retry_avoidcopy:
67260 make_huge_pte(vma, new_page, 1));
67261 page_remove_rmap(old_page);
67262 hugepage_add_new_anon_rmap(new_page, vma, address);
67263+
67264+#ifdef CONFIG_PAX_SEGMEXEC
67265+ pax_mirror_huge_pte(vma, address, new_page);
67266+#endif
67267+
67268 /* Make the old page be freed below */
67269 new_page = old_page;
67270 mmu_notifier_invalidate_range_end(mm,
67271@@ -2598,6 +2624,10 @@ retry:
67272 && (vma->vm_flags & VM_SHARED)));
67273 set_huge_pte_at(mm, address, ptep, new_pte);
67274
67275+#ifdef CONFIG_PAX_SEGMEXEC
67276+ pax_mirror_huge_pte(vma, address, page);
67277+#endif
67278+
67279 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
67280 /* Optimization, do the COW without a second fault */
67281 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
67282@@ -2627,6 +2657,10 @@ int hugetlb_fault(struct mm_struct *mm,
67283 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
67284 struct hstate *h = hstate_vma(vma);
67285
67286+#ifdef CONFIG_PAX_SEGMEXEC
67287+ struct vm_area_struct *vma_m;
67288+#endif
67289+
67290 ptep = huge_pte_offset(mm, address);
67291 if (ptep) {
67292 entry = huge_ptep_get(ptep);
67293@@ -2638,6 +2672,26 @@ int hugetlb_fault(struct mm_struct *mm,
67294 VM_FAULT_SET_HINDEX(h - hstates);
67295 }
67296
67297+#ifdef CONFIG_PAX_SEGMEXEC
67298+ vma_m = pax_find_mirror_vma(vma);
67299+ if (vma_m) {
67300+ unsigned long address_m;
67301+
67302+ if (vma->vm_start > vma_m->vm_start) {
67303+ address_m = address;
67304+ address -= SEGMEXEC_TASK_SIZE;
67305+ vma = vma_m;
67306+ h = hstate_vma(vma);
67307+ } else
67308+ address_m = address + SEGMEXEC_TASK_SIZE;
67309+
67310+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
67311+ return VM_FAULT_OOM;
67312+ address_m &= HPAGE_MASK;
67313+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
67314+ }
67315+#endif
67316+
67317 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
67318 if (!ptep)
67319 return VM_FAULT_OOM;
67320diff -urNp linux-3.1.1/mm/internal.h linux-3.1.1/mm/internal.h
67321--- linux-3.1.1/mm/internal.h 2011-11-11 15:19:27.000000000 -0500
67322+++ linux-3.1.1/mm/internal.h 2011-11-16 18:39:08.000000000 -0500
67323@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page
67324 * in mm/page_alloc.c
67325 */
67326 extern void __free_pages_bootmem(struct page *page, unsigned int order);
67327+extern void free_compound_page(struct page *page);
67328 extern void prep_compound_page(struct page *page, unsigned long order);
67329 #ifdef CONFIG_MEMORY_FAILURE
67330 extern bool is_free_buddy_page(struct page *page);
67331diff -urNp linux-3.1.1/mm/Kconfig linux-3.1.1/mm/Kconfig
67332--- linux-3.1.1/mm/Kconfig 2011-11-11 15:19:27.000000000 -0500
67333+++ linux-3.1.1/mm/Kconfig 2011-11-16 18:40:44.000000000 -0500
67334@@ -240,7 +240,7 @@ config KSM
67335 config DEFAULT_MMAP_MIN_ADDR
67336 int "Low address space to protect from user allocation"
67337 depends on MMU
67338- default 4096
67339+ default 65536
67340 help
67341 This is the portion of low virtual memory which should be protected
67342 from userspace allocation. Keeping a user from writing to low pages
67343diff -urNp linux-3.1.1/mm/kmemleak.c linux-3.1.1/mm/kmemleak.c
67344--- linux-3.1.1/mm/kmemleak.c 2011-11-11 15:19:27.000000000 -0500
67345+++ linux-3.1.1/mm/kmemleak.c 2011-11-16 18:40:44.000000000 -0500
67346@@ -357,7 +357,7 @@ static void print_unreferenced(struct se
67347
67348 for (i = 0; i < object->trace_len; i++) {
67349 void *ptr = (void *)object->trace[i];
67350- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
67351+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
67352 }
67353 }
67354
67355diff -urNp linux-3.1.1/mm/maccess.c linux-3.1.1/mm/maccess.c
67356--- linux-3.1.1/mm/maccess.c 2011-11-11 15:19:27.000000000 -0500
67357+++ linux-3.1.1/mm/maccess.c 2011-11-16 18:39:08.000000000 -0500
67358@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, cons
67359 set_fs(KERNEL_DS);
67360 pagefault_disable();
67361 ret = __copy_from_user_inatomic(dst,
67362- (__force const void __user *)src, size);
67363+ (const void __force_user *)src, size);
67364 pagefault_enable();
67365 set_fs(old_fs);
67366
67367@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, con
67368
67369 set_fs(KERNEL_DS);
67370 pagefault_disable();
67371- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
67372+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
67373 pagefault_enable();
67374 set_fs(old_fs);
67375
67376diff -urNp linux-3.1.1/mm/madvise.c linux-3.1.1/mm/madvise.c
67377--- linux-3.1.1/mm/madvise.c 2011-11-11 15:19:27.000000000 -0500
67378+++ linux-3.1.1/mm/madvise.c 2011-11-16 18:39:08.000000000 -0500
67379@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
67380 pgoff_t pgoff;
67381 unsigned long new_flags = vma->vm_flags;
67382
67383+#ifdef CONFIG_PAX_SEGMEXEC
67384+ struct vm_area_struct *vma_m;
67385+#endif
67386+
67387 switch (behavior) {
67388 case MADV_NORMAL:
67389 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
67390@@ -110,6 +114,13 @@ success:
67391 /*
67392 * vm_flags is protected by the mmap_sem held in write mode.
67393 */
67394+
67395+#ifdef CONFIG_PAX_SEGMEXEC
67396+ vma_m = pax_find_mirror_vma(vma);
67397+ if (vma_m)
67398+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
67399+#endif
67400+
67401 vma->vm_flags = new_flags;
67402
67403 out:
67404@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
67405 struct vm_area_struct ** prev,
67406 unsigned long start, unsigned long end)
67407 {
67408+
67409+#ifdef CONFIG_PAX_SEGMEXEC
67410+ struct vm_area_struct *vma_m;
67411+#endif
67412+
67413 *prev = vma;
67414 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
67415 return -EINVAL;
67416@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
67417 zap_page_range(vma, start, end - start, &details);
67418 } else
67419 zap_page_range(vma, start, end - start, NULL);
67420+
67421+#ifdef CONFIG_PAX_SEGMEXEC
67422+ vma_m = pax_find_mirror_vma(vma);
67423+ if (vma_m) {
67424+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
67425+ struct zap_details details = {
67426+ .nonlinear_vma = vma_m,
67427+ .last_index = ULONG_MAX,
67428+ };
67429+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
67430+ } else
67431+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
67432+ }
67433+#endif
67434+
67435 return 0;
67436 }
67437
67438@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
67439 if (end < start)
67440 goto out;
67441
67442+#ifdef CONFIG_PAX_SEGMEXEC
67443+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
67444+ if (end > SEGMEXEC_TASK_SIZE)
67445+ goto out;
67446+ } else
67447+#endif
67448+
67449+ if (end > TASK_SIZE)
67450+ goto out;
67451+
67452 error = 0;
67453 if (end == start)
67454 goto out;
67455diff -urNp linux-3.1.1/mm/memory.c linux-3.1.1/mm/memory.c
67456--- linux-3.1.1/mm/memory.c 2011-11-11 15:19:27.000000000 -0500
67457+++ linux-3.1.1/mm/memory.c 2011-11-16 18:39:08.000000000 -0500
67458@@ -457,8 +457,12 @@ static inline void free_pmd_range(struct
67459 return;
67460
67461 pmd = pmd_offset(pud, start);
67462+
67463+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
67464 pud_clear(pud);
67465 pmd_free_tlb(tlb, pmd, start);
67466+#endif
67467+
67468 }
67469
67470 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67471@@ -489,9 +493,12 @@ static inline void free_pud_range(struct
67472 if (end - 1 > ceiling - 1)
67473 return;
67474
67475+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
67476 pud = pud_offset(pgd, start);
67477 pgd_clear(pgd);
67478 pud_free_tlb(tlb, pud, start);
67479+#endif
67480+
67481 }
67482
67483 /*
67484@@ -1566,12 +1573,6 @@ no_page_table:
67485 return page;
67486 }
67487
67488-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
67489-{
67490- return stack_guard_page_start(vma, addr) ||
67491- stack_guard_page_end(vma, addr+PAGE_SIZE);
67492-}
67493-
67494 /**
67495 * __get_user_pages() - pin user pages in memory
67496 * @tsk: task_struct of target task
67497@@ -1644,10 +1645,10 @@ int __get_user_pages(struct task_struct
67498 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
67499 i = 0;
67500
67501- do {
67502+ while (nr_pages) {
67503 struct vm_area_struct *vma;
67504
67505- vma = find_extend_vma(mm, start);
67506+ vma = find_vma(mm, start);
67507 if (!vma && in_gate_area(mm, start)) {
67508 unsigned long pg = start & PAGE_MASK;
67509 pgd_t *pgd;
67510@@ -1695,7 +1696,7 @@ int __get_user_pages(struct task_struct
67511 goto next_page;
67512 }
67513
67514- if (!vma ||
67515+ if (!vma || start < vma->vm_start ||
67516 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
67517 !(vm_flags & vma->vm_flags))
67518 return i ? : -EFAULT;
67519@@ -1722,11 +1723,6 @@ int __get_user_pages(struct task_struct
67520 int ret;
67521 unsigned int fault_flags = 0;
67522
67523- /* For mlock, just skip the stack guard page. */
67524- if (foll_flags & FOLL_MLOCK) {
67525- if (stack_guard_page(vma, start))
67526- goto next_page;
67527- }
67528 if (foll_flags & FOLL_WRITE)
67529 fault_flags |= FAULT_FLAG_WRITE;
67530 if (nonblocking)
67531@@ -1800,7 +1796,7 @@ next_page:
67532 start += PAGE_SIZE;
67533 nr_pages--;
67534 } while (nr_pages && start < vma->vm_end);
67535- } while (nr_pages);
67536+ }
67537 return i;
67538 }
67539 EXPORT_SYMBOL(__get_user_pages);
67540@@ -2007,6 +2003,10 @@ static int insert_page(struct vm_area_st
67541 page_add_file_rmap(page);
67542 set_pte_at(mm, addr, pte, mk_pte(page, prot));
67543
67544+#ifdef CONFIG_PAX_SEGMEXEC
67545+ pax_mirror_file_pte(vma, addr, page, ptl);
67546+#endif
67547+
67548 retval = 0;
67549 pte_unmap_unlock(pte, ptl);
67550 return retval;
67551@@ -2041,10 +2041,22 @@ out:
67552 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
67553 struct page *page)
67554 {
67555+
67556+#ifdef CONFIG_PAX_SEGMEXEC
67557+ struct vm_area_struct *vma_m;
67558+#endif
67559+
67560 if (addr < vma->vm_start || addr >= vma->vm_end)
67561 return -EFAULT;
67562 if (!page_count(page))
67563 return -EINVAL;
67564+
67565+#ifdef CONFIG_PAX_SEGMEXEC
67566+ vma_m = pax_find_mirror_vma(vma);
67567+ if (vma_m)
67568+ vma_m->vm_flags |= VM_INSERTPAGE;
67569+#endif
67570+
67571 vma->vm_flags |= VM_INSERTPAGE;
67572 return insert_page(vma, addr, page, vma->vm_page_prot);
67573 }
67574@@ -2130,6 +2142,7 @@ int vm_insert_mixed(struct vm_area_struc
67575 unsigned long pfn)
67576 {
67577 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
67578+ BUG_ON(vma->vm_mirror);
67579
67580 if (addr < vma->vm_start || addr >= vma->vm_end)
67581 return -EFAULT;
67582@@ -2445,6 +2458,186 @@ static inline void cow_user_page(struct
67583 copy_user_highpage(dst, src, va, vma);
67584 }
67585
67586+#ifdef CONFIG_PAX_SEGMEXEC
67587+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
67588+{
67589+ struct mm_struct *mm = vma->vm_mm;
67590+ spinlock_t *ptl;
67591+ pte_t *pte, entry;
67592+
67593+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
67594+ entry = *pte;
67595+ if (!pte_present(entry)) {
67596+ if (!pte_none(entry)) {
67597+ BUG_ON(pte_file(entry));
67598+ free_swap_and_cache(pte_to_swp_entry(entry));
67599+ pte_clear_not_present_full(mm, address, pte, 0);
67600+ }
67601+ } else {
67602+ struct page *page;
67603+
67604+ flush_cache_page(vma, address, pte_pfn(entry));
67605+ entry = ptep_clear_flush(vma, address, pte);
67606+ BUG_ON(pte_dirty(entry));
67607+ page = vm_normal_page(vma, address, entry);
67608+ if (page) {
67609+ update_hiwater_rss(mm);
67610+ if (PageAnon(page))
67611+ dec_mm_counter_fast(mm, MM_ANONPAGES);
67612+ else
67613+ dec_mm_counter_fast(mm, MM_FILEPAGES);
67614+ page_remove_rmap(page);
67615+ page_cache_release(page);
67616+ }
67617+ }
67618+ pte_unmap_unlock(pte, ptl);
67619+}
67620+
67621+/* PaX: if vma is mirrored, synchronize the mirror's PTE
67622+ *
67623+ * the ptl of the lower mapped page is held on entry and is not released on exit
67624+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
67625+ */
67626+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67627+{
67628+ struct mm_struct *mm = vma->vm_mm;
67629+ unsigned long address_m;
67630+ spinlock_t *ptl_m;
67631+ struct vm_area_struct *vma_m;
67632+ pmd_t *pmd_m;
67633+ pte_t *pte_m, entry_m;
67634+
67635+ BUG_ON(!page_m || !PageAnon(page_m));
67636+
67637+ vma_m = pax_find_mirror_vma(vma);
67638+ if (!vma_m)
67639+ return;
67640+
67641+ BUG_ON(!PageLocked(page_m));
67642+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67643+ address_m = address + SEGMEXEC_TASK_SIZE;
67644+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67645+ pte_m = pte_offset_map(pmd_m, address_m);
67646+ ptl_m = pte_lockptr(mm, pmd_m);
67647+ if (ptl != ptl_m) {
67648+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67649+ if (!pte_none(*pte_m))
67650+ goto out;
67651+ }
67652+
67653+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67654+ page_cache_get(page_m);
67655+ page_add_anon_rmap(page_m, vma_m, address_m);
67656+ inc_mm_counter_fast(mm, MM_ANONPAGES);
67657+ set_pte_at(mm, address_m, pte_m, entry_m);
67658+ update_mmu_cache(vma_m, address_m, entry_m);
67659+out:
67660+ if (ptl != ptl_m)
67661+ spin_unlock(ptl_m);
67662+ pte_unmap(pte_m);
67663+ unlock_page(page_m);
67664+}
67665+
67666+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67667+{
67668+ struct mm_struct *mm = vma->vm_mm;
67669+ unsigned long address_m;
67670+ spinlock_t *ptl_m;
67671+ struct vm_area_struct *vma_m;
67672+ pmd_t *pmd_m;
67673+ pte_t *pte_m, entry_m;
67674+
67675+ BUG_ON(!page_m || PageAnon(page_m));
67676+
67677+ vma_m = pax_find_mirror_vma(vma);
67678+ if (!vma_m)
67679+ return;
67680+
67681+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67682+ address_m = address + SEGMEXEC_TASK_SIZE;
67683+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67684+ pte_m = pte_offset_map(pmd_m, address_m);
67685+ ptl_m = pte_lockptr(mm, pmd_m);
67686+ if (ptl != ptl_m) {
67687+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67688+ if (!pte_none(*pte_m))
67689+ goto out;
67690+ }
67691+
67692+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67693+ page_cache_get(page_m);
67694+ page_add_file_rmap(page_m);
67695+ inc_mm_counter_fast(mm, MM_FILEPAGES);
67696+ set_pte_at(mm, address_m, pte_m, entry_m);
67697+ update_mmu_cache(vma_m, address_m, entry_m);
67698+out:
67699+ if (ptl != ptl_m)
67700+ spin_unlock(ptl_m);
67701+ pte_unmap(pte_m);
67702+}
67703+
67704+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
67705+{
67706+ struct mm_struct *mm = vma->vm_mm;
67707+ unsigned long address_m;
67708+ spinlock_t *ptl_m;
67709+ struct vm_area_struct *vma_m;
67710+ pmd_t *pmd_m;
67711+ pte_t *pte_m, entry_m;
67712+
67713+ vma_m = pax_find_mirror_vma(vma);
67714+ if (!vma_m)
67715+ return;
67716+
67717+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67718+ address_m = address + SEGMEXEC_TASK_SIZE;
67719+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67720+ pte_m = pte_offset_map(pmd_m, address_m);
67721+ ptl_m = pte_lockptr(mm, pmd_m);
67722+ if (ptl != ptl_m) {
67723+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67724+ if (!pte_none(*pte_m))
67725+ goto out;
67726+ }
67727+
67728+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
67729+ set_pte_at(mm, address_m, pte_m, entry_m);
67730+out:
67731+ if (ptl != ptl_m)
67732+ spin_unlock(ptl_m);
67733+ pte_unmap(pte_m);
67734+}
67735+
67736+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
67737+{
67738+ struct page *page_m;
67739+ pte_t entry;
67740+
67741+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
67742+ goto out;
67743+
67744+ entry = *pte;
67745+ page_m = vm_normal_page(vma, address, entry);
67746+ if (!page_m)
67747+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
67748+ else if (PageAnon(page_m)) {
67749+ if (pax_find_mirror_vma(vma)) {
67750+ pte_unmap_unlock(pte, ptl);
67751+ lock_page(page_m);
67752+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
67753+ if (pte_same(entry, *pte))
67754+ pax_mirror_anon_pte(vma, address, page_m, ptl);
67755+ else
67756+ unlock_page(page_m);
67757+ }
67758+ } else
67759+ pax_mirror_file_pte(vma, address, page_m, ptl);
67760+
67761+out:
67762+ pte_unmap_unlock(pte, ptl);
67763+}
67764+#endif
67765+
67766 /*
67767 * This routine handles present pages, when users try to write
67768 * to a shared page. It is done by copying the page to a new address
67769@@ -2656,6 +2849,12 @@ gotten:
67770 */
67771 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67772 if (likely(pte_same(*page_table, orig_pte))) {
67773+
67774+#ifdef CONFIG_PAX_SEGMEXEC
67775+ if (pax_find_mirror_vma(vma))
67776+ BUG_ON(!trylock_page(new_page));
67777+#endif
67778+
67779 if (old_page) {
67780 if (!PageAnon(old_page)) {
67781 dec_mm_counter_fast(mm, MM_FILEPAGES);
67782@@ -2707,6 +2906,10 @@ gotten:
67783 page_remove_rmap(old_page);
67784 }
67785
67786+#ifdef CONFIG_PAX_SEGMEXEC
67787+ pax_mirror_anon_pte(vma, address, new_page, ptl);
67788+#endif
67789+
67790 /* Free the old page.. */
67791 new_page = old_page;
67792 ret |= VM_FAULT_WRITE;
67793@@ -2986,6 +3189,11 @@ static int do_swap_page(struct mm_struct
67794 swap_free(entry);
67795 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
67796 try_to_free_swap(page);
67797+
67798+#ifdef CONFIG_PAX_SEGMEXEC
67799+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
67800+#endif
67801+
67802 unlock_page(page);
67803 if (swapcache) {
67804 /*
67805@@ -3009,6 +3217,11 @@ static int do_swap_page(struct mm_struct
67806
67807 /* No need to invalidate - it was non-present before */
67808 update_mmu_cache(vma, address, page_table);
67809+
67810+#ifdef CONFIG_PAX_SEGMEXEC
67811+ pax_mirror_anon_pte(vma, address, page, ptl);
67812+#endif
67813+
67814 unlock:
67815 pte_unmap_unlock(page_table, ptl);
67816 out:
67817@@ -3028,40 +3241,6 @@ out_release:
67818 }
67819
67820 /*
67821- * This is like a special single-page "expand_{down|up}wards()",
67822- * except we must first make sure that 'address{-|+}PAGE_SIZE'
67823- * doesn't hit another vma.
67824- */
67825-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
67826-{
67827- address &= PAGE_MASK;
67828- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
67829- struct vm_area_struct *prev = vma->vm_prev;
67830-
67831- /*
67832- * Is there a mapping abutting this one below?
67833- *
67834- * That's only ok if it's the same stack mapping
67835- * that has gotten split..
67836- */
67837- if (prev && prev->vm_end == address)
67838- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
67839-
67840- expand_downwards(vma, address - PAGE_SIZE);
67841- }
67842- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
67843- struct vm_area_struct *next = vma->vm_next;
67844-
67845- /* As VM_GROWSDOWN but s/below/above/ */
67846- if (next && next->vm_start == address + PAGE_SIZE)
67847- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
67848-
67849- expand_upwards(vma, address + PAGE_SIZE);
67850- }
67851- return 0;
67852-}
67853-
67854-/*
67855 * We enter with non-exclusive mmap_sem (to exclude vma changes,
67856 * but allow concurrent faults), and pte mapped but not yet locked.
67857 * We return with mmap_sem still held, but pte unmapped and unlocked.
67858@@ -3070,27 +3249,23 @@ static int do_anonymous_page(struct mm_s
67859 unsigned long address, pte_t *page_table, pmd_t *pmd,
67860 unsigned int flags)
67861 {
67862- struct page *page;
67863+ struct page *page = NULL;
67864 spinlock_t *ptl;
67865 pte_t entry;
67866
67867- pte_unmap(page_table);
67868-
67869- /* Check if we need to add a guard page to the stack */
67870- if (check_stack_guard_page(vma, address) < 0)
67871- return VM_FAULT_SIGBUS;
67872-
67873- /* Use the zero-page for reads */
67874 if (!(flags & FAULT_FLAG_WRITE)) {
67875 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
67876 vma->vm_page_prot));
67877- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67878+ ptl = pte_lockptr(mm, pmd);
67879+ spin_lock(ptl);
67880 if (!pte_none(*page_table))
67881 goto unlock;
67882 goto setpte;
67883 }
67884
67885 /* Allocate our own private page. */
67886+ pte_unmap(page_table);
67887+
67888 if (unlikely(anon_vma_prepare(vma)))
67889 goto oom;
67890 page = alloc_zeroed_user_highpage_movable(vma, address);
67891@@ -3109,6 +3284,11 @@ static int do_anonymous_page(struct mm_s
67892 if (!pte_none(*page_table))
67893 goto release;
67894
67895+#ifdef CONFIG_PAX_SEGMEXEC
67896+ if (pax_find_mirror_vma(vma))
67897+ BUG_ON(!trylock_page(page));
67898+#endif
67899+
67900 inc_mm_counter_fast(mm, MM_ANONPAGES);
67901 page_add_new_anon_rmap(page, vma, address);
67902 setpte:
67903@@ -3116,6 +3296,12 @@ setpte:
67904
67905 /* No need to invalidate - it was non-present before */
67906 update_mmu_cache(vma, address, page_table);
67907+
67908+#ifdef CONFIG_PAX_SEGMEXEC
67909+ if (page)
67910+ pax_mirror_anon_pte(vma, address, page, ptl);
67911+#endif
67912+
67913 unlock:
67914 pte_unmap_unlock(page_table, ptl);
67915 return 0;
67916@@ -3259,6 +3445,12 @@ static int __do_fault(struct mm_struct *
67917 */
67918 /* Only go through if we didn't race with anybody else... */
67919 if (likely(pte_same(*page_table, orig_pte))) {
67920+
67921+#ifdef CONFIG_PAX_SEGMEXEC
67922+ if (anon && pax_find_mirror_vma(vma))
67923+ BUG_ON(!trylock_page(page));
67924+#endif
67925+
67926 flush_icache_page(vma, page);
67927 entry = mk_pte(page, vma->vm_page_prot);
67928 if (flags & FAULT_FLAG_WRITE)
67929@@ -3278,6 +3470,14 @@ static int __do_fault(struct mm_struct *
67930
67931 /* no need to invalidate: a not-present page won't be cached */
67932 update_mmu_cache(vma, address, page_table);
67933+
67934+#ifdef CONFIG_PAX_SEGMEXEC
67935+ if (anon)
67936+ pax_mirror_anon_pte(vma, address, page, ptl);
67937+ else
67938+ pax_mirror_file_pte(vma, address, page, ptl);
67939+#endif
67940+
67941 } else {
67942 if (cow_page)
67943 mem_cgroup_uncharge_page(cow_page);
67944@@ -3431,6 +3631,12 @@ int handle_pte_fault(struct mm_struct *m
67945 if (flags & FAULT_FLAG_WRITE)
67946 flush_tlb_fix_spurious_fault(vma, address);
67947 }
67948+
67949+#ifdef CONFIG_PAX_SEGMEXEC
67950+ pax_mirror_pte(vma, address, pte, pmd, ptl);
67951+ return 0;
67952+#endif
67953+
67954 unlock:
67955 pte_unmap_unlock(pte, ptl);
67956 return 0;
67957@@ -3447,6 +3653,10 @@ int handle_mm_fault(struct mm_struct *mm
67958 pmd_t *pmd;
67959 pte_t *pte;
67960
67961+#ifdef CONFIG_PAX_SEGMEXEC
67962+ struct vm_area_struct *vma_m;
67963+#endif
67964+
67965 __set_current_state(TASK_RUNNING);
67966
67967 count_vm_event(PGFAULT);
67968@@ -3458,6 +3668,34 @@ int handle_mm_fault(struct mm_struct *mm
67969 if (unlikely(is_vm_hugetlb_page(vma)))
67970 return hugetlb_fault(mm, vma, address, flags);
67971
67972+#ifdef CONFIG_PAX_SEGMEXEC
67973+ vma_m = pax_find_mirror_vma(vma);
67974+ if (vma_m) {
67975+ unsigned long address_m;
67976+ pgd_t *pgd_m;
67977+ pud_t *pud_m;
67978+ pmd_t *pmd_m;
67979+
67980+ if (vma->vm_start > vma_m->vm_start) {
67981+ address_m = address;
67982+ address -= SEGMEXEC_TASK_SIZE;
67983+ vma = vma_m;
67984+ } else
67985+ address_m = address + SEGMEXEC_TASK_SIZE;
67986+
67987+ pgd_m = pgd_offset(mm, address_m);
67988+ pud_m = pud_alloc(mm, pgd_m, address_m);
67989+ if (!pud_m)
67990+ return VM_FAULT_OOM;
67991+ pmd_m = pmd_alloc(mm, pud_m, address_m);
67992+ if (!pmd_m)
67993+ return VM_FAULT_OOM;
67994+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
67995+ return VM_FAULT_OOM;
67996+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
67997+ }
67998+#endif
67999+
68000 pgd = pgd_offset(mm, address);
68001 pud = pud_alloc(mm, pgd, address);
68002 if (!pud)
68003@@ -3487,7 +3725,7 @@ int handle_mm_fault(struct mm_struct *mm
68004 * run pte_offset_map on the pmd, if an huge pmd could
68005 * materialize from under us from a different thread.
68006 */
68007- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
68008+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
68009 return VM_FAULT_OOM;
68010 /* if an huge pmd materialized from under us just retry later */
68011 if (unlikely(pmd_trans_huge(*pmd)))
68012@@ -3591,7 +3829,7 @@ static int __init gate_vma_init(void)
68013 gate_vma.vm_start = FIXADDR_USER_START;
68014 gate_vma.vm_end = FIXADDR_USER_END;
68015 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
68016- gate_vma.vm_page_prot = __P101;
68017+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
68018 /*
68019 * Make sure the vDSO gets into every core dump.
68020 * Dumping its contents makes post-mortem fully interpretable later
68021diff -urNp linux-3.1.1/mm/memory-failure.c linux-3.1.1/mm/memory-failure.c
68022--- linux-3.1.1/mm/memory-failure.c 2011-11-11 15:19:27.000000000 -0500
68023+++ linux-3.1.1/mm/memory-failure.c 2011-11-16 18:39:08.000000000 -0500
68024@@ -60,7 +60,7 @@ int sysctl_memory_failure_early_kill __r
68025
68026 int sysctl_memory_failure_recovery __read_mostly = 1;
68027
68028-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
68029+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
68030
68031 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
68032
68033@@ -201,7 +201,7 @@ static int kill_proc_ao(struct task_stru
68034 si.si_signo = SIGBUS;
68035 si.si_errno = 0;
68036 si.si_code = BUS_MCEERR_AO;
68037- si.si_addr = (void *)addr;
68038+ si.si_addr = (void __user *)addr;
68039 #ifdef __ARCH_SI_TRAPNO
68040 si.si_trapno = trapno;
68041 #endif
68042@@ -1009,7 +1009,7 @@ int __memory_failure(unsigned long pfn,
68043 }
68044
68045 nr_pages = 1 << compound_trans_order(hpage);
68046- atomic_long_add(nr_pages, &mce_bad_pages);
68047+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
68048
68049 /*
68050 * We need/can do nothing about count=0 pages.
68051@@ -1039,7 +1039,7 @@ int __memory_failure(unsigned long pfn,
68052 if (!PageHWPoison(hpage)
68053 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
68054 || (p != hpage && TestSetPageHWPoison(hpage))) {
68055- atomic_long_sub(nr_pages, &mce_bad_pages);
68056+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68057 return 0;
68058 }
68059 set_page_hwpoison_huge_page(hpage);
68060@@ -1097,7 +1097,7 @@ int __memory_failure(unsigned long pfn,
68061 }
68062 if (hwpoison_filter(p)) {
68063 if (TestClearPageHWPoison(p))
68064- atomic_long_sub(nr_pages, &mce_bad_pages);
68065+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68066 unlock_page(hpage);
68067 put_page(hpage);
68068 return 0;
68069@@ -1314,7 +1314,7 @@ int unpoison_memory(unsigned long pfn)
68070 return 0;
68071 }
68072 if (TestClearPageHWPoison(p))
68073- atomic_long_sub(nr_pages, &mce_bad_pages);
68074+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68075 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
68076 return 0;
68077 }
68078@@ -1328,7 +1328,7 @@ int unpoison_memory(unsigned long pfn)
68079 */
68080 if (TestClearPageHWPoison(page)) {
68081 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
68082- atomic_long_sub(nr_pages, &mce_bad_pages);
68083+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68084 freeit = 1;
68085 if (PageHuge(page))
68086 clear_page_hwpoison_huge_page(page);
68087@@ -1441,7 +1441,7 @@ static int soft_offline_huge_page(struct
68088 }
68089 done:
68090 if (!PageHWPoison(hpage))
68091- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
68092+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
68093 set_page_hwpoison_huge_page(hpage);
68094 dequeue_hwpoisoned_huge_page(hpage);
68095 /* keep elevated page count for bad page */
68096@@ -1572,7 +1572,7 @@ int soft_offline_page(struct page *page,
68097 return ret;
68098
68099 done:
68100- atomic_long_add(1, &mce_bad_pages);
68101+ atomic_long_add_unchecked(1, &mce_bad_pages);
68102 SetPageHWPoison(page);
68103 /* keep elevated page count for bad page */
68104 return ret;
68105diff -urNp linux-3.1.1/mm/mempolicy.c linux-3.1.1/mm/mempolicy.c
68106--- linux-3.1.1/mm/mempolicy.c 2011-11-11 15:19:27.000000000 -0500
68107+++ linux-3.1.1/mm/mempolicy.c 2011-11-16 18:40:44.000000000 -0500
68108@@ -639,6 +639,10 @@ static int mbind_range(struct mm_struct
68109 unsigned long vmstart;
68110 unsigned long vmend;
68111
68112+#ifdef CONFIG_PAX_SEGMEXEC
68113+ struct vm_area_struct *vma_m;
68114+#endif
68115+
68116 vma = find_vma_prev(mm, start, &prev);
68117 if (!vma || vma->vm_start > start)
68118 return -EFAULT;
68119@@ -669,6 +673,16 @@ static int mbind_range(struct mm_struct
68120 err = policy_vma(vma, new_pol);
68121 if (err)
68122 goto out;
68123+
68124+#ifdef CONFIG_PAX_SEGMEXEC
68125+ vma_m = pax_find_mirror_vma(vma);
68126+ if (vma_m) {
68127+ err = policy_vma(vma_m, new_pol);
68128+ if (err)
68129+ goto out;
68130+ }
68131+#endif
68132+
68133 }
68134
68135 out:
68136@@ -1102,6 +1116,17 @@ static long do_mbind(unsigned long start
68137
68138 if (end < start)
68139 return -EINVAL;
68140+
68141+#ifdef CONFIG_PAX_SEGMEXEC
68142+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
68143+ if (end > SEGMEXEC_TASK_SIZE)
68144+ return -EINVAL;
68145+ } else
68146+#endif
68147+
68148+ if (end > TASK_SIZE)
68149+ return -EINVAL;
68150+
68151 if (end == start)
68152 return 0;
68153
68154@@ -1320,6 +1345,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
68155 if (!mm)
68156 goto out;
68157
68158+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68159+ if (mm != current->mm &&
68160+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68161+ err = -EPERM;
68162+ goto out;
68163+ }
68164+#endif
68165+
68166 /*
68167 * Check if this process has the right to modify the specified
68168 * process. The right exists if the process has administrative
68169@@ -1329,8 +1362,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
68170 rcu_read_lock();
68171 tcred = __task_cred(task);
68172 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68173- cred->uid != tcred->suid && cred->uid != tcred->uid &&
68174- !capable(CAP_SYS_NICE)) {
68175+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68176 rcu_read_unlock();
68177 err = -EPERM;
68178 goto out;
68179diff -urNp linux-3.1.1/mm/migrate.c linux-3.1.1/mm/migrate.c
68180--- linux-3.1.1/mm/migrate.c 2011-11-11 15:19:27.000000000 -0500
68181+++ linux-3.1.1/mm/migrate.c 2011-11-16 18:40:44.000000000 -0500
68182@@ -1124,6 +1124,8 @@ static int do_pages_move(struct mm_struc
68183 unsigned long chunk_start;
68184 int err;
68185
68186+ pax_track_stack();
68187+
68188 task_nodes = cpuset_mems_allowed(task);
68189
68190 err = -ENOMEM;
68191@@ -1308,6 +1310,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
68192 if (!mm)
68193 return -EINVAL;
68194
68195+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68196+ if (mm != current->mm &&
68197+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68198+ err = -EPERM;
68199+ goto out;
68200+ }
68201+#endif
68202+
68203 /*
68204 * Check if this process has the right to modify the specified
68205 * process. The right exists if the process has administrative
68206@@ -1317,8 +1327,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
68207 rcu_read_lock();
68208 tcred = __task_cred(task);
68209 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68210- cred->uid != tcred->suid && cred->uid != tcred->uid &&
68211- !capable(CAP_SYS_NICE)) {
68212+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68213 rcu_read_unlock();
68214 err = -EPERM;
68215 goto out;
68216diff -urNp linux-3.1.1/mm/mlock.c linux-3.1.1/mm/mlock.c
68217--- linux-3.1.1/mm/mlock.c 2011-11-11 15:19:27.000000000 -0500
68218+++ linux-3.1.1/mm/mlock.c 2011-11-16 18:40:44.000000000 -0500
68219@@ -13,6 +13,7 @@
68220 #include <linux/pagemap.h>
68221 #include <linux/mempolicy.h>
68222 #include <linux/syscalls.h>
68223+#include <linux/security.h>
68224 #include <linux/sched.h>
68225 #include <linux/module.h>
68226 #include <linux/rmap.h>
68227@@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
68228 return -EINVAL;
68229 if (end == start)
68230 return 0;
68231+ if (end > TASK_SIZE)
68232+ return -EINVAL;
68233+
68234 vma = find_vma_prev(current->mm, start, &prev);
68235 if (!vma || vma->vm_start > start)
68236 return -ENOMEM;
68237@@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
68238 for (nstart = start ; ; ) {
68239 vm_flags_t newflags;
68240
68241+#ifdef CONFIG_PAX_SEGMEXEC
68242+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68243+ break;
68244+#endif
68245+
68246 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
68247
68248 newflags = vma->vm_flags | VM_LOCKED;
68249@@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
68250 lock_limit >>= PAGE_SHIFT;
68251
68252 /* check against resource limits */
68253+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
68254 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
68255 error = do_mlock(start, len, 1);
68256 up_write(&current->mm->mmap_sem);
68257@@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
68258 static int do_mlockall(int flags)
68259 {
68260 struct vm_area_struct * vma, * prev = NULL;
68261- unsigned int def_flags = 0;
68262
68263 if (flags & MCL_FUTURE)
68264- def_flags = VM_LOCKED;
68265- current->mm->def_flags = def_flags;
68266+ current->mm->def_flags |= VM_LOCKED;
68267+ else
68268+ current->mm->def_flags &= ~VM_LOCKED;
68269 if (flags == MCL_FUTURE)
68270 goto out;
68271
68272 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
68273 vm_flags_t newflags;
68274
68275+#ifdef CONFIG_PAX_SEGMEXEC
68276+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68277+ break;
68278+#endif
68279+
68280+ BUG_ON(vma->vm_end > TASK_SIZE);
68281 newflags = vma->vm_flags | VM_LOCKED;
68282 if (!(flags & MCL_CURRENT))
68283 newflags &= ~VM_LOCKED;
68284@@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
68285 lock_limit >>= PAGE_SHIFT;
68286
68287 ret = -ENOMEM;
68288+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
68289 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
68290 capable(CAP_IPC_LOCK))
68291 ret = do_mlockall(flags);
68292diff -urNp linux-3.1.1/mm/mmap.c linux-3.1.1/mm/mmap.c
68293--- linux-3.1.1/mm/mmap.c 2011-11-11 15:19:27.000000000 -0500
68294+++ linux-3.1.1/mm/mmap.c 2011-11-16 18:40:44.000000000 -0500
68295@@ -46,6 +46,16 @@
68296 #define arch_rebalance_pgtables(addr, len) (addr)
68297 #endif
68298
68299+static inline void verify_mm_writelocked(struct mm_struct *mm)
68300+{
68301+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
68302+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68303+ up_read(&mm->mmap_sem);
68304+ BUG();
68305+ }
68306+#endif
68307+}
68308+
68309 static void unmap_region(struct mm_struct *mm,
68310 struct vm_area_struct *vma, struct vm_area_struct *prev,
68311 unsigned long start, unsigned long end);
68312@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
68313 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
68314 *
68315 */
68316-pgprot_t protection_map[16] = {
68317+pgprot_t protection_map[16] __read_only = {
68318 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
68319 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
68320 };
68321
68322-pgprot_t vm_get_page_prot(unsigned long vm_flags)
68323+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
68324 {
68325- return __pgprot(pgprot_val(protection_map[vm_flags &
68326+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
68327 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
68328 pgprot_val(arch_vm_get_page_prot(vm_flags)));
68329+
68330+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68331+ if (!(__supported_pte_mask & _PAGE_NX) &&
68332+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
68333+ (vm_flags & (VM_READ | VM_WRITE)))
68334+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
68335+#endif
68336+
68337+ return prot;
68338 }
68339 EXPORT_SYMBOL(vm_get_page_prot);
68340
68341 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
68342 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
68343 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
68344+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
68345 /*
68346 * Make sure vm_committed_as in one cacheline and not cacheline shared with
68347 * other variables. It can be updated by several CPUs frequently.
68348@@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma
68349 struct vm_area_struct *next = vma->vm_next;
68350
68351 might_sleep();
68352+ BUG_ON(vma->vm_mirror);
68353 if (vma->vm_ops && vma->vm_ops->close)
68354 vma->vm_ops->close(vma);
68355 if (vma->vm_file) {
68356@@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
68357 * not page aligned -Ram Gupta
68358 */
68359 rlim = rlimit(RLIMIT_DATA);
68360+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
68361 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
68362 (mm->end_data - mm->start_data) > rlim)
68363 goto out;
68364@@ -689,6 +711,12 @@ static int
68365 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
68366 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68367 {
68368+
68369+#ifdef CONFIG_PAX_SEGMEXEC
68370+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
68371+ return 0;
68372+#endif
68373+
68374 if (is_mergeable_vma(vma, file, vm_flags) &&
68375 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
68376 if (vma->vm_pgoff == vm_pgoff)
68377@@ -708,6 +736,12 @@ static int
68378 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68379 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68380 {
68381+
68382+#ifdef CONFIG_PAX_SEGMEXEC
68383+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
68384+ return 0;
68385+#endif
68386+
68387 if (is_mergeable_vma(vma, file, vm_flags) &&
68388 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
68389 pgoff_t vm_pglen;
68390@@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struc
68391 struct vm_area_struct *vma_merge(struct mm_struct *mm,
68392 struct vm_area_struct *prev, unsigned long addr,
68393 unsigned long end, unsigned long vm_flags,
68394- struct anon_vma *anon_vma, struct file *file,
68395+ struct anon_vma *anon_vma, struct file *file,
68396 pgoff_t pgoff, struct mempolicy *policy)
68397 {
68398 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
68399 struct vm_area_struct *area, *next;
68400 int err;
68401
68402+#ifdef CONFIG_PAX_SEGMEXEC
68403+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
68404+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
68405+
68406+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
68407+#endif
68408+
68409 /*
68410 * We later require that vma->vm_flags == vm_flags,
68411 * so this tests vma->vm_flags & VM_SPECIAL, too.
68412@@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct
68413 if (next && next->vm_end == end) /* cases 6, 7, 8 */
68414 next = next->vm_next;
68415
68416+#ifdef CONFIG_PAX_SEGMEXEC
68417+ if (prev)
68418+ prev_m = pax_find_mirror_vma(prev);
68419+ if (area)
68420+ area_m = pax_find_mirror_vma(area);
68421+ if (next)
68422+ next_m = pax_find_mirror_vma(next);
68423+#endif
68424+
68425 /*
68426 * Can it merge with the predecessor?
68427 */
68428@@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct
68429 /* cases 1, 6 */
68430 err = vma_adjust(prev, prev->vm_start,
68431 next->vm_end, prev->vm_pgoff, NULL);
68432- } else /* cases 2, 5, 7 */
68433+
68434+#ifdef CONFIG_PAX_SEGMEXEC
68435+ if (!err && prev_m)
68436+ err = vma_adjust(prev_m, prev_m->vm_start,
68437+ next_m->vm_end, prev_m->vm_pgoff, NULL);
68438+#endif
68439+
68440+ } else { /* cases 2, 5, 7 */
68441 err = vma_adjust(prev, prev->vm_start,
68442 end, prev->vm_pgoff, NULL);
68443+
68444+#ifdef CONFIG_PAX_SEGMEXEC
68445+ if (!err && prev_m)
68446+ err = vma_adjust(prev_m, prev_m->vm_start,
68447+ end_m, prev_m->vm_pgoff, NULL);
68448+#endif
68449+
68450+ }
68451 if (err)
68452 return NULL;
68453 khugepaged_enter_vma_merge(prev);
68454@@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct
68455 mpol_equal(policy, vma_policy(next)) &&
68456 can_vma_merge_before(next, vm_flags,
68457 anon_vma, file, pgoff+pglen)) {
68458- if (prev && addr < prev->vm_end) /* case 4 */
68459+ if (prev && addr < prev->vm_end) { /* case 4 */
68460 err = vma_adjust(prev, prev->vm_start,
68461 addr, prev->vm_pgoff, NULL);
68462- else /* cases 3, 8 */
68463+
68464+#ifdef CONFIG_PAX_SEGMEXEC
68465+ if (!err && prev_m)
68466+ err = vma_adjust(prev_m, prev_m->vm_start,
68467+ addr_m, prev_m->vm_pgoff, NULL);
68468+#endif
68469+
68470+ } else { /* cases 3, 8 */
68471 err = vma_adjust(area, addr, next->vm_end,
68472 next->vm_pgoff - pglen, NULL);
68473+
68474+#ifdef CONFIG_PAX_SEGMEXEC
68475+ if (!err && area_m)
68476+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
68477+ next_m->vm_pgoff - pglen, NULL);
68478+#endif
68479+
68480+ }
68481 if (err)
68482 return NULL;
68483 khugepaged_enter_vma_merge(area);
68484@@ -921,14 +1001,11 @@ none:
68485 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
68486 struct file *file, long pages)
68487 {
68488- const unsigned long stack_flags
68489- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
68490-
68491 if (file) {
68492 mm->shared_vm += pages;
68493 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
68494 mm->exec_vm += pages;
68495- } else if (flags & stack_flags)
68496+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
68497 mm->stack_vm += pages;
68498 if (flags & (VM_RESERVED|VM_IO))
68499 mm->reserved_vm += pages;
68500@@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file
68501 * (the exception is when the underlying filesystem is noexec
68502 * mounted, in which case we dont add PROT_EXEC.)
68503 */
68504- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
68505+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
68506 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
68507 prot |= PROT_EXEC;
68508
68509@@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file
68510 /* Obtain the address to map to. we verify (or select) it and ensure
68511 * that it represents a valid section of the address space.
68512 */
68513- addr = get_unmapped_area(file, addr, len, pgoff, flags);
68514+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
68515 if (addr & ~PAGE_MASK)
68516 return addr;
68517
68518@@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file
68519 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
68520 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
68521
68522+#ifdef CONFIG_PAX_MPROTECT
68523+ if (mm->pax_flags & MF_PAX_MPROTECT) {
68524+#ifndef CONFIG_PAX_MPROTECT_COMPAT
68525+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
68526+ gr_log_rwxmmap(file);
68527+
68528+#ifdef CONFIG_PAX_EMUPLT
68529+ vm_flags &= ~VM_EXEC;
68530+#else
68531+ return -EPERM;
68532+#endif
68533+
68534+ }
68535+
68536+ if (!(vm_flags & VM_EXEC))
68537+ vm_flags &= ~VM_MAYEXEC;
68538+#else
68539+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
68540+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68541+#endif
68542+ else
68543+ vm_flags &= ~VM_MAYWRITE;
68544+ }
68545+#endif
68546+
68547+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68548+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
68549+ vm_flags &= ~VM_PAGEEXEC;
68550+#endif
68551+
68552 if (flags & MAP_LOCKED)
68553 if (!can_do_mlock())
68554 return -EPERM;
68555@@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file
68556 locked += mm->locked_vm;
68557 lock_limit = rlimit(RLIMIT_MEMLOCK);
68558 lock_limit >>= PAGE_SHIFT;
68559+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68560 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
68561 return -EAGAIN;
68562 }
68563@@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file
68564 if (error)
68565 return error;
68566
68567+ if (!gr_acl_handle_mmap(file, prot))
68568+ return -EACCES;
68569+
68570 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
68571 }
68572 EXPORT_SYMBOL(do_mmap_pgoff);
68573@@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area
68574 vm_flags_t vm_flags = vma->vm_flags;
68575
68576 /* If it was private or non-writable, the write bit is already clear */
68577- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
68578+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
68579 return 0;
68580
68581 /* The backer wishes to know when pages are first written to? */
68582@@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *f
68583 unsigned long charged = 0;
68584 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
68585
68586+#ifdef CONFIG_PAX_SEGMEXEC
68587+ struct vm_area_struct *vma_m = NULL;
68588+#endif
68589+
68590+ /*
68591+ * mm->mmap_sem is required to protect against another thread
68592+ * changing the mappings in case we sleep.
68593+ */
68594+ verify_mm_writelocked(mm);
68595+
68596 /* Clear old maps */
68597 error = -ENOMEM;
68598-munmap_back:
68599 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68600 if (vma && vma->vm_start < addr + len) {
68601 if (do_munmap(mm, addr, len))
68602 return -ENOMEM;
68603- goto munmap_back;
68604+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68605+ BUG_ON(vma && vma->vm_start < addr + len);
68606 }
68607
68608 /* Check against address space limit. */
68609@@ -1258,6 +1379,16 @@ munmap_back:
68610 goto unacct_error;
68611 }
68612
68613+#ifdef CONFIG_PAX_SEGMEXEC
68614+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
68615+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
68616+ if (!vma_m) {
68617+ error = -ENOMEM;
68618+ goto free_vma;
68619+ }
68620+ }
68621+#endif
68622+
68623 vma->vm_mm = mm;
68624 vma->vm_start = addr;
68625 vma->vm_end = addr + len;
68626@@ -1281,6 +1412,19 @@ munmap_back:
68627 error = file->f_op->mmap(file, vma);
68628 if (error)
68629 goto unmap_and_free_vma;
68630+
68631+#ifdef CONFIG_PAX_SEGMEXEC
68632+ if (vma_m && (vm_flags & VM_EXECUTABLE))
68633+ added_exe_file_vma(mm);
68634+#endif
68635+
68636+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68637+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
68638+ vma->vm_flags |= VM_PAGEEXEC;
68639+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
68640+ }
68641+#endif
68642+
68643 if (vm_flags & VM_EXECUTABLE)
68644 added_exe_file_vma(mm);
68645
68646@@ -1316,6 +1460,11 @@ munmap_back:
68647 vma_link(mm, vma, prev, rb_link, rb_parent);
68648 file = vma->vm_file;
68649
68650+#ifdef CONFIG_PAX_SEGMEXEC
68651+ if (vma_m)
68652+ BUG_ON(pax_mirror_vma(vma_m, vma));
68653+#endif
68654+
68655 /* Once vma denies write, undo our temporary denial count */
68656 if (correct_wcount)
68657 atomic_inc(&inode->i_writecount);
68658@@ -1324,6 +1473,7 @@ out:
68659
68660 mm->total_vm += len >> PAGE_SHIFT;
68661 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
68662+ track_exec_limit(mm, addr, addr + len, vm_flags);
68663 if (vm_flags & VM_LOCKED) {
68664 if (!mlock_vma_pages_range(vma, addr, addr + len))
68665 mm->locked_vm += (len >> PAGE_SHIFT);
68666@@ -1341,6 +1491,12 @@ unmap_and_free_vma:
68667 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
68668 charged = 0;
68669 free_vma:
68670+
68671+#ifdef CONFIG_PAX_SEGMEXEC
68672+ if (vma_m)
68673+ kmem_cache_free(vm_area_cachep, vma_m);
68674+#endif
68675+
68676 kmem_cache_free(vm_area_cachep, vma);
68677 unacct_error:
68678 if (charged)
68679@@ -1348,6 +1504,44 @@ unacct_error:
68680 return error;
68681 }
68682
68683+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
68684+{
68685+ if (!vma) {
68686+#ifdef CONFIG_STACK_GROWSUP
68687+ if (addr > sysctl_heap_stack_gap)
68688+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
68689+ else
68690+ vma = find_vma(current->mm, 0);
68691+ if (vma && (vma->vm_flags & VM_GROWSUP))
68692+ return false;
68693+#endif
68694+ return true;
68695+ }
68696+
68697+ if (addr + len > vma->vm_start)
68698+ return false;
68699+
68700+ if (vma->vm_flags & VM_GROWSDOWN)
68701+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
68702+#ifdef CONFIG_STACK_GROWSUP
68703+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
68704+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
68705+#endif
68706+
68707+ return true;
68708+}
68709+
68710+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
68711+{
68712+ if (vma->vm_start < len)
68713+ return -ENOMEM;
68714+ if (!(vma->vm_flags & VM_GROWSDOWN))
68715+ return vma->vm_start - len;
68716+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
68717+ return vma->vm_start - len - sysctl_heap_stack_gap;
68718+ return -ENOMEM;
68719+}
68720+
68721 /* Get an address range which is currently unmapped.
68722 * For shmat() with addr=0.
68723 *
68724@@ -1374,18 +1568,23 @@ arch_get_unmapped_area(struct file *filp
68725 if (flags & MAP_FIXED)
68726 return addr;
68727
68728+#ifdef CONFIG_PAX_RANDMMAP
68729+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68730+#endif
68731+
68732 if (addr) {
68733 addr = PAGE_ALIGN(addr);
68734- vma = find_vma(mm, addr);
68735- if (TASK_SIZE - len >= addr &&
68736- (!vma || addr + len <= vma->vm_start))
68737- return addr;
68738+ if (TASK_SIZE - len >= addr) {
68739+ vma = find_vma(mm, addr);
68740+ if (check_heap_stack_gap(vma, addr, len))
68741+ return addr;
68742+ }
68743 }
68744 if (len > mm->cached_hole_size) {
68745- start_addr = addr = mm->free_area_cache;
68746+ start_addr = addr = mm->free_area_cache;
68747 } else {
68748- start_addr = addr = TASK_UNMAPPED_BASE;
68749- mm->cached_hole_size = 0;
68750+ start_addr = addr = mm->mmap_base;
68751+ mm->cached_hole_size = 0;
68752 }
68753
68754 full_search:
68755@@ -1396,34 +1595,40 @@ full_search:
68756 * Start a new search - just in case we missed
68757 * some holes.
68758 */
68759- if (start_addr != TASK_UNMAPPED_BASE) {
68760- addr = TASK_UNMAPPED_BASE;
68761- start_addr = addr;
68762+ if (start_addr != mm->mmap_base) {
68763+ start_addr = addr = mm->mmap_base;
68764 mm->cached_hole_size = 0;
68765 goto full_search;
68766 }
68767 return -ENOMEM;
68768 }
68769- if (!vma || addr + len <= vma->vm_start) {
68770- /*
68771- * Remember the place where we stopped the search:
68772- */
68773- mm->free_area_cache = addr + len;
68774- return addr;
68775- }
68776+ if (check_heap_stack_gap(vma, addr, len))
68777+ break;
68778 if (addr + mm->cached_hole_size < vma->vm_start)
68779 mm->cached_hole_size = vma->vm_start - addr;
68780 addr = vma->vm_end;
68781 }
68782+
68783+ /*
68784+ * Remember the place where we stopped the search:
68785+ */
68786+ mm->free_area_cache = addr + len;
68787+ return addr;
68788 }
68789 #endif
68790
68791 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
68792 {
68793+
68794+#ifdef CONFIG_PAX_SEGMEXEC
68795+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68796+ return;
68797+#endif
68798+
68799 /*
68800 * Is this a new hole at the lowest possible address?
68801 */
68802- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
68803+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
68804 mm->free_area_cache = addr;
68805 mm->cached_hole_size = ~0UL;
68806 }
68807@@ -1441,7 +1646,7 @@ arch_get_unmapped_area_topdown(struct fi
68808 {
68809 struct vm_area_struct *vma;
68810 struct mm_struct *mm = current->mm;
68811- unsigned long addr = addr0;
68812+ unsigned long base = mm->mmap_base, addr = addr0;
68813
68814 /* requested length too big for entire address space */
68815 if (len > TASK_SIZE)
68816@@ -1450,13 +1655,18 @@ arch_get_unmapped_area_topdown(struct fi
68817 if (flags & MAP_FIXED)
68818 return addr;
68819
68820+#ifdef CONFIG_PAX_RANDMMAP
68821+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68822+#endif
68823+
68824 /* requesting a specific address */
68825 if (addr) {
68826 addr = PAGE_ALIGN(addr);
68827- vma = find_vma(mm, addr);
68828- if (TASK_SIZE - len >= addr &&
68829- (!vma || addr + len <= vma->vm_start))
68830- return addr;
68831+ if (TASK_SIZE - len >= addr) {
68832+ vma = find_vma(mm, addr);
68833+ if (check_heap_stack_gap(vma, addr, len))
68834+ return addr;
68835+ }
68836 }
68837
68838 /* check if free_area_cache is useful for us */
68839@@ -1471,7 +1681,7 @@ arch_get_unmapped_area_topdown(struct fi
68840 /* make sure it can fit in the remaining address space */
68841 if (addr > len) {
68842 vma = find_vma(mm, addr-len);
68843- if (!vma || addr <= vma->vm_start)
68844+ if (check_heap_stack_gap(vma, addr - len, len))
68845 /* remember the address as a hint for next time */
68846 return (mm->free_area_cache = addr-len);
68847 }
68848@@ -1488,7 +1698,7 @@ arch_get_unmapped_area_topdown(struct fi
68849 * return with success:
68850 */
68851 vma = find_vma(mm, addr);
68852- if (!vma || addr+len <= vma->vm_start)
68853+ if (check_heap_stack_gap(vma, addr, len))
68854 /* remember the address as a hint for next time */
68855 return (mm->free_area_cache = addr);
68856
68857@@ -1497,8 +1707,8 @@ arch_get_unmapped_area_topdown(struct fi
68858 mm->cached_hole_size = vma->vm_start - addr;
68859
68860 /* try just below the current vma->vm_start */
68861- addr = vma->vm_start-len;
68862- } while (len < vma->vm_start);
68863+ addr = skip_heap_stack_gap(vma, len);
68864+ } while (!IS_ERR_VALUE(addr));
68865
68866 bottomup:
68867 /*
68868@@ -1507,13 +1717,21 @@ bottomup:
68869 * can happen with large stack limits and large mmap()
68870 * allocations.
68871 */
68872+ mm->mmap_base = TASK_UNMAPPED_BASE;
68873+
68874+#ifdef CONFIG_PAX_RANDMMAP
68875+ if (mm->pax_flags & MF_PAX_RANDMMAP)
68876+ mm->mmap_base += mm->delta_mmap;
68877+#endif
68878+
68879+ mm->free_area_cache = mm->mmap_base;
68880 mm->cached_hole_size = ~0UL;
68881- mm->free_area_cache = TASK_UNMAPPED_BASE;
68882 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
68883 /*
68884 * Restore the topdown base:
68885 */
68886- mm->free_area_cache = mm->mmap_base;
68887+ mm->mmap_base = base;
68888+ mm->free_area_cache = base;
68889 mm->cached_hole_size = ~0UL;
68890
68891 return addr;
68892@@ -1522,6 +1740,12 @@ bottomup:
68893
68894 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
68895 {
68896+
68897+#ifdef CONFIG_PAX_SEGMEXEC
68898+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68899+ return;
68900+#endif
68901+
68902 /*
68903 * Is this a new hole at the highest possible address?
68904 */
68905@@ -1529,8 +1753,10 @@ void arch_unmap_area_topdown(struct mm_s
68906 mm->free_area_cache = addr;
68907
68908 /* dont allow allocations above current base */
68909- if (mm->free_area_cache > mm->mmap_base)
68910+ if (mm->free_area_cache > mm->mmap_base) {
68911 mm->free_area_cache = mm->mmap_base;
68912+ mm->cached_hole_size = ~0UL;
68913+ }
68914 }
68915
68916 unsigned long
68917@@ -1638,6 +1864,28 @@ out:
68918 return prev ? prev->vm_next : vma;
68919 }
68920
68921+#ifdef CONFIG_PAX_SEGMEXEC
68922+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
68923+{
68924+ struct vm_area_struct *vma_m;
68925+
68926+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
68927+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
68928+ BUG_ON(vma->vm_mirror);
68929+ return NULL;
68930+ }
68931+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
68932+ vma_m = vma->vm_mirror;
68933+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
68934+ BUG_ON(vma->vm_file != vma_m->vm_file);
68935+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
68936+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
68937+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
68938+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
68939+ return vma_m;
68940+}
68941+#endif
68942+
68943 /*
68944 * Verify that the stack growth is acceptable and
68945 * update accounting. This is shared with both the
68946@@ -1654,6 +1902,7 @@ static int acct_stack_growth(struct vm_a
68947 return -ENOMEM;
68948
68949 /* Stack limit test */
68950+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
68951 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
68952 return -ENOMEM;
68953
68954@@ -1664,6 +1913,7 @@ static int acct_stack_growth(struct vm_a
68955 locked = mm->locked_vm + grow;
68956 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
68957 limit >>= PAGE_SHIFT;
68958+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68959 if (locked > limit && !capable(CAP_IPC_LOCK))
68960 return -ENOMEM;
68961 }
68962@@ -1694,37 +1944,48 @@ static int acct_stack_growth(struct vm_a
68963 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
68964 * vma is the last one with address > vma->vm_end. Have to extend vma.
68965 */
68966+#ifndef CONFIG_IA64
68967+static
68968+#endif
68969 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
68970 {
68971 int error;
68972+ bool locknext;
68973
68974 if (!(vma->vm_flags & VM_GROWSUP))
68975 return -EFAULT;
68976
68977+ /* Also guard against wrapping around to address 0. */
68978+ if (address < PAGE_ALIGN(address+1))
68979+ address = PAGE_ALIGN(address+1);
68980+ else
68981+ return -ENOMEM;
68982+
68983 /*
68984 * We must make sure the anon_vma is allocated
68985 * so that the anon_vma locking is not a noop.
68986 */
68987 if (unlikely(anon_vma_prepare(vma)))
68988 return -ENOMEM;
68989+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
68990+ if (locknext && anon_vma_prepare(vma->vm_next))
68991+ return -ENOMEM;
68992 vma_lock_anon_vma(vma);
68993+ if (locknext)
68994+ vma_lock_anon_vma(vma->vm_next);
68995
68996 /*
68997 * vma->vm_start/vm_end cannot change under us because the caller
68998 * is required to hold the mmap_sem in read mode. We need the
68999- * anon_vma lock to serialize against concurrent expand_stacks.
69000- * Also guard against wrapping around to address 0.
69001+ * anon_vma locks to serialize against concurrent expand_stacks
69002+ * and expand_upwards.
69003 */
69004- if (address < PAGE_ALIGN(address+4))
69005- address = PAGE_ALIGN(address+4);
69006- else {
69007- vma_unlock_anon_vma(vma);
69008- return -ENOMEM;
69009- }
69010 error = 0;
69011
69012 /* Somebody else might have raced and expanded it already */
69013- if (address > vma->vm_end) {
69014+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
69015+ error = -ENOMEM;
69016+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
69017 unsigned long size, grow;
69018
69019 size = address - vma->vm_start;
69020@@ -1739,6 +2000,8 @@ int expand_upwards(struct vm_area_struct
69021 }
69022 }
69023 }
69024+ if (locknext)
69025+ vma_unlock_anon_vma(vma->vm_next);
69026 vma_unlock_anon_vma(vma);
69027 khugepaged_enter_vma_merge(vma);
69028 return error;
69029@@ -1752,6 +2015,8 @@ int expand_downwards(struct vm_area_stru
69030 unsigned long address)
69031 {
69032 int error;
69033+ bool lockprev = false;
69034+ struct vm_area_struct *prev;
69035
69036 /*
69037 * We must make sure the anon_vma is allocated
69038@@ -1765,6 +2030,15 @@ int expand_downwards(struct vm_area_stru
69039 if (error)
69040 return error;
69041
69042+ prev = vma->vm_prev;
69043+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
69044+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
69045+#endif
69046+ if (lockprev && anon_vma_prepare(prev))
69047+ return -ENOMEM;
69048+ if (lockprev)
69049+ vma_lock_anon_vma(prev);
69050+
69051 vma_lock_anon_vma(vma);
69052
69053 /*
69054@@ -1774,9 +2048,17 @@ int expand_downwards(struct vm_area_stru
69055 */
69056
69057 /* Somebody else might have raced and expanded it already */
69058- if (address < vma->vm_start) {
69059+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
69060+ error = -ENOMEM;
69061+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
69062 unsigned long size, grow;
69063
69064+#ifdef CONFIG_PAX_SEGMEXEC
69065+ struct vm_area_struct *vma_m;
69066+
69067+ vma_m = pax_find_mirror_vma(vma);
69068+#endif
69069+
69070 size = vma->vm_end - address;
69071 grow = (vma->vm_start - address) >> PAGE_SHIFT;
69072
69073@@ -1786,11 +2068,22 @@ int expand_downwards(struct vm_area_stru
69074 if (!error) {
69075 vma->vm_start = address;
69076 vma->vm_pgoff -= grow;
69077+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
69078+
69079+#ifdef CONFIG_PAX_SEGMEXEC
69080+ if (vma_m) {
69081+ vma_m->vm_start -= grow << PAGE_SHIFT;
69082+ vma_m->vm_pgoff -= grow;
69083+ }
69084+#endif
69085+
69086 perf_event_mmap(vma);
69087 }
69088 }
69089 }
69090 vma_unlock_anon_vma(vma);
69091+ if (lockprev)
69092+ vma_unlock_anon_vma(prev);
69093 khugepaged_enter_vma_merge(vma);
69094 return error;
69095 }
69096@@ -1860,6 +2153,13 @@ static void remove_vma_list(struct mm_st
69097 do {
69098 long nrpages = vma_pages(vma);
69099
69100+#ifdef CONFIG_PAX_SEGMEXEC
69101+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
69102+ vma = remove_vma(vma);
69103+ continue;
69104+ }
69105+#endif
69106+
69107 mm->total_vm -= nrpages;
69108 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
69109 vma = remove_vma(vma);
69110@@ -1905,6 +2205,16 @@ detach_vmas_to_be_unmapped(struct mm_str
69111 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
69112 vma->vm_prev = NULL;
69113 do {
69114+
69115+#ifdef CONFIG_PAX_SEGMEXEC
69116+ if (vma->vm_mirror) {
69117+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
69118+ vma->vm_mirror->vm_mirror = NULL;
69119+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
69120+ vma->vm_mirror = NULL;
69121+ }
69122+#endif
69123+
69124 rb_erase(&vma->vm_rb, &mm->mm_rb);
69125 mm->map_count--;
69126 tail_vma = vma;
69127@@ -1933,14 +2243,33 @@ static int __split_vma(struct mm_struct
69128 struct vm_area_struct *new;
69129 int err = -ENOMEM;
69130
69131+#ifdef CONFIG_PAX_SEGMEXEC
69132+ struct vm_area_struct *vma_m, *new_m = NULL;
69133+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
69134+#endif
69135+
69136 if (is_vm_hugetlb_page(vma) && (addr &
69137 ~(huge_page_mask(hstate_vma(vma)))))
69138 return -EINVAL;
69139
69140+#ifdef CONFIG_PAX_SEGMEXEC
69141+ vma_m = pax_find_mirror_vma(vma);
69142+#endif
69143+
69144 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69145 if (!new)
69146 goto out_err;
69147
69148+#ifdef CONFIG_PAX_SEGMEXEC
69149+ if (vma_m) {
69150+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69151+ if (!new_m) {
69152+ kmem_cache_free(vm_area_cachep, new);
69153+ goto out_err;
69154+ }
69155+ }
69156+#endif
69157+
69158 /* most fields are the same, copy all, and then fixup */
69159 *new = *vma;
69160
69161@@ -1953,6 +2282,22 @@ static int __split_vma(struct mm_struct
69162 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
69163 }
69164
69165+#ifdef CONFIG_PAX_SEGMEXEC
69166+ if (vma_m) {
69167+ *new_m = *vma_m;
69168+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
69169+ new_m->vm_mirror = new;
69170+ new->vm_mirror = new_m;
69171+
69172+ if (new_below)
69173+ new_m->vm_end = addr_m;
69174+ else {
69175+ new_m->vm_start = addr_m;
69176+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
69177+ }
69178+ }
69179+#endif
69180+
69181 pol = mpol_dup(vma_policy(vma));
69182 if (IS_ERR(pol)) {
69183 err = PTR_ERR(pol);
69184@@ -1978,6 +2323,42 @@ static int __split_vma(struct mm_struct
69185 else
69186 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
69187
69188+#ifdef CONFIG_PAX_SEGMEXEC
69189+ if (!err && vma_m) {
69190+ if (anon_vma_clone(new_m, vma_m))
69191+ goto out_free_mpol;
69192+
69193+ mpol_get(pol);
69194+ vma_set_policy(new_m, pol);
69195+
69196+ if (new_m->vm_file) {
69197+ get_file(new_m->vm_file);
69198+ if (vma_m->vm_flags & VM_EXECUTABLE)
69199+ added_exe_file_vma(mm);
69200+ }
69201+
69202+ if (new_m->vm_ops && new_m->vm_ops->open)
69203+ new_m->vm_ops->open(new_m);
69204+
69205+ if (new_below)
69206+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
69207+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
69208+ else
69209+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
69210+
69211+ if (err) {
69212+ if (new_m->vm_ops && new_m->vm_ops->close)
69213+ new_m->vm_ops->close(new_m);
69214+ if (new_m->vm_file) {
69215+ if (vma_m->vm_flags & VM_EXECUTABLE)
69216+ removed_exe_file_vma(mm);
69217+ fput(new_m->vm_file);
69218+ }
69219+ mpol_put(pol);
69220+ }
69221+ }
69222+#endif
69223+
69224 /* Success. */
69225 if (!err)
69226 return 0;
69227@@ -1990,10 +2371,18 @@ static int __split_vma(struct mm_struct
69228 removed_exe_file_vma(mm);
69229 fput(new->vm_file);
69230 }
69231- unlink_anon_vmas(new);
69232 out_free_mpol:
69233 mpol_put(pol);
69234 out_free_vma:
69235+
69236+#ifdef CONFIG_PAX_SEGMEXEC
69237+ if (new_m) {
69238+ unlink_anon_vmas(new_m);
69239+ kmem_cache_free(vm_area_cachep, new_m);
69240+ }
69241+#endif
69242+
69243+ unlink_anon_vmas(new);
69244 kmem_cache_free(vm_area_cachep, new);
69245 out_err:
69246 return err;
69247@@ -2006,6 +2395,15 @@ static int __split_vma(struct mm_struct
69248 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69249 unsigned long addr, int new_below)
69250 {
69251+
69252+#ifdef CONFIG_PAX_SEGMEXEC
69253+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
69254+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
69255+ if (mm->map_count >= sysctl_max_map_count-1)
69256+ return -ENOMEM;
69257+ } else
69258+#endif
69259+
69260 if (mm->map_count >= sysctl_max_map_count)
69261 return -ENOMEM;
69262
69263@@ -2017,11 +2415,30 @@ int split_vma(struct mm_struct *mm, stru
69264 * work. This now handles partial unmappings.
69265 * Jeremy Fitzhardinge <jeremy@goop.org>
69266 */
69267+#ifdef CONFIG_PAX_SEGMEXEC
69268 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69269 {
69270+ int ret = __do_munmap(mm, start, len);
69271+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
69272+ return ret;
69273+
69274+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
69275+}
69276+
69277+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69278+#else
69279+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69280+#endif
69281+{
69282 unsigned long end;
69283 struct vm_area_struct *vma, *prev, *last;
69284
69285+ /*
69286+ * mm->mmap_sem is required to protect against another thread
69287+ * changing the mappings in case we sleep.
69288+ */
69289+ verify_mm_writelocked(mm);
69290+
69291 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
69292 return -EINVAL;
69293
69294@@ -2096,6 +2513,8 @@ int do_munmap(struct mm_struct *mm, unsi
69295 /* Fix up all other VM information */
69296 remove_vma_list(mm, vma);
69297
69298+ track_exec_limit(mm, start, end, 0UL);
69299+
69300 return 0;
69301 }
69302
69303@@ -2108,22 +2527,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
69304
69305 profile_munmap(addr);
69306
69307+#ifdef CONFIG_PAX_SEGMEXEC
69308+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
69309+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
69310+ return -EINVAL;
69311+#endif
69312+
69313 down_write(&mm->mmap_sem);
69314 ret = do_munmap(mm, addr, len);
69315 up_write(&mm->mmap_sem);
69316 return ret;
69317 }
69318
69319-static inline void verify_mm_writelocked(struct mm_struct *mm)
69320-{
69321-#ifdef CONFIG_DEBUG_VM
69322- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
69323- WARN_ON(1);
69324- up_read(&mm->mmap_sem);
69325- }
69326-#endif
69327-}
69328-
69329 /*
69330 * this is really a simplified "do_mmap". it only handles
69331 * anonymous maps. eventually we may be able to do some
69332@@ -2137,6 +2552,7 @@ unsigned long do_brk(unsigned long addr,
69333 struct rb_node ** rb_link, * rb_parent;
69334 pgoff_t pgoff = addr >> PAGE_SHIFT;
69335 int error;
69336+ unsigned long charged;
69337
69338 len = PAGE_ALIGN(len);
69339 if (!len)
69340@@ -2148,16 +2564,30 @@ unsigned long do_brk(unsigned long addr,
69341
69342 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
69343
69344+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
69345+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
69346+ flags &= ~VM_EXEC;
69347+
69348+#ifdef CONFIG_PAX_MPROTECT
69349+ if (mm->pax_flags & MF_PAX_MPROTECT)
69350+ flags &= ~VM_MAYEXEC;
69351+#endif
69352+
69353+ }
69354+#endif
69355+
69356 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
69357 if (error & ~PAGE_MASK)
69358 return error;
69359
69360+ charged = len >> PAGE_SHIFT;
69361+
69362 /*
69363 * mlock MCL_FUTURE?
69364 */
69365 if (mm->def_flags & VM_LOCKED) {
69366 unsigned long locked, lock_limit;
69367- locked = len >> PAGE_SHIFT;
69368+ locked = charged;
69369 locked += mm->locked_vm;
69370 lock_limit = rlimit(RLIMIT_MEMLOCK);
69371 lock_limit >>= PAGE_SHIFT;
69372@@ -2174,22 +2604,22 @@ unsigned long do_brk(unsigned long addr,
69373 /*
69374 * Clear old maps. this also does some error checking for us
69375 */
69376- munmap_back:
69377 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69378 if (vma && vma->vm_start < addr + len) {
69379 if (do_munmap(mm, addr, len))
69380 return -ENOMEM;
69381- goto munmap_back;
69382+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69383+ BUG_ON(vma && vma->vm_start < addr + len);
69384 }
69385
69386 /* Check against address space limits *after* clearing old maps... */
69387- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
69388+ if (!may_expand_vm(mm, charged))
69389 return -ENOMEM;
69390
69391 if (mm->map_count > sysctl_max_map_count)
69392 return -ENOMEM;
69393
69394- if (security_vm_enough_memory(len >> PAGE_SHIFT))
69395+ if (security_vm_enough_memory(charged))
69396 return -ENOMEM;
69397
69398 /* Can we just expand an old private anonymous mapping? */
69399@@ -2203,7 +2633,7 @@ unsigned long do_brk(unsigned long addr,
69400 */
69401 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69402 if (!vma) {
69403- vm_unacct_memory(len >> PAGE_SHIFT);
69404+ vm_unacct_memory(charged);
69405 return -ENOMEM;
69406 }
69407
69408@@ -2217,11 +2647,12 @@ unsigned long do_brk(unsigned long addr,
69409 vma_link(mm, vma, prev, rb_link, rb_parent);
69410 out:
69411 perf_event_mmap(vma);
69412- mm->total_vm += len >> PAGE_SHIFT;
69413+ mm->total_vm += charged;
69414 if (flags & VM_LOCKED) {
69415 if (!mlock_vma_pages_range(vma, addr, addr + len))
69416- mm->locked_vm += (len >> PAGE_SHIFT);
69417+ mm->locked_vm += charged;
69418 }
69419+ track_exec_limit(mm, addr, addr + len, flags);
69420 return addr;
69421 }
69422
69423@@ -2268,8 +2699,10 @@ void exit_mmap(struct mm_struct *mm)
69424 * Walk the list again, actually closing and freeing it,
69425 * with preemption enabled, without holding any MM locks.
69426 */
69427- while (vma)
69428+ while (vma) {
69429+ vma->vm_mirror = NULL;
69430 vma = remove_vma(vma);
69431+ }
69432
69433 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
69434 }
69435@@ -2283,6 +2716,13 @@ int insert_vm_struct(struct mm_struct *
69436 struct vm_area_struct * __vma, * prev;
69437 struct rb_node ** rb_link, * rb_parent;
69438
69439+#ifdef CONFIG_PAX_SEGMEXEC
69440+ struct vm_area_struct *vma_m = NULL;
69441+#endif
69442+
69443+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
69444+ return -EPERM;
69445+
69446 /*
69447 * The vm_pgoff of a purely anonymous vma should be irrelevant
69448 * until its first write fault, when page's anon_vma and index
69449@@ -2305,7 +2745,22 @@ int insert_vm_struct(struct mm_struct *
69450 if ((vma->vm_flags & VM_ACCOUNT) &&
69451 security_vm_enough_memory_mm(mm, vma_pages(vma)))
69452 return -ENOMEM;
69453+
69454+#ifdef CONFIG_PAX_SEGMEXEC
69455+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
69456+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69457+ if (!vma_m)
69458+ return -ENOMEM;
69459+ }
69460+#endif
69461+
69462 vma_link(mm, vma, prev, rb_link, rb_parent);
69463+
69464+#ifdef CONFIG_PAX_SEGMEXEC
69465+ if (vma_m)
69466+ BUG_ON(pax_mirror_vma(vma_m, vma));
69467+#endif
69468+
69469 return 0;
69470 }
69471
69472@@ -2323,6 +2778,8 @@ struct vm_area_struct *copy_vma(struct v
69473 struct rb_node **rb_link, *rb_parent;
69474 struct mempolicy *pol;
69475
69476+ BUG_ON(vma->vm_mirror);
69477+
69478 /*
69479 * If anonymous vma has not yet been faulted, update new pgoff
69480 * to match new location, to increase its chance of merging.
69481@@ -2373,6 +2830,39 @@ struct vm_area_struct *copy_vma(struct v
69482 return NULL;
69483 }
69484
69485+#ifdef CONFIG_PAX_SEGMEXEC
69486+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
69487+{
69488+ struct vm_area_struct *prev_m;
69489+ struct rb_node **rb_link_m, *rb_parent_m;
69490+ struct mempolicy *pol_m;
69491+
69492+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
69493+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
69494+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
69495+ *vma_m = *vma;
69496+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
69497+ if (anon_vma_clone(vma_m, vma))
69498+ return -ENOMEM;
69499+ pol_m = vma_policy(vma_m);
69500+ mpol_get(pol_m);
69501+ vma_set_policy(vma_m, pol_m);
69502+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
69503+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
69504+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
69505+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
69506+ if (vma_m->vm_file)
69507+ get_file(vma_m->vm_file);
69508+ if (vma_m->vm_ops && vma_m->vm_ops->open)
69509+ vma_m->vm_ops->open(vma_m);
69510+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
69511+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
69512+ vma_m->vm_mirror = vma;
69513+ vma->vm_mirror = vma_m;
69514+ return 0;
69515+}
69516+#endif
69517+
69518 /*
69519 * Return true if the calling process may expand its vm space by the passed
69520 * number of pages
69521@@ -2383,7 +2873,7 @@ int may_expand_vm(struct mm_struct *mm,
69522 unsigned long lim;
69523
69524 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
69525-
69526+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
69527 if (cur + npages > lim)
69528 return 0;
69529 return 1;
69530@@ -2454,6 +2944,22 @@ int install_special_mapping(struct mm_st
69531 vma->vm_start = addr;
69532 vma->vm_end = addr + len;
69533
69534+#ifdef CONFIG_PAX_MPROTECT
69535+ if (mm->pax_flags & MF_PAX_MPROTECT) {
69536+#ifndef CONFIG_PAX_MPROTECT_COMPAT
69537+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
69538+ return -EPERM;
69539+ if (!(vm_flags & VM_EXEC))
69540+ vm_flags &= ~VM_MAYEXEC;
69541+#else
69542+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69543+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69544+#endif
69545+ else
69546+ vm_flags &= ~VM_MAYWRITE;
69547+ }
69548+#endif
69549+
69550 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
69551 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69552
69553diff -urNp linux-3.1.1/mm/mprotect.c linux-3.1.1/mm/mprotect.c
69554--- linux-3.1.1/mm/mprotect.c 2011-11-11 15:19:27.000000000 -0500
69555+++ linux-3.1.1/mm/mprotect.c 2011-11-16 18:40:44.000000000 -0500
69556@@ -23,10 +23,16 @@
69557 #include <linux/mmu_notifier.h>
69558 #include <linux/migrate.h>
69559 #include <linux/perf_event.h>
69560+
69561+#ifdef CONFIG_PAX_MPROTECT
69562+#include <linux/elf.h>
69563+#endif
69564+
69565 #include <asm/uaccess.h>
69566 #include <asm/pgtable.h>
69567 #include <asm/cacheflush.h>
69568 #include <asm/tlbflush.h>
69569+#include <asm/mmu_context.h>
69570
69571 #ifndef pgprot_modify
69572 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
69573@@ -141,6 +147,48 @@ static void change_protection(struct vm_
69574 flush_tlb_range(vma, start, end);
69575 }
69576
69577+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69578+/* called while holding the mmap semaphor for writing except stack expansion */
69579+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
69580+{
69581+ unsigned long oldlimit, newlimit = 0UL;
69582+
69583+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
69584+ return;
69585+
69586+ spin_lock(&mm->page_table_lock);
69587+ oldlimit = mm->context.user_cs_limit;
69588+ if ((prot & VM_EXEC) && oldlimit < end)
69589+ /* USER_CS limit moved up */
69590+ newlimit = end;
69591+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
69592+ /* USER_CS limit moved down */
69593+ newlimit = start;
69594+
69595+ if (newlimit) {
69596+ mm->context.user_cs_limit = newlimit;
69597+
69598+#ifdef CONFIG_SMP
69599+ wmb();
69600+ cpus_clear(mm->context.cpu_user_cs_mask);
69601+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
69602+#endif
69603+
69604+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
69605+ }
69606+ spin_unlock(&mm->page_table_lock);
69607+ if (newlimit == end) {
69608+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
69609+
69610+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
69611+ if (is_vm_hugetlb_page(vma))
69612+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
69613+ else
69614+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
69615+ }
69616+}
69617+#endif
69618+
69619 int
69620 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69621 unsigned long start, unsigned long end, unsigned long newflags)
69622@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
69623 int error;
69624 int dirty_accountable = 0;
69625
69626+#ifdef CONFIG_PAX_SEGMEXEC
69627+ struct vm_area_struct *vma_m = NULL;
69628+ unsigned long start_m, end_m;
69629+
69630+ start_m = start + SEGMEXEC_TASK_SIZE;
69631+ end_m = end + SEGMEXEC_TASK_SIZE;
69632+#endif
69633+
69634 if (newflags == oldflags) {
69635 *pprev = vma;
69636 return 0;
69637 }
69638
69639+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
69640+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
69641+
69642+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
69643+ return -ENOMEM;
69644+
69645+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
69646+ return -ENOMEM;
69647+ }
69648+
69649 /*
69650 * If we make a private mapping writable we increase our commit;
69651 * but (without finer accounting) cannot reduce our commit if we
69652@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
69653 }
69654 }
69655
69656+#ifdef CONFIG_PAX_SEGMEXEC
69657+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
69658+ if (start != vma->vm_start) {
69659+ error = split_vma(mm, vma, start, 1);
69660+ if (error)
69661+ goto fail;
69662+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
69663+ *pprev = (*pprev)->vm_next;
69664+ }
69665+
69666+ if (end != vma->vm_end) {
69667+ error = split_vma(mm, vma, end, 0);
69668+ if (error)
69669+ goto fail;
69670+ }
69671+
69672+ if (pax_find_mirror_vma(vma)) {
69673+ error = __do_munmap(mm, start_m, end_m - start_m);
69674+ if (error)
69675+ goto fail;
69676+ } else {
69677+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69678+ if (!vma_m) {
69679+ error = -ENOMEM;
69680+ goto fail;
69681+ }
69682+ vma->vm_flags = newflags;
69683+ error = pax_mirror_vma(vma_m, vma);
69684+ if (error) {
69685+ vma->vm_flags = oldflags;
69686+ goto fail;
69687+ }
69688+ }
69689+ }
69690+#endif
69691+
69692 /*
69693 * First try to merge with previous and/or next vma.
69694 */
69695@@ -204,9 +306,21 @@ success:
69696 * vm_flags and vm_page_prot are protected by the mmap_sem
69697 * held in write mode.
69698 */
69699+
69700+#ifdef CONFIG_PAX_SEGMEXEC
69701+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
69702+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
69703+#endif
69704+
69705 vma->vm_flags = newflags;
69706+
69707+#ifdef CONFIG_PAX_MPROTECT
69708+ if (mm->binfmt && mm->binfmt->handle_mprotect)
69709+ mm->binfmt->handle_mprotect(vma, newflags);
69710+#endif
69711+
69712 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
69713- vm_get_page_prot(newflags));
69714+ vm_get_page_prot(vma->vm_flags));
69715
69716 if (vma_wants_writenotify(vma)) {
69717 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
69718@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69719 end = start + len;
69720 if (end <= start)
69721 return -ENOMEM;
69722+
69723+#ifdef CONFIG_PAX_SEGMEXEC
69724+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
69725+ if (end > SEGMEXEC_TASK_SIZE)
69726+ return -EINVAL;
69727+ } else
69728+#endif
69729+
69730+ if (end > TASK_SIZE)
69731+ return -EINVAL;
69732+
69733 if (!arch_validate_prot(prot))
69734 return -EINVAL;
69735
69736@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69737 /*
69738 * Does the application expect PROT_READ to imply PROT_EXEC:
69739 */
69740- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
69741+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
69742 prot |= PROT_EXEC;
69743
69744 vm_flags = calc_vm_prot_bits(prot);
69745@@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69746 if (start > vma->vm_start)
69747 prev = vma;
69748
69749+#ifdef CONFIG_PAX_MPROTECT
69750+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
69751+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
69752+#endif
69753+
69754 for (nstart = start ; ; ) {
69755 unsigned long newflags;
69756
69757@@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69758
69759 /* newflags >> 4 shift VM_MAY% in place of VM_% */
69760 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
69761+ if (prot & (PROT_WRITE | PROT_EXEC))
69762+ gr_log_rwxmprotect(vma->vm_file);
69763+
69764+ error = -EACCES;
69765+ goto out;
69766+ }
69767+
69768+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
69769 error = -EACCES;
69770 goto out;
69771 }
69772@@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69773 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
69774 if (error)
69775 goto out;
69776+
69777+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
69778+
69779 nstart = tmp;
69780
69781 if (nstart < prev->vm_end)
69782diff -urNp linux-3.1.1/mm/mremap.c linux-3.1.1/mm/mremap.c
69783--- linux-3.1.1/mm/mremap.c 2011-11-11 15:19:27.000000000 -0500
69784+++ linux-3.1.1/mm/mremap.c 2011-11-16 18:39:08.000000000 -0500
69785@@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_str
69786 continue;
69787 pte = ptep_clear_flush(vma, old_addr, old_pte);
69788 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
69789+
69790+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69791+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
69792+ pte = pte_exprotect(pte);
69793+#endif
69794+
69795 set_pte_at(mm, new_addr, new_pte, pte);
69796 }
69797
69798@@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_res
69799 if (is_vm_hugetlb_page(vma))
69800 goto Einval;
69801
69802+#ifdef CONFIG_PAX_SEGMEXEC
69803+ if (pax_find_mirror_vma(vma))
69804+ goto Einval;
69805+#endif
69806+
69807 /* We can't remap across vm area boundaries */
69808 if (old_len > vma->vm_end - addr)
69809 goto Efault;
69810@@ -328,20 +339,25 @@ static unsigned long mremap_to(unsigned
69811 unsigned long ret = -EINVAL;
69812 unsigned long charged = 0;
69813 unsigned long map_flags;
69814+ unsigned long pax_task_size = TASK_SIZE;
69815
69816 if (new_addr & ~PAGE_MASK)
69817 goto out;
69818
69819- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
69820+#ifdef CONFIG_PAX_SEGMEXEC
69821+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
69822+ pax_task_size = SEGMEXEC_TASK_SIZE;
69823+#endif
69824+
69825+ pax_task_size -= PAGE_SIZE;
69826+
69827+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
69828 goto out;
69829
69830 /* Check if the location we're moving into overlaps the
69831 * old location at all, and fail if it does.
69832 */
69833- if ((new_addr <= addr) && (new_addr+new_len) > addr)
69834- goto out;
69835-
69836- if ((addr <= new_addr) && (addr+old_len) > new_addr)
69837+ if (addr + old_len > new_addr && new_addr + new_len > addr)
69838 goto out;
69839
69840 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69841@@ -413,6 +429,7 @@ unsigned long do_mremap(unsigned long ad
69842 struct vm_area_struct *vma;
69843 unsigned long ret = -EINVAL;
69844 unsigned long charged = 0;
69845+ unsigned long pax_task_size = TASK_SIZE;
69846
69847 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
69848 goto out;
69849@@ -431,6 +448,17 @@ unsigned long do_mremap(unsigned long ad
69850 if (!new_len)
69851 goto out;
69852
69853+#ifdef CONFIG_PAX_SEGMEXEC
69854+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
69855+ pax_task_size = SEGMEXEC_TASK_SIZE;
69856+#endif
69857+
69858+ pax_task_size -= PAGE_SIZE;
69859+
69860+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
69861+ old_len > pax_task_size || addr > pax_task_size-old_len)
69862+ goto out;
69863+
69864 if (flags & MREMAP_FIXED) {
69865 if (flags & MREMAP_MAYMOVE)
69866 ret = mremap_to(addr, old_len, new_addr, new_len);
69867@@ -480,6 +508,7 @@ unsigned long do_mremap(unsigned long ad
69868 addr + new_len);
69869 }
69870 ret = addr;
69871+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
69872 goto out;
69873 }
69874 }
69875@@ -506,7 +535,13 @@ unsigned long do_mremap(unsigned long ad
69876 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69877 if (ret)
69878 goto out;
69879+
69880+ map_flags = vma->vm_flags;
69881 ret = move_vma(vma, addr, old_len, new_len, new_addr);
69882+ if (!(ret & ~PAGE_MASK)) {
69883+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
69884+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
69885+ }
69886 }
69887 out:
69888 if (ret & ~PAGE_MASK)
69889diff -urNp linux-3.1.1/mm/nobootmem.c linux-3.1.1/mm/nobootmem.c
69890--- linux-3.1.1/mm/nobootmem.c 2011-11-11 15:19:27.000000000 -0500
69891+++ linux-3.1.1/mm/nobootmem.c 2011-11-16 18:39:08.000000000 -0500
69892@@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
69893 unsigned long __init free_all_memory_core_early(int nodeid)
69894 {
69895 int i;
69896- u64 start, end;
69897+ u64 start, end, startrange, endrange;
69898 unsigned long count = 0;
69899- struct range *range = NULL;
69900+ struct range *range = NULL, rangerange = { 0, 0 };
69901 int nr_range;
69902
69903 nr_range = get_free_all_memory_range(&range, nodeid);
69904+ startrange = __pa(range) >> PAGE_SHIFT;
69905+ endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
69906
69907 for (i = 0; i < nr_range; i++) {
69908 start = range[i].start;
69909 end = range[i].end;
69910+ if (start <= endrange && startrange < end) {
69911+ BUG_ON(rangerange.start | rangerange.end);
69912+ rangerange = range[i];
69913+ continue;
69914+ }
69915 count += end - start;
69916 __free_pages_memory(start, end);
69917 }
69918+ start = rangerange.start;
69919+ end = rangerange.end;
69920+ count += end - start;
69921+ __free_pages_memory(start, end);
69922
69923 return count;
69924 }
69925diff -urNp linux-3.1.1/mm/nommu.c linux-3.1.1/mm/nommu.c
69926--- linux-3.1.1/mm/nommu.c 2011-11-11 15:19:27.000000000 -0500
69927+++ linux-3.1.1/mm/nommu.c 2011-11-16 18:39:08.000000000 -0500
69928@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMI
69929 int sysctl_overcommit_ratio = 50; /* default is 50% */
69930 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
69931 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
69932-int heap_stack_gap = 0;
69933
69934 atomic_long_t mmap_pages_allocated;
69935
69936@@ -825,15 +824,6 @@ struct vm_area_struct *find_vma(struct m
69937 EXPORT_SYMBOL(find_vma);
69938
69939 /*
69940- * find a VMA
69941- * - we don't extend stack VMAs under NOMMU conditions
69942- */
69943-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
69944-{
69945- return find_vma(mm, addr);
69946-}
69947-
69948-/*
69949 * expand a stack to a given address
69950 * - not supported under NOMMU conditions
69951 */
69952@@ -1553,6 +1543,7 @@ int split_vma(struct mm_struct *mm, stru
69953
69954 /* most fields are the same, copy all, and then fixup */
69955 *new = *vma;
69956+ INIT_LIST_HEAD(&new->anon_vma_chain);
69957 *region = *vma->vm_region;
69958 new->vm_region = region;
69959
69960diff -urNp linux-3.1.1/mm/page_alloc.c linux-3.1.1/mm/page_alloc.c
69961--- linux-3.1.1/mm/page_alloc.c 2011-11-11 15:19:27.000000000 -0500
69962+++ linux-3.1.1/mm/page_alloc.c 2011-11-16 18:40:44.000000000 -0500
69963@@ -340,7 +340,7 @@ out:
69964 * This usage means that zero-order pages may not be compound.
69965 */
69966
69967-static void free_compound_page(struct page *page)
69968+void free_compound_page(struct page *page)
69969 {
69970 __free_pages_ok(page, compound_order(page));
69971 }
69972@@ -653,6 +653,10 @@ static bool free_pages_prepare(struct pa
69973 int i;
69974 int bad = 0;
69975
69976+#ifdef CONFIG_PAX_MEMORY_SANITIZE
69977+ unsigned long index = 1UL << order;
69978+#endif
69979+
69980 trace_mm_page_free_direct(page, order);
69981 kmemcheck_free_shadow(page, order);
69982
69983@@ -668,6 +672,12 @@ static bool free_pages_prepare(struct pa
69984 debug_check_no_obj_freed(page_address(page),
69985 PAGE_SIZE << order);
69986 }
69987+
69988+#ifdef CONFIG_PAX_MEMORY_SANITIZE
69989+ for (; index; --index)
69990+ sanitize_highpage(page + index - 1);
69991+#endif
69992+
69993 arch_free_page(page, order);
69994 kernel_map_pages(page, 1 << order, 0);
69995
69996@@ -783,8 +793,10 @@ static int prep_new_page(struct page *pa
69997 arch_alloc_page(page, order);
69998 kernel_map_pages(page, 1 << order, 1);
69999
70000+#ifndef CONFIG_PAX_MEMORY_SANITIZE
70001 if (gfp_flags & __GFP_ZERO)
70002 prep_zero_page(page, order, gfp_flags);
70003+#endif
70004
70005 if (order && (gfp_flags & __GFP_COMP))
70006 prep_compound_page(page, order);
70007@@ -2539,6 +2551,8 @@ void show_free_areas(unsigned int filter
70008 int cpu;
70009 struct zone *zone;
70010
70011+ pax_track_stack();
70012+
70013 for_each_populated_zone(zone) {
70014 if (skip_free_areas_node(filter, zone_to_nid(zone)))
70015 continue;
70016@@ -3350,7 +3364,13 @@ static int pageblock_is_reserved(unsigne
70017 unsigned long pfn;
70018
70019 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
70020+#ifdef CONFIG_X86_32
70021+ /* boot failures in VMware 8 on 32bit vanilla since
70022+ this change */
70023+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
70024+#else
70025 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
70026+#endif
70027 return 1;
70028 }
70029 return 0;
70030diff -urNp linux-3.1.1/mm/percpu.c linux-3.1.1/mm/percpu.c
70031--- linux-3.1.1/mm/percpu.c 2011-11-11 15:19:27.000000000 -0500
70032+++ linux-3.1.1/mm/percpu.c 2011-11-16 18:39:08.000000000 -0500
70033@@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu
70034 static unsigned int pcpu_last_unit_cpu __read_mostly;
70035
70036 /* the address of the first chunk which starts with the kernel static area */
70037-void *pcpu_base_addr __read_mostly;
70038+void *pcpu_base_addr __read_only;
70039 EXPORT_SYMBOL_GPL(pcpu_base_addr);
70040
70041 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
70042diff -urNp linux-3.1.1/mm/rmap.c linux-3.1.1/mm/rmap.c
70043--- linux-3.1.1/mm/rmap.c 2011-11-11 15:19:27.000000000 -0500
70044+++ linux-3.1.1/mm/rmap.c 2011-11-16 18:39:08.000000000 -0500
70045@@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_stru
70046 struct anon_vma *anon_vma = vma->anon_vma;
70047 struct anon_vma_chain *avc;
70048
70049+#ifdef CONFIG_PAX_SEGMEXEC
70050+ struct anon_vma_chain *avc_m = NULL;
70051+#endif
70052+
70053 might_sleep();
70054 if (unlikely(!anon_vma)) {
70055 struct mm_struct *mm = vma->vm_mm;
70056@@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_stru
70057 if (!avc)
70058 goto out_enomem;
70059
70060+#ifdef CONFIG_PAX_SEGMEXEC
70061+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
70062+ if (!avc_m)
70063+ goto out_enomem_free_avc;
70064+#endif
70065+
70066 anon_vma = find_mergeable_anon_vma(vma);
70067 allocated = NULL;
70068 if (!anon_vma) {
70069@@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_stru
70070 /* page_table_lock to protect against threads */
70071 spin_lock(&mm->page_table_lock);
70072 if (likely(!vma->anon_vma)) {
70073+
70074+#ifdef CONFIG_PAX_SEGMEXEC
70075+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
70076+
70077+ if (vma_m) {
70078+ BUG_ON(vma_m->anon_vma);
70079+ vma_m->anon_vma = anon_vma;
70080+ avc_m->anon_vma = anon_vma;
70081+ avc_m->vma = vma;
70082+ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
70083+ list_add(&avc_m->same_anon_vma, &anon_vma->head);
70084+ avc_m = NULL;
70085+ }
70086+#endif
70087+
70088 vma->anon_vma = anon_vma;
70089 avc->anon_vma = anon_vma;
70090 avc->vma = vma;
70091@@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_stru
70092
70093 if (unlikely(allocated))
70094 put_anon_vma(allocated);
70095+
70096+#ifdef CONFIG_PAX_SEGMEXEC
70097+ if (unlikely(avc_m))
70098+ anon_vma_chain_free(avc_m);
70099+#endif
70100+
70101 if (unlikely(avc))
70102 anon_vma_chain_free(avc);
70103 }
70104 return 0;
70105
70106 out_enomem_free_avc:
70107+
70108+#ifdef CONFIG_PAX_SEGMEXEC
70109+ if (avc_m)
70110+ anon_vma_chain_free(avc_m);
70111+#endif
70112+
70113 anon_vma_chain_free(avc);
70114 out_enomem:
70115 return -ENOMEM;
70116@@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct v
70117 * Attach the anon_vmas from src to dst.
70118 * Returns 0 on success, -ENOMEM on failure.
70119 */
70120-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
70121+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
70122 {
70123 struct anon_vma_chain *avc, *pavc;
70124 struct anon_vma *root = NULL;
70125@@ -276,7 +313,7 @@ int anon_vma_clone(struct vm_area_struct
70126 * the corresponding VMA in the parent process is attached to.
70127 * Returns 0 on success, non-zero on failure.
70128 */
70129-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
70130+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
70131 {
70132 struct anon_vma_chain *avc;
70133 struct anon_vma *anon_vma;
70134diff -urNp linux-3.1.1/mm/shmem.c linux-3.1.1/mm/shmem.c
70135--- linux-3.1.1/mm/shmem.c 2011-11-11 15:19:27.000000000 -0500
70136+++ linux-3.1.1/mm/shmem.c 2011-11-16 19:28:28.000000000 -0500
70137@@ -31,7 +31,7 @@
70138 #include <linux/module.h>
70139 #include <linux/swap.h>
70140
70141-static struct vfsmount *shm_mnt;
70142+struct vfsmount *shm_mnt;
70143
70144 #ifdef CONFIG_SHMEM
70145 /*
70146@@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
70147 #define BOGO_DIRENT_SIZE 20
70148
70149 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
70150-#define SHORT_SYMLINK_LEN 128
70151+#define SHORT_SYMLINK_LEN 64
70152
70153 struct shmem_xattr {
70154 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
70155@@ -769,6 +769,8 @@ static struct page *shmem_swapin(swp_ent
70156 struct mempolicy mpol, *spol;
70157 struct vm_area_struct pvma;
70158
70159+ pax_track_stack();
70160+
70161 spol = mpol_cond_copy(&mpol,
70162 mpol_shared_policy_lookup(&info->policy, index));
70163
70164@@ -2149,8 +2151,7 @@ int shmem_fill_super(struct super_block
70165 int err = -ENOMEM;
70166
70167 /* Round up to L1_CACHE_BYTES to resist false sharing */
70168- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
70169- L1_CACHE_BYTES), GFP_KERNEL);
70170+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
70171 if (!sbinfo)
70172 return -ENOMEM;
70173
70174diff -urNp linux-3.1.1/mm/slab.c linux-3.1.1/mm/slab.c
70175--- linux-3.1.1/mm/slab.c 2011-11-11 15:19:27.000000000 -0500
70176+++ linux-3.1.1/mm/slab.c 2011-11-16 18:40:44.000000000 -0500
70177@@ -151,7 +151,7 @@
70178
70179 /* Legal flag mask for kmem_cache_create(). */
70180 #if DEBUG
70181-# define CREATE_MASK (SLAB_RED_ZONE | \
70182+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
70183 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
70184 SLAB_CACHE_DMA | \
70185 SLAB_STORE_USER | \
70186@@ -159,7 +159,7 @@
70187 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70188 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
70189 #else
70190-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
70191+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
70192 SLAB_CACHE_DMA | \
70193 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
70194 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70195@@ -288,7 +288,7 @@ struct kmem_list3 {
70196 * Need this for bootstrapping a per node allocator.
70197 */
70198 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
70199-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
70200+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
70201 #define CACHE_CACHE 0
70202 #define SIZE_AC MAX_NUMNODES
70203 #define SIZE_L3 (2 * MAX_NUMNODES)
70204@@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_
70205 if ((x)->max_freeable < i) \
70206 (x)->max_freeable = i; \
70207 } while (0)
70208-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
70209-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
70210-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
70211-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
70212+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
70213+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
70214+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
70215+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
70216 #else
70217 #define STATS_INC_ACTIVE(x) do { } while (0)
70218 #define STATS_DEC_ACTIVE(x) do { } while (0)
70219@@ -538,7 +538,7 @@ static inline void *index_to_obj(struct
70220 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
70221 */
70222 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
70223- const struct slab *slab, void *obj)
70224+ const struct slab *slab, const void *obj)
70225 {
70226 u32 offset = (obj - slab->s_mem);
70227 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
70228@@ -564,7 +564,7 @@ struct cache_names {
70229 static struct cache_names __initdata cache_names[] = {
70230 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
70231 #include <linux/kmalloc_sizes.h>
70232- {NULL,}
70233+ {NULL}
70234 #undef CACHE
70235 };
70236
70237@@ -1571,7 +1571,7 @@ void __init kmem_cache_init(void)
70238 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
70239 sizes[INDEX_AC].cs_size,
70240 ARCH_KMALLOC_MINALIGN,
70241- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70242+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70243 NULL);
70244
70245 if (INDEX_AC != INDEX_L3) {
70246@@ -1579,7 +1579,7 @@ void __init kmem_cache_init(void)
70247 kmem_cache_create(names[INDEX_L3].name,
70248 sizes[INDEX_L3].cs_size,
70249 ARCH_KMALLOC_MINALIGN,
70250- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70251+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70252 NULL);
70253 }
70254
70255@@ -1597,7 +1597,7 @@ void __init kmem_cache_init(void)
70256 sizes->cs_cachep = kmem_cache_create(names->name,
70257 sizes->cs_size,
70258 ARCH_KMALLOC_MINALIGN,
70259- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70260+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70261 NULL);
70262 }
70263 #ifdef CONFIG_ZONE_DMA
70264@@ -4324,10 +4324,10 @@ static int s_show(struct seq_file *m, vo
70265 }
70266 /* cpu stats */
70267 {
70268- unsigned long allochit = atomic_read(&cachep->allochit);
70269- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
70270- unsigned long freehit = atomic_read(&cachep->freehit);
70271- unsigned long freemiss = atomic_read(&cachep->freemiss);
70272+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
70273+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
70274+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
70275+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
70276
70277 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
70278 allochit, allocmiss, freehit, freemiss);
70279@@ -4584,15 +4584,70 @@ static const struct file_operations proc
70280
70281 static int __init slab_proc_init(void)
70282 {
70283- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
70284+ mode_t gr_mode = S_IRUGO;
70285+
70286+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70287+ gr_mode = S_IRUSR;
70288+#endif
70289+
70290+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
70291 #ifdef CONFIG_DEBUG_SLAB_LEAK
70292- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
70293+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
70294 #endif
70295 return 0;
70296 }
70297 module_init(slab_proc_init);
70298 #endif
70299
70300+void check_object_size(const void *ptr, unsigned long n, bool to)
70301+{
70302+
70303+#ifdef CONFIG_PAX_USERCOPY
70304+ struct page *page;
70305+ struct kmem_cache *cachep = NULL;
70306+ struct slab *slabp;
70307+ unsigned int objnr;
70308+ unsigned long offset;
70309+ const char *type;
70310+
70311+ if (!n)
70312+ return;
70313+
70314+ type = "<null>";
70315+ if (ZERO_OR_NULL_PTR(ptr))
70316+ goto report;
70317+
70318+ if (!virt_addr_valid(ptr))
70319+ return;
70320+
70321+ page = virt_to_head_page(ptr);
70322+
70323+ type = "<process stack>";
70324+ if (!PageSlab(page)) {
70325+ if (object_is_on_stack(ptr, n) == -1)
70326+ goto report;
70327+ return;
70328+ }
70329+
70330+ cachep = page_get_cache(page);
70331+ type = cachep->name;
70332+ if (!(cachep->flags & SLAB_USERCOPY))
70333+ goto report;
70334+
70335+ slabp = page_get_slab(page);
70336+ objnr = obj_to_index(cachep, slabp, ptr);
70337+ BUG_ON(objnr >= cachep->num);
70338+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
70339+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
70340+ return;
70341+
70342+report:
70343+ pax_report_usercopy(ptr, n, to, type);
70344+#endif
70345+
70346+}
70347+EXPORT_SYMBOL(check_object_size);
70348+
70349 /**
70350 * ksize - get the actual amount of memory allocated for a given object
70351 * @objp: Pointer to the object
70352diff -urNp linux-3.1.1/mm/slob.c linux-3.1.1/mm/slob.c
70353--- linux-3.1.1/mm/slob.c 2011-11-11 15:19:27.000000000 -0500
70354+++ linux-3.1.1/mm/slob.c 2011-11-16 18:39:08.000000000 -0500
70355@@ -29,7 +29,7 @@
70356 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
70357 * alloc_pages() directly, allocating compound pages so the page order
70358 * does not have to be separately tracked, and also stores the exact
70359- * allocation size in page->private so that it can be used to accurately
70360+ * allocation size in slob_page->size so that it can be used to accurately
70361 * provide ksize(). These objects are detected in kfree() because slob_page()
70362 * is false for them.
70363 *
70364@@ -58,6 +58,7 @@
70365 */
70366
70367 #include <linux/kernel.h>
70368+#include <linux/sched.h>
70369 #include <linux/slab.h>
70370 #include <linux/mm.h>
70371 #include <linux/swap.h> /* struct reclaim_state */
70372@@ -102,7 +103,8 @@ struct slob_page {
70373 unsigned long flags; /* mandatory */
70374 atomic_t _count; /* mandatory */
70375 slobidx_t units; /* free units left in page */
70376- unsigned long pad[2];
70377+ unsigned long pad[1];
70378+ unsigned long size; /* size when >=PAGE_SIZE */
70379 slob_t *free; /* first free slob_t in page */
70380 struct list_head list; /* linked list of free pages */
70381 };
70382@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
70383 */
70384 static inline int is_slob_page(struct slob_page *sp)
70385 {
70386- return PageSlab((struct page *)sp);
70387+ return PageSlab((struct page *)sp) && !sp->size;
70388 }
70389
70390 static inline void set_slob_page(struct slob_page *sp)
70391@@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
70392
70393 static inline struct slob_page *slob_page(const void *addr)
70394 {
70395- return (struct slob_page *)virt_to_page(addr);
70396+ return (struct slob_page *)virt_to_head_page(addr);
70397 }
70398
70399 /*
70400@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
70401 /*
70402 * Return the size of a slob block.
70403 */
70404-static slobidx_t slob_units(slob_t *s)
70405+static slobidx_t slob_units(const slob_t *s)
70406 {
70407 if (s->units > 0)
70408 return s->units;
70409@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
70410 /*
70411 * Return the next free slob block pointer after this one.
70412 */
70413-static slob_t *slob_next(slob_t *s)
70414+static slob_t *slob_next(const slob_t *s)
70415 {
70416 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
70417 slobidx_t next;
70418@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
70419 /*
70420 * Returns true if s is the last free block in its page.
70421 */
70422-static int slob_last(slob_t *s)
70423+static int slob_last(const slob_t *s)
70424 {
70425 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
70426 }
70427@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
70428 if (!page)
70429 return NULL;
70430
70431+ set_slob_page(page);
70432 return page_address(page);
70433 }
70434
70435@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
70436 if (!b)
70437 return NULL;
70438 sp = slob_page(b);
70439- set_slob_page(sp);
70440
70441 spin_lock_irqsave(&slob_lock, flags);
70442 sp->units = SLOB_UNITS(PAGE_SIZE);
70443 sp->free = b;
70444+ sp->size = 0;
70445 INIT_LIST_HEAD(&sp->list);
70446 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
70447 set_slob_page_free(sp, slob_list);
70448@@ -476,10 +479,9 @@ out:
70449 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
70450 */
70451
70452-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70453+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
70454 {
70455- unsigned int *m;
70456- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70457+ slob_t *m;
70458 void *ret;
70459
70460 gfp &= gfp_allowed_mask;
70461@@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t
70462
70463 if (!m)
70464 return NULL;
70465- *m = size;
70466+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
70467+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
70468+ m[0].units = size;
70469+ m[1].units = align;
70470 ret = (void *)m + align;
70471
70472 trace_kmalloc_node(_RET_IP_, ret,
70473@@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t
70474 gfp |= __GFP_COMP;
70475 ret = slob_new_pages(gfp, order, node);
70476 if (ret) {
70477- struct page *page;
70478- page = virt_to_page(ret);
70479- page->private = size;
70480+ struct slob_page *sp;
70481+ sp = slob_page(ret);
70482+ sp->size = size;
70483 }
70484
70485 trace_kmalloc_node(_RET_IP_, ret,
70486 size, PAGE_SIZE << order, gfp, node);
70487 }
70488
70489- kmemleak_alloc(ret, size, 1, gfp);
70490+ return ret;
70491+}
70492+
70493+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70494+{
70495+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70496+ void *ret = __kmalloc_node_align(size, gfp, node, align);
70497+
70498+ if (!ZERO_OR_NULL_PTR(ret))
70499+ kmemleak_alloc(ret, size, 1, gfp);
70500 return ret;
70501 }
70502 EXPORT_SYMBOL(__kmalloc_node);
70503@@ -533,13 +547,92 @@ void kfree(const void *block)
70504 sp = slob_page(block);
70505 if (is_slob_page(sp)) {
70506 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70507- unsigned int *m = (unsigned int *)(block - align);
70508- slob_free(m, *m + align);
70509- } else
70510+ slob_t *m = (slob_t *)(block - align);
70511+ slob_free(m, m[0].units + align);
70512+ } else {
70513+ clear_slob_page(sp);
70514+ free_slob_page(sp);
70515+ sp->size = 0;
70516 put_page(&sp->page);
70517+ }
70518 }
70519 EXPORT_SYMBOL(kfree);
70520
70521+void check_object_size(const void *ptr, unsigned long n, bool to)
70522+{
70523+
70524+#ifdef CONFIG_PAX_USERCOPY
70525+ struct slob_page *sp;
70526+ const slob_t *free;
70527+ const void *base;
70528+ unsigned long flags;
70529+ const char *type;
70530+
70531+ if (!n)
70532+ return;
70533+
70534+ type = "<null>";
70535+ if (ZERO_OR_NULL_PTR(ptr))
70536+ goto report;
70537+
70538+ if (!virt_addr_valid(ptr))
70539+ return;
70540+
70541+ type = "<process stack>";
70542+ sp = slob_page(ptr);
70543+ if (!PageSlab((struct page*)sp)) {
70544+ if (object_is_on_stack(ptr, n) == -1)
70545+ goto report;
70546+ return;
70547+ }
70548+
70549+ type = "<slob>";
70550+ if (sp->size) {
70551+ base = page_address(&sp->page);
70552+ if (base <= ptr && n <= sp->size - (ptr - base))
70553+ return;
70554+ goto report;
70555+ }
70556+
70557+ /* some tricky double walking to find the chunk */
70558+ spin_lock_irqsave(&slob_lock, flags);
70559+ base = (void *)((unsigned long)ptr & PAGE_MASK);
70560+ free = sp->free;
70561+
70562+ while (!slob_last(free) && (void *)free <= ptr) {
70563+ base = free + slob_units(free);
70564+ free = slob_next(free);
70565+ }
70566+
70567+ while (base < (void *)free) {
70568+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
70569+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
70570+ int offset;
70571+
70572+ if (ptr < base + align)
70573+ break;
70574+
70575+ offset = ptr - base - align;
70576+ if (offset >= m) {
70577+ base += size;
70578+ continue;
70579+ }
70580+
70581+ if (n > m - offset)
70582+ break;
70583+
70584+ spin_unlock_irqrestore(&slob_lock, flags);
70585+ return;
70586+ }
70587+
70588+ spin_unlock_irqrestore(&slob_lock, flags);
70589+report:
70590+ pax_report_usercopy(ptr, n, to, type);
70591+#endif
70592+
70593+}
70594+EXPORT_SYMBOL(check_object_size);
70595+
70596 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
70597 size_t ksize(const void *block)
70598 {
70599@@ -552,10 +645,10 @@ size_t ksize(const void *block)
70600 sp = slob_page(block);
70601 if (is_slob_page(sp)) {
70602 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70603- unsigned int *m = (unsigned int *)(block - align);
70604- return SLOB_UNITS(*m) * SLOB_UNIT;
70605+ slob_t *m = (slob_t *)(block - align);
70606+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
70607 } else
70608- return sp->page.private;
70609+ return sp->size;
70610 }
70611 EXPORT_SYMBOL(ksize);
70612
70613@@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(con
70614 {
70615 struct kmem_cache *c;
70616
70617+#ifdef CONFIG_PAX_USERCOPY
70618+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
70619+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
70620+#else
70621 c = slob_alloc(sizeof(struct kmem_cache),
70622 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
70623+#endif
70624
70625 if (c) {
70626 c->name = name;
70627@@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_
70628
70629 lockdep_trace_alloc(flags);
70630
70631+#ifdef CONFIG_PAX_USERCOPY
70632+ b = __kmalloc_node_align(c->size, flags, node, c->align);
70633+#else
70634 if (c->size < PAGE_SIZE) {
70635 b = slob_alloc(c->size, flags, c->align, node);
70636 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70637 SLOB_UNITS(c->size) * SLOB_UNIT,
70638 flags, node);
70639 } else {
70640+ struct slob_page *sp;
70641+
70642 b = slob_new_pages(flags, get_order(c->size), node);
70643+ sp = slob_page(b);
70644+ sp->size = c->size;
70645 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70646 PAGE_SIZE << get_order(c->size),
70647 flags, node);
70648 }
70649+#endif
70650
70651 if (c->ctor)
70652 c->ctor(b);
70653@@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
70654
70655 static void __kmem_cache_free(void *b, int size)
70656 {
70657- if (size < PAGE_SIZE)
70658+ struct slob_page *sp = slob_page(b);
70659+
70660+ if (is_slob_page(sp))
70661 slob_free(b, size);
70662- else
70663+ else {
70664+ clear_slob_page(sp);
70665+ free_slob_page(sp);
70666+ sp->size = 0;
70667 slob_free_pages(b, get_order(size));
70668+ }
70669 }
70670
70671 static void kmem_rcu_free(struct rcu_head *head)
70672@@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_hea
70673
70674 void kmem_cache_free(struct kmem_cache *c, void *b)
70675 {
70676+ int size = c->size;
70677+
70678+#ifdef CONFIG_PAX_USERCOPY
70679+ if (size + c->align < PAGE_SIZE) {
70680+ size += c->align;
70681+ b -= c->align;
70682+ }
70683+#endif
70684+
70685 kmemleak_free_recursive(b, c->flags);
70686 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
70687 struct slob_rcu *slob_rcu;
70688- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
70689- slob_rcu->size = c->size;
70690+ slob_rcu = b + (size - sizeof(struct slob_rcu));
70691+ slob_rcu->size = size;
70692 call_rcu(&slob_rcu->head, kmem_rcu_free);
70693 } else {
70694- __kmem_cache_free(b, c->size);
70695+ __kmem_cache_free(b, size);
70696 }
70697
70698+#ifdef CONFIG_PAX_USERCOPY
70699+ trace_kfree(_RET_IP_, b);
70700+#else
70701 trace_kmem_cache_free(_RET_IP_, b);
70702+#endif
70703+
70704 }
70705 EXPORT_SYMBOL(kmem_cache_free);
70706
70707diff -urNp linux-3.1.1/mm/slub.c linux-3.1.1/mm/slub.c
70708--- linux-3.1.1/mm/slub.c 2011-11-11 15:19:27.000000000 -0500
70709+++ linux-3.1.1/mm/slub.c 2011-11-16 19:27:25.000000000 -0500
70710@@ -208,7 +208,7 @@ struct track {
70711
70712 enum track_item { TRACK_ALLOC, TRACK_FREE };
70713
70714-#ifdef CONFIG_SYSFS
70715+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70716 static int sysfs_slab_add(struct kmem_cache *);
70717 static int sysfs_slab_alias(struct kmem_cache *, const char *);
70718 static void sysfs_slab_remove(struct kmem_cache *);
70719@@ -556,7 +556,7 @@ static void print_track(const char *s, s
70720 if (!t->addr)
70721 return;
70722
70723- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
70724+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
70725 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
70726 #ifdef CONFIG_STACKTRACE
70727 {
70728@@ -2456,6 +2456,8 @@ void kmem_cache_free(struct kmem_cache *
70729
70730 page = virt_to_head_page(x);
70731
70732+ BUG_ON(!PageSlab(page));
70733+
70734 slab_free(s, page, x, _RET_IP_);
70735
70736 trace_kmem_cache_free(_RET_IP_, x);
70737@@ -2489,7 +2491,7 @@ static int slub_min_objects;
70738 * Merge control. If this is set then no merging of slab caches will occur.
70739 * (Could be removed. This was introduced to pacify the merge skeptics.)
70740 */
70741-static int slub_nomerge;
70742+static int slub_nomerge = 1;
70743
70744 /*
70745 * Calculate the order of allocation given an slab object size.
70746@@ -2912,7 +2914,7 @@ static int kmem_cache_open(struct kmem_c
70747 * list to avoid pounding the page allocator excessively.
70748 */
70749 set_min_partial(s, ilog2(s->size));
70750- s->refcount = 1;
70751+ atomic_set(&s->refcount, 1);
70752 #ifdef CONFIG_NUMA
70753 s->remote_node_defrag_ratio = 1000;
70754 #endif
70755@@ -3017,8 +3019,7 @@ static inline int kmem_cache_close(struc
70756 void kmem_cache_destroy(struct kmem_cache *s)
70757 {
70758 down_write(&slub_lock);
70759- s->refcount--;
70760- if (!s->refcount) {
70761+ if (atomic_dec_and_test(&s->refcount)) {
70762 list_del(&s->list);
70763 if (kmem_cache_close(s)) {
70764 printk(KERN_ERR "SLUB %s: %s called for cache that "
70765@@ -3228,6 +3229,50 @@ void *__kmalloc_node(size_t size, gfp_t
70766 EXPORT_SYMBOL(__kmalloc_node);
70767 #endif
70768
70769+void check_object_size(const void *ptr, unsigned long n, bool to)
70770+{
70771+
70772+#ifdef CONFIG_PAX_USERCOPY
70773+ struct page *page;
70774+ struct kmem_cache *s = NULL;
70775+ unsigned long offset;
70776+ const char *type;
70777+
70778+ if (!n)
70779+ return;
70780+
70781+ type = "<null>";
70782+ if (ZERO_OR_NULL_PTR(ptr))
70783+ goto report;
70784+
70785+ if (!virt_addr_valid(ptr))
70786+ return;
70787+
70788+ page = virt_to_head_page(ptr);
70789+
70790+ type = "<process stack>";
70791+ if (!PageSlab(page)) {
70792+ if (object_is_on_stack(ptr, n) == -1)
70793+ goto report;
70794+ return;
70795+ }
70796+
70797+ s = page->slab;
70798+ type = s->name;
70799+ if (!(s->flags & SLAB_USERCOPY))
70800+ goto report;
70801+
70802+ offset = (ptr - page_address(page)) % s->size;
70803+ if (offset <= s->objsize && n <= s->objsize - offset)
70804+ return;
70805+
70806+report:
70807+ pax_report_usercopy(ptr, n, to, type);
70808+#endif
70809+
70810+}
70811+EXPORT_SYMBOL(check_object_size);
70812+
70813 size_t ksize(const void *object)
70814 {
70815 struct page *page;
70816@@ -3502,7 +3547,7 @@ static void __init kmem_cache_bootstrap_
70817 int node;
70818
70819 list_add(&s->list, &slab_caches);
70820- s->refcount = -1;
70821+ atomic_set(&s->refcount, -1);
70822
70823 for_each_node_state(node, N_NORMAL_MEMORY) {
70824 struct kmem_cache_node *n = get_node(s, node);
70825@@ -3619,17 +3664,17 @@ void __init kmem_cache_init(void)
70826
70827 /* Caches that are not of the two-to-the-power-of size */
70828 if (KMALLOC_MIN_SIZE <= 32) {
70829- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
70830+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
70831 caches++;
70832 }
70833
70834 if (KMALLOC_MIN_SIZE <= 64) {
70835- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
70836+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
70837 caches++;
70838 }
70839
70840 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
70841- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
70842+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
70843 caches++;
70844 }
70845
70846@@ -3697,7 +3742,7 @@ static int slab_unmergeable(struct kmem_
70847 /*
70848 * We may have set a slab to be unmergeable during bootstrap.
70849 */
70850- if (s->refcount < 0)
70851+ if (atomic_read(&s->refcount) < 0)
70852 return 1;
70853
70854 return 0;
70855@@ -3756,7 +3801,7 @@ struct kmem_cache *kmem_cache_create(con
70856 down_write(&slub_lock);
70857 s = find_mergeable(size, align, flags, name, ctor);
70858 if (s) {
70859- s->refcount++;
70860+ atomic_inc(&s->refcount);
70861 /*
70862 * Adjust the object sizes so that we clear
70863 * the complete object on kzalloc.
70864@@ -3765,7 +3810,7 @@ struct kmem_cache *kmem_cache_create(con
70865 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
70866
70867 if (sysfs_slab_alias(s, name)) {
70868- s->refcount--;
70869+ atomic_dec(&s->refcount);
70870 goto err;
70871 }
70872 up_write(&slub_lock);
70873@@ -3893,7 +3938,7 @@ void *__kmalloc_node_track_caller(size_t
70874 }
70875 #endif
70876
70877-#ifdef CONFIG_SYSFS
70878+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70879 static int count_inuse(struct page *page)
70880 {
70881 return page->inuse;
70882@@ -4280,12 +4325,12 @@ static void resiliency_test(void)
70883 validate_slab_cache(kmalloc_caches[9]);
70884 }
70885 #else
70886-#ifdef CONFIG_SYSFS
70887+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70888 static void resiliency_test(void) {};
70889 #endif
70890 #endif
70891
70892-#ifdef CONFIG_SYSFS
70893+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70894 enum slab_stat_type {
70895 SL_ALL, /* All slabs */
70896 SL_PARTIAL, /* Only partially allocated slabs */
70897@@ -4495,7 +4540,7 @@ SLAB_ATTR_RO(ctor);
70898
70899 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
70900 {
70901- return sprintf(buf, "%d\n", s->refcount - 1);
70902+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
70903 }
70904 SLAB_ATTR_RO(aliases);
70905
70906@@ -5025,6 +5070,7 @@ static char *create_unique_id(struct kme
70907 return name;
70908 }
70909
70910+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70911 static int sysfs_slab_add(struct kmem_cache *s)
70912 {
70913 int err;
70914@@ -5087,6 +5133,7 @@ static void sysfs_slab_remove(struct kme
70915 kobject_del(&s->kobj);
70916 kobject_put(&s->kobj);
70917 }
70918+#endif
70919
70920 /*
70921 * Need to buffer aliases during bootup until sysfs becomes
70922@@ -5100,6 +5147,7 @@ struct saved_alias {
70923
70924 static struct saved_alias *alias_list;
70925
70926+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70927 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
70928 {
70929 struct saved_alias *al;
70930@@ -5122,6 +5170,7 @@ static int sysfs_slab_alias(struct kmem_
70931 alias_list = al;
70932 return 0;
70933 }
70934+#endif
70935
70936 static int __init slab_sysfs_init(void)
70937 {
70938@@ -5257,7 +5306,13 @@ static const struct file_operations proc
70939
70940 static int __init slab_proc_init(void)
70941 {
70942- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
70943+ mode_t gr_mode = S_IRUGO;
70944+
70945+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70946+ gr_mode = S_IRUSR;
70947+#endif
70948+
70949+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
70950 return 0;
70951 }
70952 module_init(slab_proc_init);
70953diff -urNp linux-3.1.1/mm/swap.c linux-3.1.1/mm/swap.c
70954--- linux-3.1.1/mm/swap.c 2011-11-11 15:19:27.000000000 -0500
70955+++ linux-3.1.1/mm/swap.c 2011-11-16 18:39:08.000000000 -0500
70956@@ -31,6 +31,7 @@
70957 #include <linux/backing-dev.h>
70958 #include <linux/memcontrol.h>
70959 #include <linux/gfp.h>
70960+#include <linux/hugetlb.h>
70961
70962 #include "internal.h"
70963
70964@@ -71,6 +72,8 @@ static void __put_compound_page(struct p
70965
70966 __page_cache_release(page);
70967 dtor = get_compound_page_dtor(page);
70968+ if (!PageHuge(page))
70969+ BUG_ON(dtor != free_compound_page);
70970 (*dtor)(page);
70971 }
70972
70973diff -urNp linux-3.1.1/mm/swapfile.c linux-3.1.1/mm/swapfile.c
70974--- linux-3.1.1/mm/swapfile.c 2011-11-11 15:19:27.000000000 -0500
70975+++ linux-3.1.1/mm/swapfile.c 2011-11-16 18:39:08.000000000 -0500
70976@@ -62,7 +62,7 @@ static DEFINE_MUTEX(swapon_mutex);
70977
70978 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
70979 /* Activity counter to indicate that a swapon or swapoff has occurred */
70980-static atomic_t proc_poll_event = ATOMIC_INIT(0);
70981+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
70982
70983 static inline unsigned char swap_count(unsigned char ent)
70984 {
70985@@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
70986 }
70987 filp_close(swap_file, NULL);
70988 err = 0;
70989- atomic_inc(&proc_poll_event);
70990+ atomic_inc_unchecked(&proc_poll_event);
70991 wake_up_interruptible(&proc_poll_wait);
70992
70993 out_dput:
70994@@ -1687,8 +1687,8 @@ static unsigned swaps_poll(struct file *
70995
70996 poll_wait(file, &proc_poll_wait, wait);
70997
70998- if (seq->poll_event != atomic_read(&proc_poll_event)) {
70999- seq->poll_event = atomic_read(&proc_poll_event);
71000+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
71001+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
71002 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
71003 }
71004
71005@@ -1786,7 +1786,7 @@ static int swaps_open(struct inode *inod
71006 return ret;
71007
71008 seq = file->private_data;
71009- seq->poll_event = atomic_read(&proc_poll_event);
71010+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
71011 return 0;
71012 }
71013
71014@@ -2124,7 +2124,7 @@ SYSCALL_DEFINE2(swapon, const char __use
71015 (p->flags & SWP_DISCARDABLE) ? "D" : "");
71016
71017 mutex_unlock(&swapon_mutex);
71018- atomic_inc(&proc_poll_event);
71019+ atomic_inc_unchecked(&proc_poll_event);
71020 wake_up_interruptible(&proc_poll_wait);
71021
71022 if (S_ISREG(inode->i_mode))
71023diff -urNp linux-3.1.1/mm/util.c linux-3.1.1/mm/util.c
71024--- linux-3.1.1/mm/util.c 2011-11-11 15:19:27.000000000 -0500
71025+++ linux-3.1.1/mm/util.c 2011-11-16 18:39:08.000000000 -0500
71026@@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
71027 * allocated buffer. Use this if you don't want to free the buffer immediately
71028 * like, for example, with RCU.
71029 */
71030+#undef __krealloc
71031 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
71032 {
71033 void *ret;
71034@@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
71035 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
71036 * %NULL pointer, the object pointed to is freed.
71037 */
71038+#undef krealloc
71039 void *krealloc(const void *p, size_t new_size, gfp_t flags)
71040 {
71041 void *ret;
71042@@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *m
71043 void arch_pick_mmap_layout(struct mm_struct *mm)
71044 {
71045 mm->mmap_base = TASK_UNMAPPED_BASE;
71046+
71047+#ifdef CONFIG_PAX_RANDMMAP
71048+ if (mm->pax_flags & MF_PAX_RANDMMAP)
71049+ mm->mmap_base += mm->delta_mmap;
71050+#endif
71051+
71052 mm->get_unmapped_area = arch_get_unmapped_area;
71053 mm->unmap_area = arch_unmap_area;
71054 }
71055diff -urNp linux-3.1.1/mm/vmalloc.c linux-3.1.1/mm/vmalloc.c
71056--- linux-3.1.1/mm/vmalloc.c 2011-11-11 15:19:27.000000000 -0500
71057+++ linux-3.1.1/mm/vmalloc.c 2011-11-16 18:40:44.000000000 -0500
71058@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
71059
71060 pte = pte_offset_kernel(pmd, addr);
71061 do {
71062- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71063- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71064+
71065+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71066+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
71067+ BUG_ON(!pte_exec(*pte));
71068+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
71069+ continue;
71070+ }
71071+#endif
71072+
71073+ {
71074+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71075+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71076+ }
71077 } while (pte++, addr += PAGE_SIZE, addr != end);
71078 }
71079
71080@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
71081 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
71082 {
71083 pte_t *pte;
71084+ int ret = -ENOMEM;
71085
71086 /*
71087 * nr is a running index into the array which helps higher level
71088@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
71089 pte = pte_alloc_kernel(pmd, addr);
71090 if (!pte)
71091 return -ENOMEM;
71092+
71093+ pax_open_kernel();
71094 do {
71095 struct page *page = pages[*nr];
71096
71097- if (WARN_ON(!pte_none(*pte)))
71098- return -EBUSY;
71099- if (WARN_ON(!page))
71100- return -ENOMEM;
71101+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71102+ if (pgprot_val(prot) & _PAGE_NX)
71103+#endif
71104+
71105+ if (WARN_ON(!pte_none(*pte))) {
71106+ ret = -EBUSY;
71107+ goto out;
71108+ }
71109+ if (WARN_ON(!page)) {
71110+ ret = -ENOMEM;
71111+ goto out;
71112+ }
71113 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
71114 (*nr)++;
71115 } while (pte++, addr += PAGE_SIZE, addr != end);
71116- return 0;
71117+ ret = 0;
71118+out:
71119+ pax_close_kernel();
71120+ return ret;
71121 }
71122
71123 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
71124@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
71125 * and fall back on vmalloc() if that fails. Others
71126 * just put it in the vmalloc space.
71127 */
71128-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
71129+#ifdef CONFIG_MODULES
71130+#ifdef MODULES_VADDR
71131 unsigned long addr = (unsigned long)x;
71132 if (addr >= MODULES_VADDR && addr < MODULES_END)
71133 return 1;
71134 #endif
71135+
71136+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71137+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
71138+ return 1;
71139+#endif
71140+
71141+#endif
71142+
71143 return is_vmalloc_addr(x);
71144 }
71145
71146@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
71147
71148 if (!pgd_none(*pgd)) {
71149 pud_t *pud = pud_offset(pgd, addr);
71150+#ifdef CONFIG_X86
71151+ if (!pud_large(*pud))
71152+#endif
71153 if (!pud_none(*pud)) {
71154 pmd_t *pmd = pmd_offset(pud, addr);
71155+#ifdef CONFIG_X86
71156+ if (!pmd_large(*pmd))
71157+#endif
71158 if (!pmd_none(*pmd)) {
71159 pte_t *ptep, pte;
71160
71161@@ -1294,6 +1334,16 @@ static struct vm_struct *__get_vm_area_n
71162 struct vm_struct *area;
71163
71164 BUG_ON(in_interrupt());
71165+
71166+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71167+ if (flags & VM_KERNEXEC) {
71168+ if (start != VMALLOC_START || end != VMALLOC_END)
71169+ return NULL;
71170+ start = (unsigned long)MODULES_EXEC_VADDR;
71171+ end = (unsigned long)MODULES_EXEC_END;
71172+ }
71173+#endif
71174+
71175 if (flags & VM_IOREMAP) {
71176 int bit = fls(size);
71177
71178@@ -1526,6 +1576,11 @@ void *vmap(struct page **pages, unsigned
71179 if (count > totalram_pages)
71180 return NULL;
71181
71182+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71183+ if (!(pgprot_val(prot) & _PAGE_NX))
71184+ flags |= VM_KERNEXEC;
71185+#endif
71186+
71187 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
71188 __builtin_return_address(0));
71189 if (!area)
71190@@ -1627,6 +1682,13 @@ void *__vmalloc_node_range(unsigned long
71191 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
71192 return NULL;
71193
71194+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71195+ if (!(pgprot_val(prot) & _PAGE_NX))
71196+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
71197+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
71198+ else
71199+#endif
71200+
71201 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
71202 start, end, node, gfp_mask, caller);
71203
71204@@ -1672,6 +1734,7 @@ static void *__vmalloc_node(unsigned lon
71205 gfp_mask, prot, node, caller);
71206 }
71207
71208+#undef __vmalloc
71209 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
71210 {
71211 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
71212@@ -1695,6 +1758,7 @@ static inline void *__vmalloc_node_flags
71213 * For tight control over page level allocator and protection flags
71214 * use __vmalloc() instead.
71215 */
71216+#undef vmalloc
71217 void *vmalloc(unsigned long size)
71218 {
71219 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
71220@@ -1711,6 +1775,7 @@ EXPORT_SYMBOL(vmalloc);
71221 * For tight control over page level allocator and protection flags
71222 * use __vmalloc() instead.
71223 */
71224+#undef vzalloc
71225 void *vzalloc(unsigned long size)
71226 {
71227 return __vmalloc_node_flags(size, -1,
71228@@ -1725,6 +1790,7 @@ EXPORT_SYMBOL(vzalloc);
71229 * The resulting memory area is zeroed so it can be mapped to userspace
71230 * without leaking data.
71231 */
71232+#undef vmalloc_user
71233 void *vmalloc_user(unsigned long size)
71234 {
71235 struct vm_struct *area;
71236@@ -1752,6 +1818,7 @@ EXPORT_SYMBOL(vmalloc_user);
71237 * For tight control over page level allocator and protection flags
71238 * use __vmalloc() instead.
71239 */
71240+#undef vmalloc_node
71241 void *vmalloc_node(unsigned long size, int node)
71242 {
71243 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
71244@@ -1771,6 +1838,7 @@ EXPORT_SYMBOL(vmalloc_node);
71245 * For tight control over page level allocator and protection flags
71246 * use __vmalloc_node() instead.
71247 */
71248+#undef vzalloc_node
71249 void *vzalloc_node(unsigned long size, int node)
71250 {
71251 return __vmalloc_node_flags(size, node,
71252@@ -1793,10 +1861,10 @@ EXPORT_SYMBOL(vzalloc_node);
71253 * For tight control over page level allocator and protection flags
71254 * use __vmalloc() instead.
71255 */
71256-
71257+#undef vmalloc_exec
71258 void *vmalloc_exec(unsigned long size)
71259 {
71260- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
71261+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
71262 -1, __builtin_return_address(0));
71263 }
71264
71265@@ -1815,6 +1883,7 @@ void *vmalloc_exec(unsigned long size)
71266 * Allocate enough 32bit PA addressable pages to cover @size from the
71267 * page level allocator and map them into contiguous kernel virtual space.
71268 */
71269+#undef vmalloc_32
71270 void *vmalloc_32(unsigned long size)
71271 {
71272 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
71273@@ -1829,6 +1898,7 @@ EXPORT_SYMBOL(vmalloc_32);
71274 * The resulting memory area is 32bit addressable and zeroed so it can be
71275 * mapped to userspace without leaking data.
71276 */
71277+#undef vmalloc_32_user
71278 void *vmalloc_32_user(unsigned long size)
71279 {
71280 struct vm_struct *area;
71281@@ -2091,6 +2161,8 @@ int remap_vmalloc_range(struct vm_area_s
71282 unsigned long uaddr = vma->vm_start;
71283 unsigned long usize = vma->vm_end - vma->vm_start;
71284
71285+ BUG_ON(vma->vm_mirror);
71286+
71287 if ((PAGE_SIZE-1) & (unsigned long)addr)
71288 return -EINVAL;
71289
71290diff -urNp linux-3.1.1/mm/vmstat.c linux-3.1.1/mm/vmstat.c
71291--- linux-3.1.1/mm/vmstat.c 2011-11-11 15:19:27.000000000 -0500
71292+++ linux-3.1.1/mm/vmstat.c 2011-11-16 18:40:44.000000000 -0500
71293@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
71294 *
71295 * vm_stat contains the global counters
71296 */
71297-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
71298+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
71299 EXPORT_SYMBOL(vm_stat);
71300
71301 #ifdef CONFIG_SMP
71302@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
71303 v = p->vm_stat_diff[i];
71304 p->vm_stat_diff[i] = 0;
71305 local_irq_restore(flags);
71306- atomic_long_add(v, &zone->vm_stat[i]);
71307+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
71308 global_diff[i] += v;
71309 #ifdef CONFIG_NUMA
71310 /* 3 seconds idle till flush */
71311@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
71312
71313 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
71314 if (global_diff[i])
71315- atomic_long_add(global_diff[i], &vm_stat[i]);
71316+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
71317 }
71318
71319 #endif
71320@@ -1207,10 +1207,20 @@ static int __init setup_vmstat(void)
71321 start_cpu_timer(cpu);
71322 #endif
71323 #ifdef CONFIG_PROC_FS
71324- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
71325- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
71326- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
71327- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
71328+ {
71329+ mode_t gr_mode = S_IRUGO;
71330+#ifdef CONFIG_GRKERNSEC_PROC_ADD
71331+ gr_mode = S_IRUSR;
71332+#endif
71333+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
71334+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
71335+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71336+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
71337+#else
71338+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
71339+#endif
71340+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
71341+ }
71342 #endif
71343 return 0;
71344 }
71345diff -urNp linux-3.1.1/net/8021q/vlan.c linux-3.1.1/net/8021q/vlan.c
71346--- linux-3.1.1/net/8021q/vlan.c 2011-11-11 15:19:27.000000000 -0500
71347+++ linux-3.1.1/net/8021q/vlan.c 2011-11-16 18:39:08.000000000 -0500
71348@@ -588,8 +588,7 @@ static int vlan_ioctl_handler(struct net
71349 err = -EPERM;
71350 if (!capable(CAP_NET_ADMIN))
71351 break;
71352- if ((args.u.name_type >= 0) &&
71353- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
71354+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
71355 struct vlan_net *vn;
71356
71357 vn = net_generic(net, vlan_net_id);
71358diff -urNp linux-3.1.1/net/9p/trans_fd.c linux-3.1.1/net/9p/trans_fd.c
71359--- linux-3.1.1/net/9p/trans_fd.c 2011-11-11 15:19:27.000000000 -0500
71360+++ linux-3.1.1/net/9p/trans_fd.c 2011-11-16 18:39:08.000000000 -0500
71361@@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client
71362 oldfs = get_fs();
71363 set_fs(get_ds());
71364 /* The cast to a user pointer is valid due to the set_fs() */
71365- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
71366+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
71367 set_fs(oldfs);
71368
71369 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
71370diff -urNp linux-3.1.1/net/9p/trans_virtio.c linux-3.1.1/net/9p/trans_virtio.c
71371--- linux-3.1.1/net/9p/trans_virtio.c 2011-11-11 15:19:27.000000000 -0500
71372+++ linux-3.1.1/net/9p/trans_virtio.c 2011-11-16 18:39:08.000000000 -0500
71373@@ -327,7 +327,7 @@ req_retry_pinned:
71374 } else {
71375 char *pbuf;
71376 if (req->tc->pubuf)
71377- pbuf = (__force char *) req->tc->pubuf;
71378+ pbuf = (char __force_kernel *) req->tc->pubuf;
71379 else
71380 pbuf = req->tc->pkbuf;
71381 outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf,
71382@@ -357,7 +357,7 @@ req_retry_pinned:
71383 } else {
71384 char *pbuf;
71385 if (req->tc->pubuf)
71386- pbuf = (__force char *) req->tc->pubuf;
71387+ pbuf = (char __force_kernel *) req->tc->pubuf;
71388 else
71389 pbuf = req->tc->pkbuf;
71390
71391diff -urNp linux-3.1.1/net/atm/atm_misc.c linux-3.1.1/net/atm/atm_misc.c
71392--- linux-3.1.1/net/atm/atm_misc.c 2011-11-11 15:19:27.000000000 -0500
71393+++ linux-3.1.1/net/atm/atm_misc.c 2011-11-16 18:39:08.000000000 -0500
71394@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
71395 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
71396 return 1;
71397 atm_return(vcc, truesize);
71398- atomic_inc(&vcc->stats->rx_drop);
71399+ atomic_inc_unchecked(&vcc->stats->rx_drop);
71400 return 0;
71401 }
71402 EXPORT_SYMBOL(atm_charge);
71403@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
71404 }
71405 }
71406 atm_return(vcc, guess);
71407- atomic_inc(&vcc->stats->rx_drop);
71408+ atomic_inc_unchecked(&vcc->stats->rx_drop);
71409 return NULL;
71410 }
71411 EXPORT_SYMBOL(atm_alloc_charge);
71412@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
71413
71414 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
71415 {
71416-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71417+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71418 __SONET_ITEMS
71419 #undef __HANDLE_ITEM
71420 }
71421@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
71422
71423 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
71424 {
71425-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71426+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
71427 __SONET_ITEMS
71428 #undef __HANDLE_ITEM
71429 }
71430diff -urNp linux-3.1.1/net/atm/lec.h linux-3.1.1/net/atm/lec.h
71431--- linux-3.1.1/net/atm/lec.h 2011-11-11 15:19:27.000000000 -0500
71432+++ linux-3.1.1/net/atm/lec.h 2011-11-16 18:39:08.000000000 -0500
71433@@ -48,7 +48,7 @@ struct lane2_ops {
71434 const u8 *tlvs, u32 sizeoftlvs);
71435 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
71436 const u8 *tlvs, u32 sizeoftlvs);
71437-};
71438+} __no_const;
71439
71440 /*
71441 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
71442diff -urNp linux-3.1.1/net/atm/mpc.h linux-3.1.1/net/atm/mpc.h
71443--- linux-3.1.1/net/atm/mpc.h 2011-11-11 15:19:27.000000000 -0500
71444+++ linux-3.1.1/net/atm/mpc.h 2011-11-16 18:39:08.000000000 -0500
71445@@ -33,7 +33,7 @@ struct mpoa_client {
71446 struct mpc_parameters parameters; /* parameters for this client */
71447
71448 const struct net_device_ops *old_ops;
71449- struct net_device_ops new_ops;
71450+ net_device_ops_no_const new_ops;
71451 };
71452
71453
71454diff -urNp linux-3.1.1/net/atm/mpoa_caches.c linux-3.1.1/net/atm/mpoa_caches.c
71455--- linux-3.1.1/net/atm/mpoa_caches.c 2011-11-11 15:19:27.000000000 -0500
71456+++ linux-3.1.1/net/atm/mpoa_caches.c 2011-11-16 18:40:44.000000000 -0500
71457@@ -255,6 +255,8 @@ static void check_resolving_entries(stru
71458 struct timeval now;
71459 struct k_message msg;
71460
71461+ pax_track_stack();
71462+
71463 do_gettimeofday(&now);
71464
71465 read_lock_bh(&client->ingress_lock);
71466diff -urNp linux-3.1.1/net/atm/proc.c linux-3.1.1/net/atm/proc.c
71467--- linux-3.1.1/net/atm/proc.c 2011-11-11 15:19:27.000000000 -0500
71468+++ linux-3.1.1/net/atm/proc.c 2011-11-16 18:39:08.000000000 -0500
71469@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
71470 const struct k_atm_aal_stats *stats)
71471 {
71472 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
71473- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
71474- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
71475- atomic_read(&stats->rx_drop));
71476+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
71477+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
71478+ atomic_read_unchecked(&stats->rx_drop));
71479 }
71480
71481 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
71482diff -urNp linux-3.1.1/net/atm/resources.c linux-3.1.1/net/atm/resources.c
71483--- linux-3.1.1/net/atm/resources.c 2011-11-11 15:19:27.000000000 -0500
71484+++ linux-3.1.1/net/atm/resources.c 2011-11-16 18:39:08.000000000 -0500
71485@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
71486 static void copy_aal_stats(struct k_atm_aal_stats *from,
71487 struct atm_aal_stats *to)
71488 {
71489-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71490+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71491 __AAL_STAT_ITEMS
71492 #undef __HANDLE_ITEM
71493 }
71494@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
71495 static void subtract_aal_stats(struct k_atm_aal_stats *from,
71496 struct atm_aal_stats *to)
71497 {
71498-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71499+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
71500 __AAL_STAT_ITEMS
71501 #undef __HANDLE_ITEM
71502 }
71503diff -urNp linux-3.1.1/net/batman-adv/hard-interface.c linux-3.1.1/net/batman-adv/hard-interface.c
71504--- linux-3.1.1/net/batman-adv/hard-interface.c 2011-11-11 15:19:27.000000000 -0500
71505+++ linux-3.1.1/net/batman-adv/hard-interface.c 2011-11-16 18:39:08.000000000 -0500
71506@@ -347,8 +347,8 @@ int hardif_enable_interface(struct hard_
71507 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
71508 dev_add_pack(&hard_iface->batman_adv_ptype);
71509
71510- atomic_set(&hard_iface->seqno, 1);
71511- atomic_set(&hard_iface->frag_seqno, 1);
71512+ atomic_set_unchecked(&hard_iface->seqno, 1);
71513+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
71514 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
71515 hard_iface->net_dev->name);
71516
71517diff -urNp linux-3.1.1/net/batman-adv/routing.c linux-3.1.1/net/batman-adv/routing.c
71518--- linux-3.1.1/net/batman-adv/routing.c 2011-11-11 15:19:27.000000000 -0500
71519+++ linux-3.1.1/net/batman-adv/routing.c 2011-11-16 18:39:08.000000000 -0500
71520@@ -656,7 +656,7 @@ void receive_bat_packet(const struct eth
71521 return;
71522
71523 /* could be changed by schedule_own_packet() */
71524- if_incoming_seqno = atomic_read(&if_incoming->seqno);
71525+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
71526
71527 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
71528
71529diff -urNp linux-3.1.1/net/batman-adv/send.c linux-3.1.1/net/batman-adv/send.c
71530--- linux-3.1.1/net/batman-adv/send.c 2011-11-11 15:19:27.000000000 -0500
71531+++ linux-3.1.1/net/batman-adv/send.c 2011-11-16 18:39:08.000000000 -0500
71532@@ -326,7 +326,7 @@ void schedule_own_packet(struct hard_ifa
71533
71534 /* change sequence number to network order */
71535 batman_packet->seqno =
71536- htonl((uint32_t)atomic_read(&hard_iface->seqno));
71537+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
71538
71539 batman_packet->ttvn = atomic_read(&bat_priv->ttvn);
71540 batman_packet->tt_crc = htons((uint16_t)atomic_read(&bat_priv->tt_crc));
71541@@ -343,7 +343,7 @@ void schedule_own_packet(struct hard_ifa
71542 else
71543 batman_packet->gw_flags = NO_FLAGS;
71544
71545- atomic_inc(&hard_iface->seqno);
71546+ atomic_inc_unchecked(&hard_iface->seqno);
71547
71548 slide_own_bcast_window(hard_iface);
71549 send_time = own_send_time(bat_priv);
71550diff -urNp linux-3.1.1/net/batman-adv/soft-interface.c linux-3.1.1/net/batman-adv/soft-interface.c
71551--- linux-3.1.1/net/batman-adv/soft-interface.c 2011-11-11 15:19:27.000000000 -0500
71552+++ linux-3.1.1/net/batman-adv/soft-interface.c 2011-11-16 18:39:08.000000000 -0500
71553@@ -632,7 +632,7 @@ static int interface_tx(struct sk_buff *
71554
71555 /* set broadcast sequence number */
71556 bcast_packet->seqno =
71557- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
71558+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
71559
71560 add_bcast_packet_to_list(bat_priv, skb, 1);
71561
71562@@ -824,7 +824,7 @@ struct net_device *softif_create(const c
71563 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
71564
71565 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
71566- atomic_set(&bat_priv->bcast_seqno, 1);
71567+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
71568 atomic_set(&bat_priv->ttvn, 0);
71569 atomic_set(&bat_priv->tt_local_changes, 0);
71570 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
71571diff -urNp linux-3.1.1/net/batman-adv/types.h linux-3.1.1/net/batman-adv/types.h
71572--- linux-3.1.1/net/batman-adv/types.h 2011-11-11 15:19:27.000000000 -0500
71573+++ linux-3.1.1/net/batman-adv/types.h 2011-11-16 18:39:08.000000000 -0500
71574@@ -38,8 +38,8 @@ struct hard_iface {
71575 int16_t if_num;
71576 char if_status;
71577 struct net_device *net_dev;
71578- atomic_t seqno;
71579- atomic_t frag_seqno;
71580+ atomic_unchecked_t seqno;
71581+ atomic_unchecked_t frag_seqno;
71582 unsigned char *packet_buff;
71583 int packet_len;
71584 struct kobject *hardif_obj;
71585@@ -153,7 +153,7 @@ struct bat_priv {
71586 atomic_t orig_interval; /* uint */
71587 atomic_t hop_penalty; /* uint */
71588 atomic_t log_level; /* uint */
71589- atomic_t bcast_seqno;
71590+ atomic_unchecked_t bcast_seqno;
71591 atomic_t bcast_queue_left;
71592 atomic_t batman_queue_left;
71593 atomic_t ttvn; /* tranlation table version number */
71594diff -urNp linux-3.1.1/net/batman-adv/unicast.c linux-3.1.1/net/batman-adv/unicast.c
71595--- linux-3.1.1/net/batman-adv/unicast.c 2011-11-11 15:19:27.000000000 -0500
71596+++ linux-3.1.1/net/batman-adv/unicast.c 2011-11-16 18:39:08.000000000 -0500
71597@@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, s
71598 frag1->flags = UNI_FRAG_HEAD | large_tail;
71599 frag2->flags = large_tail;
71600
71601- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
71602+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
71603 frag1->seqno = htons(seqno - 1);
71604 frag2->seqno = htons(seqno);
71605
71606diff -urNp linux-3.1.1/net/bluetooth/hci_conn.c linux-3.1.1/net/bluetooth/hci_conn.c
71607--- linux-3.1.1/net/bluetooth/hci_conn.c 2011-11-11 15:19:27.000000000 -0500
71608+++ linux-3.1.1/net/bluetooth/hci_conn.c 2011-11-16 18:39:08.000000000 -0500
71609@@ -218,7 +218,7 @@ void hci_le_start_enc(struct hci_conn *c
71610 cp.handle = cpu_to_le16(conn->handle);
71611 memcpy(cp.ltk, ltk, sizeof(cp.ltk));
71612 cp.ediv = ediv;
71613- memcpy(cp.rand, rand, sizeof(rand));
71614+ memcpy(cp.rand, rand, sizeof(cp.rand));
71615
71616 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
71617 }
71618@@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *c
71619 memset(&cp, 0, sizeof(cp));
71620
71621 cp.handle = cpu_to_le16(conn->handle);
71622- memcpy(cp.ltk, ltk, sizeof(ltk));
71623+ memcpy(cp.ltk, ltk, sizeof(cp.ltk));
71624
71625 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
71626 }
71627diff -urNp linux-3.1.1/net/bridge/br_multicast.c linux-3.1.1/net/bridge/br_multicast.c
71628--- linux-3.1.1/net/bridge/br_multicast.c 2011-11-11 15:19:27.000000000 -0500
71629+++ linux-3.1.1/net/bridge/br_multicast.c 2011-11-16 18:39:08.000000000 -0500
71630@@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct
71631 nexthdr = ip6h->nexthdr;
71632 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
71633
71634- if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
71635+ if (nexthdr != IPPROTO_ICMPV6)
71636 return 0;
71637
71638 /* Okay, we found ICMPv6 header */
71639diff -urNp linux-3.1.1/net/bridge/netfilter/ebtables.c linux-3.1.1/net/bridge/netfilter/ebtables.c
71640--- linux-3.1.1/net/bridge/netfilter/ebtables.c 2011-11-11 15:19:27.000000000 -0500
71641+++ linux-3.1.1/net/bridge/netfilter/ebtables.c 2011-11-16 18:40:44.000000000 -0500
71642@@ -1513,7 +1513,7 @@ static int do_ebt_get_ctl(struct sock *s
71643 tmp.valid_hooks = t->table->valid_hooks;
71644 }
71645 mutex_unlock(&ebt_mutex);
71646- if (copy_to_user(user, &tmp, *len) != 0){
71647+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
71648 BUGPRINT("c2u Didn't work\n");
71649 ret = -EFAULT;
71650 break;
71651@@ -1781,6 +1781,8 @@ static int compat_copy_everything_to_use
71652 int ret;
71653 void __user *pos;
71654
71655+ pax_track_stack();
71656+
71657 memset(&tinfo, 0, sizeof(tinfo));
71658
71659 if (cmd == EBT_SO_GET_ENTRIES) {
71660diff -urNp linux-3.1.1/net/caif/caif_socket.c linux-3.1.1/net/caif/caif_socket.c
71661--- linux-3.1.1/net/caif/caif_socket.c 2011-11-11 15:19:27.000000000 -0500
71662+++ linux-3.1.1/net/caif/caif_socket.c 2011-11-16 18:39:08.000000000 -0500
71663@@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
71664 #ifdef CONFIG_DEBUG_FS
71665 struct debug_fs_counter {
71666 atomic_t caif_nr_socks;
71667- atomic_t caif_sock_create;
71668- atomic_t num_connect_req;
71669- atomic_t num_connect_resp;
71670- atomic_t num_connect_fail_resp;
71671- atomic_t num_disconnect;
71672- atomic_t num_remote_shutdown_ind;
71673- atomic_t num_tx_flow_off_ind;
71674- atomic_t num_tx_flow_on_ind;
71675- atomic_t num_rx_flow_off;
71676- atomic_t num_rx_flow_on;
71677+ atomic_unchecked_t caif_sock_create;
71678+ atomic_unchecked_t num_connect_req;
71679+ atomic_unchecked_t num_connect_resp;
71680+ atomic_unchecked_t num_connect_fail_resp;
71681+ atomic_unchecked_t num_disconnect;
71682+ atomic_unchecked_t num_remote_shutdown_ind;
71683+ atomic_unchecked_t num_tx_flow_off_ind;
71684+ atomic_unchecked_t num_tx_flow_on_ind;
71685+ atomic_unchecked_t num_rx_flow_off;
71686+ atomic_unchecked_t num_rx_flow_on;
71687 };
71688 static struct debug_fs_counter cnt;
71689 #define dbfs_atomic_inc(v) atomic_inc_return(v)
71690+#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
71691 #define dbfs_atomic_dec(v) atomic_dec_return(v)
71692 #else
71693 #define dbfs_atomic_inc(v) 0
71694@@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct soc
71695 atomic_read(&cf_sk->sk.sk_rmem_alloc),
71696 sk_rcvbuf_lowwater(cf_sk));
71697 set_rx_flow_off(cf_sk);
71698- dbfs_atomic_inc(&cnt.num_rx_flow_off);
71699+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
71700 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
71701 }
71702
71703@@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct soc
71704 set_rx_flow_off(cf_sk);
71705 if (net_ratelimit())
71706 pr_debug("sending flow OFF due to rmem_schedule\n");
71707- dbfs_atomic_inc(&cnt.num_rx_flow_off);
71708+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
71709 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
71710 }
71711 skb->dev = NULL;
71712@@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer
71713 switch (flow) {
71714 case CAIF_CTRLCMD_FLOW_ON_IND:
71715 /* OK from modem to start sending again */
71716- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
71717+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
71718 set_tx_flow_on(cf_sk);
71719 cf_sk->sk.sk_state_change(&cf_sk->sk);
71720 break;
71721
71722 case CAIF_CTRLCMD_FLOW_OFF_IND:
71723 /* Modem asks us to shut up */
71724- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
71725+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
71726 set_tx_flow_off(cf_sk);
71727 cf_sk->sk.sk_state_change(&cf_sk->sk);
71728 break;
71729@@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer
71730 /* We're now connected */
71731 caif_client_register_refcnt(&cf_sk->layer,
71732 cfsk_hold, cfsk_put);
71733- dbfs_atomic_inc(&cnt.num_connect_resp);
71734+ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
71735 cf_sk->sk.sk_state = CAIF_CONNECTED;
71736 set_tx_flow_on(cf_sk);
71737 cf_sk->sk.sk_state_change(&cf_sk->sk);
71738@@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer
71739
71740 case CAIF_CTRLCMD_INIT_FAIL_RSP:
71741 /* Connect request failed */
71742- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
71743+ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
71744 cf_sk->sk.sk_err = ECONNREFUSED;
71745 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
71746 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
71747@@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer
71748
71749 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
71750 /* Modem has closed this connection, or device is down. */
71751- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
71752+ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
71753 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
71754 cf_sk->sk.sk_err = ECONNRESET;
71755 set_rx_flow_on(cf_sk);
71756@@ -297,7 +298,7 @@ static void caif_check_flow_release(stru
71757 return;
71758
71759 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
71760- dbfs_atomic_inc(&cnt.num_rx_flow_on);
71761+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
71762 set_rx_flow_on(cf_sk);
71763 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
71764 }
71765@@ -854,7 +855,7 @@ static int caif_connect(struct socket *s
71766 /*ifindex = id of the interface.*/
71767 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
71768
71769- dbfs_atomic_inc(&cnt.num_connect_req);
71770+ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
71771 cf_sk->layer.receive = caif_sktrecv_cb;
71772
71773 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
71774@@ -943,7 +944,7 @@ static int caif_release(struct socket *s
71775 spin_unlock_bh(&sk->sk_receive_queue.lock);
71776 sock->sk = NULL;
71777
71778- dbfs_atomic_inc(&cnt.num_disconnect);
71779+ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
71780
71781 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
71782 if (cf_sk->debugfs_socket_dir != NULL)
71783@@ -1122,7 +1123,7 @@ static int caif_create(struct net *net,
71784 cf_sk->conn_req.protocol = protocol;
71785 /* Increase the number of sockets created. */
71786 dbfs_atomic_inc(&cnt.caif_nr_socks);
71787- num = dbfs_atomic_inc(&cnt.caif_sock_create);
71788+ num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
71789 #ifdef CONFIG_DEBUG_FS
71790 if (!IS_ERR(debugfsdir)) {
71791
71792diff -urNp linux-3.1.1/net/caif/cfctrl.c linux-3.1.1/net/caif/cfctrl.c
71793--- linux-3.1.1/net/caif/cfctrl.c 2011-11-11 15:19:27.000000000 -0500
71794+++ linux-3.1.1/net/caif/cfctrl.c 2011-11-16 18:40:44.000000000 -0500
71795@@ -9,6 +9,7 @@
71796 #include <linux/stddef.h>
71797 #include <linux/spinlock.h>
71798 #include <linux/slab.h>
71799+#include <linux/sched.h>
71800 #include <net/caif/caif_layer.h>
71801 #include <net/caif/cfpkt.h>
71802 #include <net/caif/cfctrl.h>
71803@@ -45,8 +46,8 @@ struct cflayer *cfctrl_create(void)
71804 dev_info.id = 0xff;
71805 memset(this, 0, sizeof(*this));
71806 cfsrvl_init(&this->serv, 0, &dev_info, false);
71807- atomic_set(&this->req_seq_no, 1);
71808- atomic_set(&this->rsp_seq_no, 1);
71809+ atomic_set_unchecked(&this->req_seq_no, 1);
71810+ atomic_set_unchecked(&this->rsp_seq_no, 1);
71811 this->serv.layer.receive = cfctrl_recv;
71812 sprintf(this->serv.layer.name, "ctrl");
71813 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
71814@@ -132,8 +133,8 @@ static void cfctrl_insert_req(struct cfc
71815 struct cfctrl_request_info *req)
71816 {
71817 spin_lock_bh(&ctrl->info_list_lock);
71818- atomic_inc(&ctrl->req_seq_no);
71819- req->sequence_no = atomic_read(&ctrl->req_seq_no);
71820+ atomic_inc_unchecked(&ctrl->req_seq_no);
71821+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
71822 list_add_tail(&req->list, &ctrl->list);
71823 spin_unlock_bh(&ctrl->info_list_lock);
71824 }
71825@@ -151,7 +152,7 @@ static struct cfctrl_request_info *cfctr
71826 if (p != first)
71827 pr_warn("Requests are not received in order\n");
71828
71829- atomic_set(&ctrl->rsp_seq_no,
71830+ atomic_set_unchecked(&ctrl->rsp_seq_no,
71831 p->sequence_no);
71832 list_del(&p->list);
71833 goto out;
71834@@ -364,6 +365,7 @@ static int cfctrl_recv(struct cflayer *l
71835 struct cfctrl *cfctrl = container_obj(layer);
71836 struct cfctrl_request_info rsp, *req;
71837
71838+ pax_track_stack();
71839
71840 cfpkt_extr_head(pkt, &cmdrsp, 1);
71841 cmd = cmdrsp & CFCTRL_CMD_MASK;
71842diff -urNp linux-3.1.1/net/compat.c linux-3.1.1/net/compat.c
71843--- linux-3.1.1/net/compat.c 2011-11-11 15:19:27.000000000 -0500
71844+++ linux-3.1.1/net/compat.c 2011-11-16 18:39:08.000000000 -0500
71845@@ -70,9 +70,9 @@ int get_compat_msghdr(struct msghdr *kms
71846 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
71847 __get_user(kmsg->msg_flags, &umsg->msg_flags))
71848 return -EFAULT;
71849- kmsg->msg_name = compat_ptr(tmp1);
71850- kmsg->msg_iov = compat_ptr(tmp2);
71851- kmsg->msg_control = compat_ptr(tmp3);
71852+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
71853+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
71854+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
71855 return 0;
71856 }
71857
71858@@ -84,7 +84,7 @@ int verify_compat_iovec(struct msghdr *k
71859
71860 if (kern_msg->msg_namelen) {
71861 if (mode == VERIFY_READ) {
71862- int err = move_addr_to_kernel(kern_msg->msg_name,
71863+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
71864 kern_msg->msg_namelen,
71865 kern_address);
71866 if (err < 0)
71867@@ -95,7 +95,7 @@ int verify_compat_iovec(struct msghdr *k
71868 kern_msg->msg_name = NULL;
71869
71870 tot_len = iov_from_user_compat_to_kern(kern_iov,
71871- (struct compat_iovec __user *)kern_msg->msg_iov,
71872+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
71873 kern_msg->msg_iovlen);
71874 if (tot_len >= 0)
71875 kern_msg->msg_iov = kern_iov;
71876@@ -115,20 +115,20 @@ int verify_compat_iovec(struct msghdr *k
71877
71878 #define CMSG_COMPAT_FIRSTHDR(msg) \
71879 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
71880- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
71881+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
71882 (struct compat_cmsghdr __user *)NULL)
71883
71884 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
71885 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
71886 (ucmlen) <= (unsigned long) \
71887 ((mhdr)->msg_controllen - \
71888- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
71889+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
71890
71891 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
71892 struct compat_cmsghdr __user *cmsg, int cmsg_len)
71893 {
71894 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
71895- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
71896+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
71897 msg->msg_controllen)
71898 return NULL;
71899 return (struct compat_cmsghdr __user *)ptr;
71900@@ -220,7 +220,7 @@ int put_cmsg_compat(struct msghdr *kmsg,
71901 {
71902 struct compat_timeval ctv;
71903 struct compat_timespec cts[3];
71904- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
71905+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
71906 struct compat_cmsghdr cmhdr;
71907 int cmlen;
71908
71909@@ -272,7 +272,7 @@ int put_cmsg_compat(struct msghdr *kmsg,
71910
71911 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
71912 {
71913- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
71914+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
71915 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
71916 int fdnum = scm->fp->count;
71917 struct file **fp = scm->fp->fp;
71918@@ -369,7 +369,7 @@ static int do_set_sock_timeout(struct so
71919 return -EFAULT;
71920 old_fs = get_fs();
71921 set_fs(KERNEL_DS);
71922- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
71923+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
71924 set_fs(old_fs);
71925
71926 return err;
71927@@ -430,7 +430,7 @@ static int do_get_sock_timeout(struct so
71928 len = sizeof(ktime);
71929 old_fs = get_fs();
71930 set_fs(KERNEL_DS);
71931- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
71932+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
71933 set_fs(old_fs);
71934
71935 if (!err) {
71936@@ -565,7 +565,7 @@ int compat_mc_setsockopt(struct sock *so
71937 case MCAST_JOIN_GROUP:
71938 case MCAST_LEAVE_GROUP:
71939 {
71940- struct compat_group_req __user *gr32 = (void *)optval;
71941+ struct compat_group_req __user *gr32 = (void __user *)optval;
71942 struct group_req __user *kgr =
71943 compat_alloc_user_space(sizeof(struct group_req));
71944 u32 interface;
71945@@ -586,7 +586,7 @@ int compat_mc_setsockopt(struct sock *so
71946 case MCAST_BLOCK_SOURCE:
71947 case MCAST_UNBLOCK_SOURCE:
71948 {
71949- struct compat_group_source_req __user *gsr32 = (void *)optval;
71950+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
71951 struct group_source_req __user *kgsr = compat_alloc_user_space(
71952 sizeof(struct group_source_req));
71953 u32 interface;
71954@@ -607,7 +607,7 @@ int compat_mc_setsockopt(struct sock *so
71955 }
71956 case MCAST_MSFILTER:
71957 {
71958- struct compat_group_filter __user *gf32 = (void *)optval;
71959+ struct compat_group_filter __user *gf32 = (void __user *)optval;
71960 struct group_filter __user *kgf;
71961 u32 interface, fmode, numsrc;
71962
71963@@ -645,7 +645,7 @@ int compat_mc_getsockopt(struct sock *so
71964 char __user *optval, int __user *optlen,
71965 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
71966 {
71967- struct compat_group_filter __user *gf32 = (void *)optval;
71968+ struct compat_group_filter __user *gf32 = (void __user *)optval;
71969 struct group_filter __user *kgf;
71970 int __user *koptlen;
71971 u32 interface, fmode, numsrc;
71972diff -urNp linux-3.1.1/net/core/datagram.c linux-3.1.1/net/core/datagram.c
71973--- linux-3.1.1/net/core/datagram.c 2011-11-11 15:19:27.000000000 -0500
71974+++ linux-3.1.1/net/core/datagram.c 2011-11-16 18:39:08.000000000 -0500
71975@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
71976 }
71977
71978 kfree_skb(skb);
71979- atomic_inc(&sk->sk_drops);
71980+ atomic_inc_unchecked(&sk->sk_drops);
71981 sk_mem_reclaim_partial(sk);
71982
71983 return err;
71984diff -urNp linux-3.1.1/net/core/dev.c linux-3.1.1/net/core/dev.c
71985--- linux-3.1.1/net/core/dev.c 2011-11-11 15:19:27.000000000 -0500
71986+++ linux-3.1.1/net/core/dev.c 2011-11-16 18:40:44.000000000 -0500
71987@@ -1135,10 +1135,14 @@ void dev_load(struct net *net, const cha
71988 if (no_module && capable(CAP_NET_ADMIN))
71989 no_module = request_module("netdev-%s", name);
71990 if (no_module && capable(CAP_SYS_MODULE)) {
71991+#ifdef CONFIG_GRKERNSEC_MODHARDEN
71992+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
71993+#else
71994 if (!request_module("%s", name))
71995 pr_err("Loading kernel module for a network device "
71996 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
71997 "instead\n", name);
71998+#endif
71999 }
72000 }
72001 EXPORT_SYMBOL(dev_load);
72002@@ -1977,7 +1981,7 @@ static int illegal_highdma(struct net_de
72003
72004 struct dev_gso_cb {
72005 void (*destructor)(struct sk_buff *skb);
72006-};
72007+} __no_const;
72008
72009 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
72010
72011@@ -2930,7 +2934,7 @@ int netif_rx_ni(struct sk_buff *skb)
72012 }
72013 EXPORT_SYMBOL(netif_rx_ni);
72014
72015-static void net_tx_action(struct softirq_action *h)
72016+static void net_tx_action(void)
72017 {
72018 struct softnet_data *sd = &__get_cpu_var(softnet_data);
72019
72020@@ -3779,7 +3783,7 @@ void netif_napi_del(struct napi_struct *
72021 }
72022 EXPORT_SYMBOL(netif_napi_del);
72023
72024-static void net_rx_action(struct softirq_action *h)
72025+static void net_rx_action(void)
72026 {
72027 struct softnet_data *sd = &__get_cpu_var(softnet_data);
72028 unsigned long time_limit = jiffies + 2;
72029diff -urNp linux-3.1.1/net/core/flow.c linux-3.1.1/net/core/flow.c
72030--- linux-3.1.1/net/core/flow.c 2011-11-11 15:19:27.000000000 -0500
72031+++ linux-3.1.1/net/core/flow.c 2011-11-16 18:39:08.000000000 -0500
72032@@ -61,7 +61,7 @@ struct flow_cache {
72033 struct timer_list rnd_timer;
72034 };
72035
72036-atomic_t flow_cache_genid = ATOMIC_INIT(0);
72037+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
72038 EXPORT_SYMBOL(flow_cache_genid);
72039 static struct flow_cache flow_cache_global;
72040 static struct kmem_cache *flow_cachep __read_mostly;
72041@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsig
72042
72043 static int flow_entry_valid(struct flow_cache_entry *fle)
72044 {
72045- if (atomic_read(&flow_cache_genid) != fle->genid)
72046+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
72047 return 0;
72048 if (fle->object && !fle->object->ops->check(fle->object))
72049 return 0;
72050@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const
72051 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
72052 fcp->hash_count++;
72053 }
72054- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
72055+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
72056 flo = fle->object;
72057 if (!flo)
72058 goto ret_object;
72059@@ -280,7 +280,7 @@ nocache:
72060 }
72061 flo = resolver(net, key, family, dir, flo, ctx);
72062 if (fle) {
72063- fle->genid = atomic_read(&flow_cache_genid);
72064+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
72065 if (!IS_ERR(flo))
72066 fle->object = flo;
72067 else
72068diff -urNp linux-3.1.1/net/core/iovec.c linux-3.1.1/net/core/iovec.c
72069--- linux-3.1.1/net/core/iovec.c 2011-11-11 15:19:27.000000000 -0500
72070+++ linux-3.1.1/net/core/iovec.c 2011-11-16 18:39:08.000000000 -0500
72071@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struc
72072 if (m->msg_namelen) {
72073 if (mode == VERIFY_READ) {
72074 void __user *namep;
72075- namep = (void __user __force *) m->msg_name;
72076+ namep = (void __force_user *) m->msg_name;
72077 err = move_addr_to_kernel(namep, m->msg_namelen,
72078 address);
72079 if (err < 0)
72080@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struc
72081 }
72082
72083 size = m->msg_iovlen * sizeof(struct iovec);
72084- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
72085+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
72086 return -EFAULT;
72087
72088 m->msg_iov = iov;
72089diff -urNp linux-3.1.1/net/core/rtnetlink.c linux-3.1.1/net/core/rtnetlink.c
72090--- linux-3.1.1/net/core/rtnetlink.c 2011-11-11 15:19:27.000000000 -0500
72091+++ linux-3.1.1/net/core/rtnetlink.c 2011-11-16 18:39:08.000000000 -0500
72092@@ -57,7 +57,7 @@ struct rtnl_link {
72093 rtnl_doit_func doit;
72094 rtnl_dumpit_func dumpit;
72095 rtnl_calcit_func calcit;
72096-};
72097+} __no_const;
72098
72099 static DEFINE_MUTEX(rtnl_mutex);
72100 static u16 min_ifinfo_dump_size;
72101diff -urNp linux-3.1.1/net/core/scm.c linux-3.1.1/net/core/scm.c
72102--- linux-3.1.1/net/core/scm.c 2011-11-11 15:19:27.000000000 -0500
72103+++ linux-3.1.1/net/core/scm.c 2011-11-16 18:39:08.000000000 -0500
72104@@ -218,7 +218,7 @@ EXPORT_SYMBOL(__scm_send);
72105 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
72106 {
72107 struct cmsghdr __user *cm
72108- = (__force struct cmsghdr __user *)msg->msg_control;
72109+ = (struct cmsghdr __force_user *)msg->msg_control;
72110 struct cmsghdr cmhdr;
72111 int cmlen = CMSG_LEN(len);
72112 int err;
72113@@ -241,7 +241,7 @@ int put_cmsg(struct msghdr * msg, int le
72114 err = -EFAULT;
72115 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
72116 goto out;
72117- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
72118+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
72119 goto out;
72120 cmlen = CMSG_SPACE(len);
72121 if (msg->msg_controllen < cmlen)
72122@@ -257,7 +257,7 @@ EXPORT_SYMBOL(put_cmsg);
72123 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
72124 {
72125 struct cmsghdr __user *cm
72126- = (__force struct cmsghdr __user*)msg->msg_control;
72127+ = (struct cmsghdr __force_user *)msg->msg_control;
72128
72129 int fdmax = 0;
72130 int fdnum = scm->fp->count;
72131@@ -277,7 +277,7 @@ void scm_detach_fds(struct msghdr *msg,
72132 if (fdnum < fdmax)
72133 fdmax = fdnum;
72134
72135- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
72136+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
72137 i++, cmfptr++)
72138 {
72139 int new_fd;
72140diff -urNp linux-3.1.1/net/core/skbuff.c linux-3.1.1/net/core/skbuff.c
72141--- linux-3.1.1/net/core/skbuff.c 2011-11-11 15:19:27.000000000 -0500
72142+++ linux-3.1.1/net/core/skbuff.c 2011-11-16 18:40:44.000000000 -0500
72143@@ -1650,6 +1650,8 @@ int skb_splice_bits(struct sk_buff *skb,
72144 struct sock *sk = skb->sk;
72145 int ret = 0;
72146
72147+ pax_track_stack();
72148+
72149 if (splice_grow_spd(pipe, &spd))
72150 return -ENOMEM;
72151
72152diff -urNp linux-3.1.1/net/core/sock.c linux-3.1.1/net/core/sock.c
72153--- linux-3.1.1/net/core/sock.c 2011-11-11 15:19:27.000000000 -0500
72154+++ linux-3.1.1/net/core/sock.c 2011-11-16 18:40:44.000000000 -0500
72155@@ -293,7 +293,7 @@ int sock_queue_rcv_skb(struct sock *sk,
72156 */
72157 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
72158 (unsigned)sk->sk_rcvbuf) {
72159- atomic_inc(&sk->sk_drops);
72160+ atomic_inc_unchecked(&sk->sk_drops);
72161 trace_sock_rcvqueue_full(sk, skb);
72162 return -ENOMEM;
72163 }
72164@@ -303,7 +303,7 @@ int sock_queue_rcv_skb(struct sock *sk,
72165 return err;
72166
72167 if (!sk_rmem_schedule(sk, skb->truesize)) {
72168- atomic_inc(&sk->sk_drops);
72169+ atomic_inc_unchecked(&sk->sk_drops);
72170 return -ENOBUFS;
72171 }
72172
72173@@ -323,7 +323,7 @@ int sock_queue_rcv_skb(struct sock *sk,
72174 skb_dst_force(skb);
72175
72176 spin_lock_irqsave(&list->lock, flags);
72177- skb->dropcount = atomic_read(&sk->sk_drops);
72178+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
72179 __skb_queue_tail(list, skb);
72180 spin_unlock_irqrestore(&list->lock, flags);
72181
72182@@ -343,7 +343,7 @@ int sk_receive_skb(struct sock *sk, stru
72183 skb->dev = NULL;
72184
72185 if (sk_rcvqueues_full(sk, skb)) {
72186- atomic_inc(&sk->sk_drops);
72187+ atomic_inc_unchecked(&sk->sk_drops);
72188 goto discard_and_relse;
72189 }
72190 if (nested)
72191@@ -361,7 +361,7 @@ int sk_receive_skb(struct sock *sk, stru
72192 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
72193 } else if (sk_add_backlog(sk, skb)) {
72194 bh_unlock_sock(sk);
72195- atomic_inc(&sk->sk_drops);
72196+ atomic_inc_unchecked(&sk->sk_drops);
72197 goto discard_and_relse;
72198 }
72199
72200@@ -924,7 +924,7 @@ int sock_getsockopt(struct socket *sock,
72201 if (len > sizeof(peercred))
72202 len = sizeof(peercred);
72203 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
72204- if (copy_to_user(optval, &peercred, len))
72205+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
72206 return -EFAULT;
72207 goto lenout;
72208 }
72209@@ -937,7 +937,7 @@ int sock_getsockopt(struct socket *sock,
72210 return -ENOTCONN;
72211 if (lv < len)
72212 return -EINVAL;
72213- if (copy_to_user(optval, address, len))
72214+ if (len > sizeof(address) || copy_to_user(optval, address, len))
72215 return -EFAULT;
72216 goto lenout;
72217 }
72218@@ -970,7 +970,7 @@ int sock_getsockopt(struct socket *sock,
72219
72220 if (len > lv)
72221 len = lv;
72222- if (copy_to_user(optval, &v, len))
72223+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
72224 return -EFAULT;
72225 lenout:
72226 if (put_user(len, optlen))
72227@@ -2029,7 +2029,7 @@ void sock_init_data(struct socket *sock,
72228 */
72229 smp_wmb();
72230 atomic_set(&sk->sk_refcnt, 1);
72231- atomic_set(&sk->sk_drops, 0);
72232+ atomic_set_unchecked(&sk->sk_drops, 0);
72233 }
72234 EXPORT_SYMBOL(sock_init_data);
72235
72236diff -urNp linux-3.1.1/net/decnet/sysctl_net_decnet.c linux-3.1.1/net/decnet/sysctl_net_decnet.c
72237--- linux-3.1.1/net/decnet/sysctl_net_decnet.c 2011-11-11 15:19:27.000000000 -0500
72238+++ linux-3.1.1/net/decnet/sysctl_net_decnet.c 2011-11-16 18:39:08.000000000 -0500
72239@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_t
72240
72241 if (len > *lenp) len = *lenp;
72242
72243- if (copy_to_user(buffer, addr, len))
72244+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
72245 return -EFAULT;
72246
72247 *lenp = len;
72248@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table
72249
72250 if (len > *lenp) len = *lenp;
72251
72252- if (copy_to_user(buffer, devname, len))
72253+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
72254 return -EFAULT;
72255
72256 *lenp = len;
72257diff -urNp linux-3.1.1/net/econet/Kconfig linux-3.1.1/net/econet/Kconfig
72258--- linux-3.1.1/net/econet/Kconfig 2011-11-11 15:19:27.000000000 -0500
72259+++ linux-3.1.1/net/econet/Kconfig 2011-11-16 18:40:44.000000000 -0500
72260@@ -4,7 +4,7 @@
72261
72262 config ECONET
72263 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
72264- depends on EXPERIMENTAL && INET
72265+ depends on EXPERIMENTAL && INET && BROKEN
72266 ---help---
72267 Econet is a fairly old and slow networking protocol mainly used by
72268 Acorn computers to access file and print servers. It uses native
72269diff -urNp linux-3.1.1/net/ipv4/fib_frontend.c linux-3.1.1/net/ipv4/fib_frontend.c
72270--- linux-3.1.1/net/ipv4/fib_frontend.c 2011-11-11 15:19:27.000000000 -0500
72271+++ linux-3.1.1/net/ipv4/fib_frontend.c 2011-11-16 18:39:08.000000000 -0500
72272@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct not
72273 #ifdef CONFIG_IP_ROUTE_MULTIPATH
72274 fib_sync_up(dev);
72275 #endif
72276- atomic_inc(&net->ipv4.dev_addr_genid);
72277+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72278 rt_cache_flush(dev_net(dev), -1);
72279 break;
72280 case NETDEV_DOWN:
72281 fib_del_ifaddr(ifa, NULL);
72282- atomic_inc(&net->ipv4.dev_addr_genid);
72283+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72284 if (ifa->ifa_dev->ifa_list == NULL) {
72285 /* Last address was deleted from this interface.
72286 * Disable IP.
72287@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notif
72288 #ifdef CONFIG_IP_ROUTE_MULTIPATH
72289 fib_sync_up(dev);
72290 #endif
72291- atomic_inc(&net->ipv4.dev_addr_genid);
72292+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72293 rt_cache_flush(dev_net(dev), -1);
72294 break;
72295 case NETDEV_DOWN:
72296diff -urNp linux-3.1.1/net/ipv4/fib_semantics.c linux-3.1.1/net/ipv4/fib_semantics.c
72297--- linux-3.1.1/net/ipv4/fib_semantics.c 2011-11-11 15:19:27.000000000 -0500
72298+++ linux-3.1.1/net/ipv4/fib_semantics.c 2011-11-16 18:39:08.000000000 -0500
72299@@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct n
72300 nh->nh_saddr = inet_select_addr(nh->nh_dev,
72301 nh->nh_gw,
72302 nh->nh_parent->fib_scope);
72303- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
72304+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
72305
72306 return nh->nh_saddr;
72307 }
72308diff -urNp linux-3.1.1/net/ipv4/inet_diag.c linux-3.1.1/net/ipv4/inet_diag.c
72309--- linux-3.1.1/net/ipv4/inet_diag.c 2011-11-11 15:19:27.000000000 -0500
72310+++ linux-3.1.1/net/ipv4/inet_diag.c 2011-11-16 18:40:44.000000000 -0500
72311@@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
72312 r->idiag_retrans = 0;
72313
72314 r->id.idiag_if = sk->sk_bound_dev_if;
72315+
72316+#ifdef CONFIG_GRKERNSEC_HIDESYM
72317+ r->id.idiag_cookie[0] = 0;
72318+ r->id.idiag_cookie[1] = 0;
72319+#else
72320 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
72321 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
72322+#endif
72323
72324 r->id.idiag_sport = inet->inet_sport;
72325 r->id.idiag_dport = inet->inet_dport;
72326@@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
72327 r->idiag_family = tw->tw_family;
72328 r->idiag_retrans = 0;
72329 r->id.idiag_if = tw->tw_bound_dev_if;
72330+
72331+#ifdef CONFIG_GRKERNSEC_HIDESYM
72332+ r->id.idiag_cookie[0] = 0;
72333+ r->id.idiag_cookie[1] = 0;
72334+#else
72335 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
72336 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
72337+#endif
72338+
72339 r->id.idiag_sport = tw->tw_sport;
72340 r->id.idiag_dport = tw->tw_dport;
72341 r->id.idiag_src[0] = tw->tw_rcv_saddr;
72342@@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
72343 if (sk == NULL)
72344 goto unlock;
72345
72346+#ifndef CONFIG_GRKERNSEC_HIDESYM
72347 err = -ESTALE;
72348 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
72349 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
72350 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
72351 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
72352 goto out;
72353+#endif
72354
72355 err = -ENOMEM;
72356 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
72357@@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
72358 r->idiag_retrans = req->retrans;
72359
72360 r->id.idiag_if = sk->sk_bound_dev_if;
72361+
72362+#ifdef CONFIG_GRKERNSEC_HIDESYM
72363+ r->id.idiag_cookie[0] = 0;
72364+ r->id.idiag_cookie[1] = 0;
72365+#else
72366 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
72367 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
72368+#endif
72369
72370 tmo = req->expires - jiffies;
72371 if (tmo < 0)
72372diff -urNp linux-3.1.1/net/ipv4/inet_hashtables.c linux-3.1.1/net/ipv4/inet_hashtables.c
72373--- linux-3.1.1/net/ipv4/inet_hashtables.c 2011-11-11 15:19:27.000000000 -0500
72374+++ linux-3.1.1/net/ipv4/inet_hashtables.c 2011-11-16 18:40:44.000000000 -0500
72375@@ -18,12 +18,15 @@
72376 #include <linux/sched.h>
72377 #include <linux/slab.h>
72378 #include <linux/wait.h>
72379+#include <linux/security.h>
72380
72381 #include <net/inet_connection_sock.h>
72382 #include <net/inet_hashtables.h>
72383 #include <net/secure_seq.h>
72384 #include <net/ip.h>
72385
72386+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
72387+
72388 /*
72389 * Allocate and initialize a new local port bind bucket.
72390 * The bindhash mutex for snum's hash chain must be held here.
72391@@ -530,6 +533,8 @@ ok:
72392 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
72393 spin_unlock(&head->lock);
72394
72395+ gr_update_task_in_ip_table(current, inet_sk(sk));
72396+
72397 if (tw) {
72398 inet_twsk_deschedule(tw, death_row);
72399 while (twrefcnt) {
72400diff -urNp linux-3.1.1/net/ipv4/inetpeer.c linux-3.1.1/net/ipv4/inetpeer.c
72401--- linux-3.1.1/net/ipv4/inetpeer.c 2011-11-11 15:19:27.000000000 -0500
72402+++ linux-3.1.1/net/ipv4/inetpeer.c 2011-11-16 19:18:22.000000000 -0500
72403@@ -400,6 +400,8 @@ struct inet_peer *inet_getpeer(const str
72404 unsigned int sequence;
72405 int invalidated, gccnt = 0;
72406
72407+ pax_track_stack();
72408+
72409 /* Attempt a lockless lookup first.
72410 * Because of a concurrent writer, we might not find an existing entry.
72411 */
72412@@ -436,8 +438,8 @@ relookup:
72413 if (p) {
72414 p->daddr = *daddr;
72415 atomic_set(&p->refcnt, 1);
72416- atomic_set(&p->rid, 0);
72417- atomic_set(&p->ip_id_count,
72418+ atomic_set_unchecked(&p->rid, 0);
72419+ atomic_set_unchecked(&p->ip_id_count,
72420 (daddr->family == AF_INET) ?
72421 secure_ip_id(daddr->addr.a4) :
72422 secure_ipv6_id(daddr->addr.a6));
72423diff -urNp linux-3.1.1/net/ipv4/ipconfig.c linux-3.1.1/net/ipv4/ipconfig.c
72424--- linux-3.1.1/net/ipv4/ipconfig.c 2011-11-11 15:19:27.000000000 -0500
72425+++ linux-3.1.1/net/ipv4/ipconfig.c 2011-11-16 18:39:08.000000000 -0500
72426@@ -313,7 +313,7 @@ static int __init ic_devinet_ioctl(unsig
72427
72428 mm_segment_t oldfs = get_fs();
72429 set_fs(get_ds());
72430- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
72431+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
72432 set_fs(oldfs);
72433 return res;
72434 }
72435@@ -324,7 +324,7 @@ static int __init ic_dev_ioctl(unsigned
72436
72437 mm_segment_t oldfs = get_fs();
72438 set_fs(get_ds());
72439- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
72440+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
72441 set_fs(oldfs);
72442 return res;
72443 }
72444@@ -335,7 +335,7 @@ static int __init ic_route_ioctl(unsigne
72445
72446 mm_segment_t oldfs = get_fs();
72447 set_fs(get_ds());
72448- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
72449+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
72450 set_fs(oldfs);
72451 return res;
72452 }
72453diff -urNp linux-3.1.1/net/ipv4/ip_fragment.c linux-3.1.1/net/ipv4/ip_fragment.c
72454--- linux-3.1.1/net/ipv4/ip_fragment.c 2011-11-11 15:19:27.000000000 -0500
72455+++ linux-3.1.1/net/ipv4/ip_fragment.c 2011-11-16 18:39:08.000000000 -0500
72456@@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct
72457 return 0;
72458
72459 start = qp->rid;
72460- end = atomic_inc_return(&peer->rid);
72461+ end = atomic_inc_return_unchecked(&peer->rid);
72462 qp->rid = end;
72463
72464 rc = qp->q.fragments && (end - start) > max;
72465diff -urNp linux-3.1.1/net/ipv4/ip_sockglue.c linux-3.1.1/net/ipv4/ip_sockglue.c
72466--- linux-3.1.1/net/ipv4/ip_sockglue.c 2011-11-11 15:19:27.000000000 -0500
72467+++ linux-3.1.1/net/ipv4/ip_sockglue.c 2011-11-16 18:40:44.000000000 -0500
72468@@ -1073,6 +1073,8 @@ static int do_ip_getsockopt(struct sock
72469 int val;
72470 int len;
72471
72472+ pax_track_stack();
72473+
72474 if (level != SOL_IP)
72475 return -EOPNOTSUPP;
72476
72477@@ -1110,7 +1112,8 @@ static int do_ip_getsockopt(struct sock
72478 len = min_t(unsigned int, len, opt->optlen);
72479 if (put_user(len, optlen))
72480 return -EFAULT;
72481- if (copy_to_user(optval, opt->__data, len))
72482+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
72483+ copy_to_user(optval, opt->__data, len))
72484 return -EFAULT;
72485 return 0;
72486 }
72487@@ -1238,7 +1241,7 @@ static int do_ip_getsockopt(struct sock
72488 if (sk->sk_type != SOCK_STREAM)
72489 return -ENOPROTOOPT;
72490
72491- msg.msg_control = optval;
72492+ msg.msg_control = (void __force_kernel *)optval;
72493 msg.msg_controllen = len;
72494 msg.msg_flags = flags;
72495
72496diff -urNp linux-3.1.1/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-3.1.1/net/ipv4/netfilter/nf_nat_snmp_basic.c
72497--- linux-3.1.1/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-11-11 15:19:27.000000000 -0500
72498+++ linux-3.1.1/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-11-16 18:39:08.000000000 -0500
72499@@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
72500
72501 *len = 0;
72502
72503- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
72504+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
72505 if (*octets == NULL) {
72506 if (net_ratelimit())
72507 pr_notice("OOM in bsalg (%d)\n", __LINE__);
72508diff -urNp linux-3.1.1/net/ipv4/ping.c linux-3.1.1/net/ipv4/ping.c
72509--- linux-3.1.1/net/ipv4/ping.c 2011-11-11 15:19:27.000000000 -0500
72510+++ linux-3.1.1/net/ipv4/ping.c 2011-11-16 18:39:08.000000000 -0500
72511@@ -837,7 +837,7 @@ static void ping_format_sock(struct sock
72512 sk_rmem_alloc_get(sp),
72513 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72514 atomic_read(&sp->sk_refcnt), sp,
72515- atomic_read(&sp->sk_drops), len);
72516+ atomic_read_unchecked(&sp->sk_drops), len);
72517 }
72518
72519 static int ping_seq_show(struct seq_file *seq, void *v)
72520diff -urNp linux-3.1.1/net/ipv4/raw.c linux-3.1.1/net/ipv4/raw.c
72521--- linux-3.1.1/net/ipv4/raw.c 2011-11-11 15:19:27.000000000 -0500
72522+++ linux-3.1.1/net/ipv4/raw.c 2011-11-16 18:40:44.000000000 -0500
72523@@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
72524 int raw_rcv(struct sock *sk, struct sk_buff *skb)
72525 {
72526 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
72527- atomic_inc(&sk->sk_drops);
72528+ atomic_inc_unchecked(&sk->sk_drops);
72529 kfree_skb(skb);
72530 return NET_RX_DROP;
72531 }
72532@@ -737,16 +737,20 @@ static int raw_init(struct sock *sk)
72533
72534 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
72535 {
72536+ struct icmp_filter filter;
72537+
72538 if (optlen > sizeof(struct icmp_filter))
72539 optlen = sizeof(struct icmp_filter);
72540- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
72541+ if (copy_from_user(&filter, optval, optlen))
72542 return -EFAULT;
72543+ raw_sk(sk)->filter = filter;
72544 return 0;
72545 }
72546
72547 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
72548 {
72549 int len, ret = -EFAULT;
72550+ struct icmp_filter filter;
72551
72552 if (get_user(len, optlen))
72553 goto out;
72554@@ -756,8 +760,9 @@ static int raw_geticmpfilter(struct sock
72555 if (len > sizeof(struct icmp_filter))
72556 len = sizeof(struct icmp_filter);
72557 ret = -EFAULT;
72558- if (put_user(len, optlen) ||
72559- copy_to_user(optval, &raw_sk(sk)->filter, len))
72560+ filter = raw_sk(sk)->filter;
72561+ if (put_user(len, optlen) || len > sizeof filter ||
72562+ copy_to_user(optval, &filter, len))
72563 goto out;
72564 ret = 0;
72565 out: return ret;
72566@@ -985,7 +990,13 @@ static void raw_sock_seq_show(struct seq
72567 sk_wmem_alloc_get(sp),
72568 sk_rmem_alloc_get(sp),
72569 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72570- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72571+ atomic_read(&sp->sk_refcnt),
72572+#ifdef CONFIG_GRKERNSEC_HIDESYM
72573+ NULL,
72574+#else
72575+ sp,
72576+#endif
72577+ atomic_read_unchecked(&sp->sk_drops));
72578 }
72579
72580 static int raw_seq_show(struct seq_file *seq, void *v)
72581diff -urNp linux-3.1.1/net/ipv4/route.c linux-3.1.1/net/ipv4/route.c
72582--- linux-3.1.1/net/ipv4/route.c 2011-11-11 15:19:27.000000000 -0500
72583+++ linux-3.1.1/net/ipv4/route.c 2011-11-16 18:39:08.000000000 -0500
72584@@ -308,7 +308,7 @@ static inline unsigned int rt_hash(__be3
72585
72586 static inline int rt_genid(struct net *net)
72587 {
72588- return atomic_read(&net->ipv4.rt_genid);
72589+ return atomic_read_unchecked(&net->ipv4.rt_genid);
72590 }
72591
72592 #ifdef CONFIG_PROC_FS
72593@@ -837,7 +837,7 @@ static void rt_cache_invalidate(struct n
72594 unsigned char shuffle;
72595
72596 get_random_bytes(&shuffle, sizeof(shuffle));
72597- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
72598+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
72599 }
72600
72601 /*
72602@@ -2872,7 +2872,7 @@ static int rt_fill_info(struct net *net,
72603 error = rt->dst.error;
72604 if (peer) {
72605 inet_peer_refcheck(rt->peer);
72606- id = atomic_read(&peer->ip_id_count) & 0xffff;
72607+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
72608 if (peer->tcp_ts_stamp) {
72609 ts = peer->tcp_ts;
72610 tsage = get_seconds() - peer->tcp_ts_stamp;
72611diff -urNp linux-3.1.1/net/ipv4/tcp.c linux-3.1.1/net/ipv4/tcp.c
72612--- linux-3.1.1/net/ipv4/tcp.c 2011-11-11 15:19:27.000000000 -0500
72613+++ linux-3.1.1/net/ipv4/tcp.c 2011-11-16 18:40:44.000000000 -0500
72614@@ -2122,6 +2122,8 @@ static int do_tcp_setsockopt(struct sock
72615 int val;
72616 int err = 0;
72617
72618+ pax_track_stack();
72619+
72620 /* These are data/string values, all the others are ints */
72621 switch (optname) {
72622 case TCP_CONGESTION: {
72623@@ -2501,6 +2503,8 @@ static int do_tcp_getsockopt(struct sock
72624 struct tcp_sock *tp = tcp_sk(sk);
72625 int val, len;
72626
72627+ pax_track_stack();
72628+
72629 if (get_user(len, optlen))
72630 return -EFAULT;
72631
72632diff -urNp linux-3.1.1/net/ipv4/tcp_ipv4.c linux-3.1.1/net/ipv4/tcp_ipv4.c
72633--- linux-3.1.1/net/ipv4/tcp_ipv4.c 2011-11-11 15:19:27.000000000 -0500
72634+++ linux-3.1.1/net/ipv4/tcp_ipv4.c 2011-11-16 18:40:44.000000000 -0500
72635@@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
72636 int sysctl_tcp_low_latency __read_mostly;
72637 EXPORT_SYMBOL(sysctl_tcp_low_latency);
72638
72639+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72640+extern int grsec_enable_blackhole;
72641+#endif
72642
72643 #ifdef CONFIG_TCP_MD5SIG
72644 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
72645@@ -1622,6 +1625,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
72646 return 0;
72647
72648 reset:
72649+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72650+ if (!grsec_enable_blackhole)
72651+#endif
72652 tcp_v4_send_reset(rsk, skb);
72653 discard:
72654 kfree_skb(skb);
72655@@ -1684,12 +1690,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
72656 TCP_SKB_CB(skb)->sacked = 0;
72657
72658 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
72659- if (!sk)
72660+ if (!sk) {
72661+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72662+ ret = 1;
72663+#endif
72664 goto no_tcp_socket;
72665-
72666+ }
72667 process:
72668- if (sk->sk_state == TCP_TIME_WAIT)
72669+ if (sk->sk_state == TCP_TIME_WAIT) {
72670+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72671+ ret = 2;
72672+#endif
72673 goto do_time_wait;
72674+ }
72675
72676 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
72677 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
72678@@ -1739,6 +1752,10 @@ no_tcp_socket:
72679 bad_packet:
72680 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
72681 } else {
72682+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72683+ if (!grsec_enable_blackhole || (ret == 1 &&
72684+ (skb->dev->flags & IFF_LOOPBACK)))
72685+#endif
72686 tcp_v4_send_reset(NULL, skb);
72687 }
72688
72689@@ -2403,7 +2420,11 @@ static void get_openreq4(struct sock *sk
72690 0, /* non standard timer */
72691 0, /* open_requests have no inode */
72692 atomic_read(&sk->sk_refcnt),
72693+#ifdef CONFIG_GRKERNSEC_HIDESYM
72694+ NULL,
72695+#else
72696 req,
72697+#endif
72698 len);
72699 }
72700
72701@@ -2453,7 +2474,12 @@ static void get_tcp4_sock(struct sock *s
72702 sock_i_uid(sk),
72703 icsk->icsk_probes_out,
72704 sock_i_ino(sk),
72705- atomic_read(&sk->sk_refcnt), sk,
72706+ atomic_read(&sk->sk_refcnt),
72707+#ifdef CONFIG_GRKERNSEC_HIDESYM
72708+ NULL,
72709+#else
72710+ sk,
72711+#endif
72712 jiffies_to_clock_t(icsk->icsk_rto),
72713 jiffies_to_clock_t(icsk->icsk_ack.ato),
72714 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
72715@@ -2481,7 +2507,13 @@ static void get_timewait4_sock(struct in
72716 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
72717 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
72718 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
72719- atomic_read(&tw->tw_refcnt), tw, len);
72720+ atomic_read(&tw->tw_refcnt),
72721+#ifdef CONFIG_GRKERNSEC_HIDESYM
72722+ NULL,
72723+#else
72724+ tw,
72725+#endif
72726+ len);
72727 }
72728
72729 #define TMPSZ 150
72730diff -urNp linux-3.1.1/net/ipv4/tcp_minisocks.c linux-3.1.1/net/ipv4/tcp_minisocks.c
72731--- linux-3.1.1/net/ipv4/tcp_minisocks.c 2011-11-11 15:19:27.000000000 -0500
72732+++ linux-3.1.1/net/ipv4/tcp_minisocks.c 2011-11-16 18:40:44.000000000 -0500
72733@@ -27,6 +27,10 @@
72734 #include <net/inet_common.h>
72735 #include <net/xfrm.h>
72736
72737+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72738+extern int grsec_enable_blackhole;
72739+#endif
72740+
72741 int sysctl_tcp_syncookies __read_mostly = 1;
72742 EXPORT_SYMBOL(sysctl_tcp_syncookies);
72743
72744@@ -750,6 +754,10 @@ listen_overflow:
72745
72746 embryonic_reset:
72747 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
72748+
72749+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72750+ if (!grsec_enable_blackhole)
72751+#endif
72752 if (!(flg & TCP_FLAG_RST))
72753 req->rsk_ops->send_reset(sk, skb);
72754
72755diff -urNp linux-3.1.1/net/ipv4/tcp_output.c linux-3.1.1/net/ipv4/tcp_output.c
72756--- linux-3.1.1/net/ipv4/tcp_output.c 2011-11-11 15:19:27.000000000 -0500
72757+++ linux-3.1.1/net/ipv4/tcp_output.c 2011-11-16 18:40:44.000000000 -0500
72758@@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
72759 int mss;
72760 int s_data_desired = 0;
72761
72762+ pax_track_stack();
72763+
72764 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
72765 s_data_desired = cvp->s_data_desired;
72766 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
72767diff -urNp linux-3.1.1/net/ipv4/tcp_probe.c linux-3.1.1/net/ipv4/tcp_probe.c
72768--- linux-3.1.1/net/ipv4/tcp_probe.c 2011-11-11 15:19:27.000000000 -0500
72769+++ linux-3.1.1/net/ipv4/tcp_probe.c 2011-11-16 18:39:08.000000000 -0500
72770@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
72771 if (cnt + width >= len)
72772 break;
72773
72774- if (copy_to_user(buf + cnt, tbuf, width))
72775+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
72776 return -EFAULT;
72777 cnt += width;
72778 }
72779diff -urNp linux-3.1.1/net/ipv4/tcp_timer.c linux-3.1.1/net/ipv4/tcp_timer.c
72780--- linux-3.1.1/net/ipv4/tcp_timer.c 2011-11-11 15:19:27.000000000 -0500
72781+++ linux-3.1.1/net/ipv4/tcp_timer.c 2011-11-16 18:40:44.000000000 -0500
72782@@ -22,6 +22,10 @@
72783 #include <linux/gfp.h>
72784 #include <net/tcp.h>
72785
72786+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72787+extern int grsec_lastack_retries;
72788+#endif
72789+
72790 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
72791 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
72792 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
72793@@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
72794 }
72795 }
72796
72797+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72798+ if ((sk->sk_state == TCP_LAST_ACK) &&
72799+ (grsec_lastack_retries > 0) &&
72800+ (grsec_lastack_retries < retry_until))
72801+ retry_until = grsec_lastack_retries;
72802+#endif
72803+
72804 if (retransmits_timed_out(sk, retry_until,
72805 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
72806 /* Has it gone just too far? */
72807diff -urNp linux-3.1.1/net/ipv4/udp.c linux-3.1.1/net/ipv4/udp.c
72808--- linux-3.1.1/net/ipv4/udp.c 2011-11-11 15:19:27.000000000 -0500
72809+++ linux-3.1.1/net/ipv4/udp.c 2011-11-16 19:17:54.000000000 -0500
72810@@ -86,6 +86,7 @@
72811 #include <linux/types.h>
72812 #include <linux/fcntl.h>
72813 #include <linux/module.h>
72814+#include <linux/security.h>
72815 #include <linux/socket.h>
72816 #include <linux/sockios.h>
72817 #include <linux/igmp.h>
72818@@ -108,6 +109,10 @@
72819 #include <trace/events/udp.h>
72820 #include "udp_impl.h"
72821
72822+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72823+extern int grsec_enable_blackhole;
72824+#endif
72825+
72826 struct udp_table udp_table __read_mostly;
72827 EXPORT_SYMBOL(udp_table);
72828
72829@@ -565,6 +570,9 @@ found:
72830 return s;
72831 }
72832
72833+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
72834+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
72835+
72836 /*
72837 * This routine is called by the ICMP module when it gets some
72838 * sort of error condition. If err < 0 then the socket should
72839@@ -856,9 +864,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
72840 dport = usin->sin_port;
72841 if (dport == 0)
72842 return -EINVAL;
72843+
72844+ err = gr_search_udp_sendmsg(sk, usin);
72845+ if (err)
72846+ return err;
72847 } else {
72848 if (sk->sk_state != TCP_ESTABLISHED)
72849 return -EDESTADDRREQ;
72850+
72851+ err = gr_search_udp_sendmsg(sk, NULL);
72852+ if (err)
72853+ return err;
72854+
72855 daddr = inet->inet_daddr;
72856 dport = inet->inet_dport;
72857 /* Open fast path for connected socket.
72858@@ -1099,7 +1116,7 @@ static unsigned int first_packet_length(
72859 udp_lib_checksum_complete(skb)) {
72860 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
72861 IS_UDPLITE(sk));
72862- atomic_inc(&sk->sk_drops);
72863+ atomic_inc_unchecked(&sk->sk_drops);
72864 __skb_unlink(skb, rcvq);
72865 __skb_queue_tail(&list_kill, skb);
72866 }
72867@@ -1185,6 +1202,10 @@ try_again:
72868 if (!skb)
72869 goto out;
72870
72871+ err = gr_search_udp_recvmsg(sk, skb);
72872+ if (err)
72873+ goto out_free;
72874+
72875 ulen = skb->len - sizeof(struct udphdr);
72876 if (len > ulen)
72877 len = ulen;
72878@@ -1485,7 +1506,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
72879
72880 drop:
72881 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
72882- atomic_inc(&sk->sk_drops);
72883+ atomic_inc_unchecked(&sk->sk_drops);
72884 kfree_skb(skb);
72885 return -1;
72886 }
72887@@ -1504,7 +1525,7 @@ static void flush_stack(struct sock **st
72888 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
72889
72890 if (!skb1) {
72891- atomic_inc(&sk->sk_drops);
72892+ atomic_inc_unchecked(&sk->sk_drops);
72893 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
72894 IS_UDPLITE(sk));
72895 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
72896@@ -1673,6 +1694,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
72897 goto csum_error;
72898
72899 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
72900+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72901+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
72902+#endif
72903 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
72904
72905 /*
72906@@ -2100,8 +2124,13 @@ static void udp4_format_sock(struct sock
72907 sk_wmem_alloc_get(sp),
72908 sk_rmem_alloc_get(sp),
72909 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72910- atomic_read(&sp->sk_refcnt), sp,
72911- atomic_read(&sp->sk_drops), len);
72912+ atomic_read(&sp->sk_refcnt),
72913+#ifdef CONFIG_GRKERNSEC_HIDESYM
72914+ NULL,
72915+#else
72916+ sp,
72917+#endif
72918+ atomic_read_unchecked(&sp->sk_drops), len);
72919 }
72920
72921 int udp4_seq_show(struct seq_file *seq, void *v)
72922diff -urNp linux-3.1.1/net/ipv6/addrconf.c linux-3.1.1/net/ipv6/addrconf.c
72923--- linux-3.1.1/net/ipv6/addrconf.c 2011-11-11 15:19:27.000000000 -0500
72924+++ linux-3.1.1/net/ipv6/addrconf.c 2011-11-16 18:39:08.000000000 -0500
72925@@ -2083,7 +2083,7 @@ int addrconf_set_dstaddr(struct net *net
72926 p.iph.ihl = 5;
72927 p.iph.protocol = IPPROTO_IPV6;
72928 p.iph.ttl = 64;
72929- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
72930+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
72931
72932 if (ops->ndo_do_ioctl) {
72933 mm_segment_t oldfs = get_fs();
72934diff -urNp linux-3.1.1/net/ipv6/inet6_connection_sock.c linux-3.1.1/net/ipv6/inet6_connection_sock.c
72935--- linux-3.1.1/net/ipv6/inet6_connection_sock.c 2011-11-11 15:19:27.000000000 -0500
72936+++ linux-3.1.1/net/ipv6/inet6_connection_sock.c 2011-11-16 18:39:08.000000000 -0500
72937@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
72938 #ifdef CONFIG_XFRM
72939 {
72940 struct rt6_info *rt = (struct rt6_info *)dst;
72941- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
72942+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
72943 }
72944 #endif
72945 }
72946@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
72947 #ifdef CONFIG_XFRM
72948 if (dst) {
72949 struct rt6_info *rt = (struct rt6_info *)dst;
72950- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
72951+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
72952 __sk_dst_reset(sk);
72953 dst = NULL;
72954 }
72955diff -urNp linux-3.1.1/net/ipv6/ipv6_sockglue.c linux-3.1.1/net/ipv6/ipv6_sockglue.c
72956--- linux-3.1.1/net/ipv6/ipv6_sockglue.c 2011-11-11 15:19:27.000000000 -0500
72957+++ linux-3.1.1/net/ipv6/ipv6_sockglue.c 2011-11-16 18:40:44.000000000 -0500
72958@@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
72959 int val, valbool;
72960 int retv = -ENOPROTOOPT;
72961
72962+ pax_track_stack();
72963+
72964 if (optval == NULL)
72965 val=0;
72966 else {
72967@@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
72968 int len;
72969 int val;
72970
72971+ pax_track_stack();
72972+
72973 if (ip6_mroute_opt(optname))
72974 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
72975
72976@@ -960,7 +964,7 @@ static int do_ipv6_getsockopt(struct soc
72977 if (sk->sk_type != SOCK_STREAM)
72978 return -ENOPROTOOPT;
72979
72980- msg.msg_control = optval;
72981+ msg.msg_control = (void __force_kernel *)optval;
72982 msg.msg_controllen = len;
72983 msg.msg_flags = flags;
72984
72985diff -urNp linux-3.1.1/net/ipv6/raw.c linux-3.1.1/net/ipv6/raw.c
72986--- linux-3.1.1/net/ipv6/raw.c 2011-11-11 15:19:27.000000000 -0500
72987+++ linux-3.1.1/net/ipv6/raw.c 2011-11-16 18:40:44.000000000 -0500
72988@@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
72989 {
72990 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
72991 skb_checksum_complete(skb)) {
72992- atomic_inc(&sk->sk_drops);
72993+ atomic_inc_unchecked(&sk->sk_drops);
72994 kfree_skb(skb);
72995 return NET_RX_DROP;
72996 }
72997@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
72998 struct raw6_sock *rp = raw6_sk(sk);
72999
73000 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
73001- atomic_inc(&sk->sk_drops);
73002+ atomic_inc_unchecked(&sk->sk_drops);
73003 kfree_skb(skb);
73004 return NET_RX_DROP;
73005 }
73006@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
73007
73008 if (inet->hdrincl) {
73009 if (skb_checksum_complete(skb)) {
73010- atomic_inc(&sk->sk_drops);
73011+ atomic_inc_unchecked(&sk->sk_drops);
73012 kfree_skb(skb);
73013 return NET_RX_DROP;
73014 }
73015@@ -601,7 +601,7 @@ out:
73016 return err;
73017 }
73018
73019-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
73020+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
73021 struct flowi6 *fl6, struct dst_entry **dstp,
73022 unsigned int flags)
73023 {
73024@@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
73025 u16 proto;
73026 int err;
73027
73028+ pax_track_stack();
73029+
73030 /* Rough check on arithmetic overflow,
73031 better check is made in ip6_append_data().
73032 */
73033@@ -909,12 +911,15 @@ do_confirm:
73034 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
73035 char __user *optval, int optlen)
73036 {
73037+ struct icmp6_filter filter;
73038+
73039 switch (optname) {
73040 case ICMPV6_FILTER:
73041 if (optlen > sizeof(struct icmp6_filter))
73042 optlen = sizeof(struct icmp6_filter);
73043- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
73044+ if (copy_from_user(&filter, optval, optlen))
73045 return -EFAULT;
73046+ raw6_sk(sk)->filter = filter;
73047 return 0;
73048 default:
73049 return -ENOPROTOOPT;
73050@@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct so
73051 char __user *optval, int __user *optlen)
73052 {
73053 int len;
73054+ struct icmp6_filter filter;
73055
73056 switch (optname) {
73057 case ICMPV6_FILTER:
73058@@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
73059 len = sizeof(struct icmp6_filter);
73060 if (put_user(len, optlen))
73061 return -EFAULT;
73062- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
73063+ filter = raw6_sk(sk)->filter;
73064+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
73065 return -EFAULT;
73066 return 0;
73067 default:
73068@@ -1245,7 +1252,13 @@ static void raw6_sock_seq_show(struct se
73069 0, 0L, 0,
73070 sock_i_uid(sp), 0,
73071 sock_i_ino(sp),
73072- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
73073+ atomic_read(&sp->sk_refcnt),
73074+#ifdef CONFIG_GRKERNSEC_HIDESYM
73075+ NULL,
73076+#else
73077+ sp,
73078+#endif
73079+ atomic_read_unchecked(&sp->sk_drops));
73080 }
73081
73082 static int raw6_seq_show(struct seq_file *seq, void *v)
73083diff -urNp linux-3.1.1/net/ipv6/tcp_ipv6.c linux-3.1.1/net/ipv6/tcp_ipv6.c
73084--- linux-3.1.1/net/ipv6/tcp_ipv6.c 2011-11-11 15:19:27.000000000 -0500
73085+++ linux-3.1.1/net/ipv6/tcp_ipv6.c 2011-11-16 18:40:44.000000000 -0500
73086@@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
73087 }
73088 #endif
73089
73090+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73091+extern int grsec_enable_blackhole;
73092+#endif
73093+
73094 static void tcp_v6_hash(struct sock *sk)
73095 {
73096 if (sk->sk_state != TCP_CLOSE) {
73097@@ -1647,6 +1651,9 @@ static int tcp_v6_do_rcv(struct sock *sk
73098 return 0;
73099
73100 reset:
73101+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73102+ if (!grsec_enable_blackhole)
73103+#endif
73104 tcp_v6_send_reset(sk, skb);
73105 discard:
73106 if (opt_skb)
73107@@ -1726,12 +1733,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
73108 TCP_SKB_CB(skb)->sacked = 0;
73109
73110 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
73111- if (!sk)
73112+ if (!sk) {
73113+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73114+ ret = 1;
73115+#endif
73116 goto no_tcp_socket;
73117+ }
73118
73119 process:
73120- if (sk->sk_state == TCP_TIME_WAIT)
73121+ if (sk->sk_state == TCP_TIME_WAIT) {
73122+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73123+ ret = 2;
73124+#endif
73125 goto do_time_wait;
73126+ }
73127
73128 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
73129 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
73130@@ -1779,6 +1794,10 @@ no_tcp_socket:
73131 bad_packet:
73132 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73133 } else {
73134+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73135+ if (!grsec_enable_blackhole || (ret == 1 &&
73136+ (skb->dev->flags & IFF_LOOPBACK)))
73137+#endif
73138 tcp_v6_send_reset(NULL, skb);
73139 }
73140
73141@@ -2039,7 +2058,13 @@ static void get_openreq6(struct seq_file
73142 uid,
73143 0, /* non standard timer */
73144 0, /* open_requests have no inode */
73145- 0, req);
73146+ 0,
73147+#ifdef CONFIG_GRKERNSEC_HIDESYM
73148+ NULL
73149+#else
73150+ req
73151+#endif
73152+ );
73153 }
73154
73155 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73156@@ -2089,7 +2114,12 @@ static void get_tcp6_sock(struct seq_fil
73157 sock_i_uid(sp),
73158 icsk->icsk_probes_out,
73159 sock_i_ino(sp),
73160- atomic_read(&sp->sk_refcnt), sp,
73161+ atomic_read(&sp->sk_refcnt),
73162+#ifdef CONFIG_GRKERNSEC_HIDESYM
73163+ NULL,
73164+#else
73165+ sp,
73166+#endif
73167 jiffies_to_clock_t(icsk->icsk_rto),
73168 jiffies_to_clock_t(icsk->icsk_ack.ato),
73169 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
73170@@ -2124,7 +2154,13 @@ static void get_timewait6_sock(struct se
73171 dest->s6_addr32[2], dest->s6_addr32[3], destp,
73172 tw->tw_substate, 0, 0,
73173 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73174- atomic_read(&tw->tw_refcnt), tw);
73175+ atomic_read(&tw->tw_refcnt),
73176+#ifdef CONFIG_GRKERNSEC_HIDESYM
73177+ NULL
73178+#else
73179+ tw
73180+#endif
73181+ );
73182 }
73183
73184 static int tcp6_seq_show(struct seq_file *seq, void *v)
73185diff -urNp linux-3.1.1/net/ipv6/udp.c linux-3.1.1/net/ipv6/udp.c
73186--- linux-3.1.1/net/ipv6/udp.c 2011-11-11 15:19:27.000000000 -0500
73187+++ linux-3.1.1/net/ipv6/udp.c 2011-11-16 18:40:44.000000000 -0500
73188@@ -50,6 +50,10 @@
73189 #include <linux/seq_file.h>
73190 #include "udp_impl.h"
73191
73192+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73193+extern int grsec_enable_blackhole;
73194+#endif
73195+
73196 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
73197 {
73198 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
73199@@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
73200
73201 return 0;
73202 drop:
73203- atomic_inc(&sk->sk_drops);
73204+ atomic_inc_unchecked(&sk->sk_drops);
73205 drop_no_sk_drops_inc:
73206 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73207 kfree_skb(skb);
73208@@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
73209 continue;
73210 }
73211 drop:
73212- atomic_inc(&sk->sk_drops);
73213+ atomic_inc_unchecked(&sk->sk_drops);
73214 UDP6_INC_STATS_BH(sock_net(sk),
73215 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
73216 UDP6_INC_STATS_BH(sock_net(sk),
73217@@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
73218 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
73219 proto == IPPROTO_UDPLITE);
73220
73221+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73222+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73223+#endif
73224 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
73225
73226 kfree_skb(skb);
73227@@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
73228 if (!sock_owned_by_user(sk))
73229 udpv6_queue_rcv_skb(sk, skb);
73230 else if (sk_add_backlog(sk, skb)) {
73231- atomic_inc(&sk->sk_drops);
73232+ atomic_inc_unchecked(&sk->sk_drops);
73233 bh_unlock_sock(sk);
73234 sock_put(sk);
73235 goto discard;
73236@@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct se
73237 0, 0L, 0,
73238 sock_i_uid(sp), 0,
73239 sock_i_ino(sp),
73240- atomic_read(&sp->sk_refcnt), sp,
73241- atomic_read(&sp->sk_drops));
73242+ atomic_read(&sp->sk_refcnt),
73243+#ifdef CONFIG_GRKERNSEC_HIDESYM
73244+ NULL,
73245+#else
73246+ sp,
73247+#endif
73248+ atomic_read_unchecked(&sp->sk_drops));
73249 }
73250
73251 int udp6_seq_show(struct seq_file *seq, void *v)
73252diff -urNp linux-3.1.1/net/irda/ircomm/ircomm_tty.c linux-3.1.1/net/irda/ircomm/ircomm_tty.c
73253--- linux-3.1.1/net/irda/ircomm/ircomm_tty.c 2011-11-11 15:19:27.000000000 -0500
73254+++ linux-3.1.1/net/irda/ircomm/ircomm_tty.c 2011-11-16 18:39:08.000000000 -0500
73255@@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(st
73256 add_wait_queue(&self->open_wait, &wait);
73257
73258 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
73259- __FILE__,__LINE__, tty->driver->name, self->open_count );
73260+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73261
73262 /* As far as I can see, we protect open_count - Jean II */
73263 spin_lock_irqsave(&self->spinlock, flags);
73264 if (!tty_hung_up_p(filp)) {
73265 extra_count = 1;
73266- self->open_count--;
73267+ local_dec(&self->open_count);
73268 }
73269 spin_unlock_irqrestore(&self->spinlock, flags);
73270- self->blocked_open++;
73271+ local_inc(&self->blocked_open);
73272
73273 while (1) {
73274 if (tty->termios->c_cflag & CBAUD) {
73275@@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(st
73276 }
73277
73278 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
73279- __FILE__,__LINE__, tty->driver->name, self->open_count );
73280+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73281
73282 schedule();
73283 }
73284@@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(st
73285 if (extra_count) {
73286 /* ++ is not atomic, so this should be protected - Jean II */
73287 spin_lock_irqsave(&self->spinlock, flags);
73288- self->open_count++;
73289+ local_inc(&self->open_count);
73290 spin_unlock_irqrestore(&self->spinlock, flags);
73291 }
73292- self->blocked_open--;
73293+ local_dec(&self->blocked_open);
73294
73295 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
73296- __FILE__,__LINE__, tty->driver->name, self->open_count);
73297+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
73298
73299 if (!retval)
73300 self->flags |= ASYNC_NORMAL_ACTIVE;
73301@@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_st
73302 }
73303 /* ++ is not atomic, so this should be protected - Jean II */
73304 spin_lock_irqsave(&self->spinlock, flags);
73305- self->open_count++;
73306+ local_inc(&self->open_count);
73307
73308 tty->driver_data = self;
73309 self->tty = tty;
73310 spin_unlock_irqrestore(&self->spinlock, flags);
73311
73312 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
73313- self->line, self->open_count);
73314+ self->line, local_read(&self->open_count));
73315
73316 /* Not really used by us, but lets do it anyway */
73317 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
73318@@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_
73319 return;
73320 }
73321
73322- if ((tty->count == 1) && (self->open_count != 1)) {
73323+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
73324 /*
73325 * Uh, oh. tty->count is 1, which means that the tty
73326 * structure will be freed. state->count should always
73327@@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_
73328 */
73329 IRDA_DEBUG(0, "%s(), bad serial port count; "
73330 "tty->count is 1, state->count is %d\n", __func__ ,
73331- self->open_count);
73332- self->open_count = 1;
73333+ local_read(&self->open_count));
73334+ local_set(&self->open_count, 1);
73335 }
73336
73337- if (--self->open_count < 0) {
73338+ if (local_dec_return(&self->open_count) < 0) {
73339 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
73340- __func__, self->line, self->open_count);
73341- self->open_count = 0;
73342+ __func__, self->line, local_read(&self->open_count));
73343+ local_set(&self->open_count, 0);
73344 }
73345- if (self->open_count) {
73346+ if (local_read(&self->open_count)) {
73347 spin_unlock_irqrestore(&self->spinlock, flags);
73348
73349 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
73350@@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_
73351 tty->closing = 0;
73352 self->tty = NULL;
73353
73354- if (self->blocked_open) {
73355+ if (local_read(&self->blocked_open)) {
73356 if (self->close_delay)
73357 schedule_timeout_interruptible(self->close_delay);
73358 wake_up_interruptible(&self->open_wait);
73359@@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty
73360 spin_lock_irqsave(&self->spinlock, flags);
73361 self->flags &= ~ASYNC_NORMAL_ACTIVE;
73362 self->tty = NULL;
73363- self->open_count = 0;
73364+ local_set(&self->open_count, 0);
73365 spin_unlock_irqrestore(&self->spinlock, flags);
73366
73367 wake_up_interruptible(&self->open_wait);
73368@@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct
73369 seq_putc(m, '\n');
73370
73371 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
73372- seq_printf(m, "Open count: %d\n", self->open_count);
73373+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
73374 seq_printf(m, "Max data size: %d\n", self->max_data_size);
73375 seq_printf(m, "Max header size: %d\n", self->max_header_size);
73376
73377diff -urNp linux-3.1.1/net/iucv/af_iucv.c linux-3.1.1/net/iucv/af_iucv.c
73378--- linux-3.1.1/net/iucv/af_iucv.c 2011-11-11 15:19:27.000000000 -0500
73379+++ linux-3.1.1/net/iucv/af_iucv.c 2011-11-16 18:39:08.000000000 -0500
73380@@ -648,10 +648,10 @@ static int iucv_sock_autobind(struct soc
73381
73382 write_lock_bh(&iucv_sk_list.lock);
73383
73384- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
73385+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73386 while (__iucv_get_sock_by_name(name)) {
73387 sprintf(name, "%08x",
73388- atomic_inc_return(&iucv_sk_list.autobind_name));
73389+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73390 }
73391
73392 write_unlock_bh(&iucv_sk_list.lock);
73393diff -urNp linux-3.1.1/net/key/af_key.c linux-3.1.1/net/key/af_key.c
73394--- linux-3.1.1/net/key/af_key.c 2011-11-11 15:19:27.000000000 -0500
73395+++ linux-3.1.1/net/key/af_key.c 2011-11-16 18:40:44.000000000 -0500
73396@@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
73397 struct xfrm_migrate m[XFRM_MAX_DEPTH];
73398 struct xfrm_kmaddress k;
73399
73400+ pax_track_stack();
73401+
73402 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
73403 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
73404 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
73405@@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
73406 static u32 get_acqseq(void)
73407 {
73408 u32 res;
73409- static atomic_t acqseq;
73410+ static atomic_unchecked_t acqseq;
73411
73412 do {
73413- res = atomic_inc_return(&acqseq);
73414+ res = atomic_inc_return_unchecked(&acqseq);
73415 } while (!res);
73416 return res;
73417 }
73418diff -urNp linux-3.1.1/net/lapb/lapb_iface.c linux-3.1.1/net/lapb/lapb_iface.c
73419--- linux-3.1.1/net/lapb/lapb_iface.c 2011-11-11 15:19:27.000000000 -0500
73420+++ linux-3.1.1/net/lapb/lapb_iface.c 2011-11-16 18:39:08.000000000 -0500
73421@@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev
73422 goto out;
73423
73424 lapb->dev = dev;
73425- lapb->callbacks = *callbacks;
73426+ lapb->callbacks = callbacks;
73427
73428 __lapb_insert_cb(lapb);
73429
73430@@ -380,32 +380,32 @@ int lapb_data_received(struct net_device
73431
73432 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
73433 {
73434- if (lapb->callbacks.connect_confirmation)
73435- lapb->callbacks.connect_confirmation(lapb->dev, reason);
73436+ if (lapb->callbacks->connect_confirmation)
73437+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
73438 }
73439
73440 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
73441 {
73442- if (lapb->callbacks.connect_indication)
73443- lapb->callbacks.connect_indication(lapb->dev, reason);
73444+ if (lapb->callbacks->connect_indication)
73445+ lapb->callbacks->connect_indication(lapb->dev, reason);
73446 }
73447
73448 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
73449 {
73450- if (lapb->callbacks.disconnect_confirmation)
73451- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
73452+ if (lapb->callbacks->disconnect_confirmation)
73453+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
73454 }
73455
73456 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
73457 {
73458- if (lapb->callbacks.disconnect_indication)
73459- lapb->callbacks.disconnect_indication(lapb->dev, reason);
73460+ if (lapb->callbacks->disconnect_indication)
73461+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
73462 }
73463
73464 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
73465 {
73466- if (lapb->callbacks.data_indication)
73467- return lapb->callbacks.data_indication(lapb->dev, skb);
73468+ if (lapb->callbacks->data_indication)
73469+ return lapb->callbacks->data_indication(lapb->dev, skb);
73470
73471 kfree_skb(skb);
73472 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
73473@@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *l
73474 {
73475 int used = 0;
73476
73477- if (lapb->callbacks.data_transmit) {
73478- lapb->callbacks.data_transmit(lapb->dev, skb);
73479+ if (lapb->callbacks->data_transmit) {
73480+ lapb->callbacks->data_transmit(lapb->dev, skb);
73481 used = 1;
73482 }
73483
73484diff -urNp linux-3.1.1/net/mac80211/debugfs_sta.c linux-3.1.1/net/mac80211/debugfs_sta.c
73485--- linux-3.1.1/net/mac80211/debugfs_sta.c 2011-11-11 15:19:27.000000000 -0500
73486+++ linux-3.1.1/net/mac80211/debugfs_sta.c 2011-11-16 18:40:44.000000000 -0500
73487@@ -140,6 +140,8 @@ static ssize_t sta_agg_status_read(struc
73488 struct tid_ampdu_rx *tid_rx;
73489 struct tid_ampdu_tx *tid_tx;
73490
73491+ pax_track_stack();
73492+
73493 rcu_read_lock();
73494
73495 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
73496@@ -240,6 +242,8 @@ static ssize_t sta_ht_capa_read(struct f
73497 struct sta_info *sta = file->private_data;
73498 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
73499
73500+ pax_track_stack();
73501+
73502 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
73503 htc->ht_supported ? "" : "not ");
73504 if (htc->ht_supported) {
73505diff -urNp linux-3.1.1/net/mac80211/ieee80211_i.h linux-3.1.1/net/mac80211/ieee80211_i.h
73506--- linux-3.1.1/net/mac80211/ieee80211_i.h 2011-11-11 15:19:27.000000000 -0500
73507+++ linux-3.1.1/net/mac80211/ieee80211_i.h 2011-11-16 18:39:08.000000000 -0500
73508@@ -27,6 +27,7 @@
73509 #include <net/ieee80211_radiotap.h>
73510 #include <net/cfg80211.h>
73511 #include <net/mac80211.h>
73512+#include <asm/local.h>
73513 #include "key.h"
73514 #include "sta_info.h"
73515
73516@@ -754,7 +755,7 @@ struct ieee80211_local {
73517 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
73518 spinlock_t queue_stop_reason_lock;
73519
73520- int open_count;
73521+ local_t open_count;
73522 int monitors, cooked_mntrs;
73523 /* number of interfaces with corresponding FIF_ flags */
73524 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
73525diff -urNp linux-3.1.1/net/mac80211/iface.c linux-3.1.1/net/mac80211/iface.c
73526--- linux-3.1.1/net/mac80211/iface.c 2011-11-11 15:19:27.000000000 -0500
73527+++ linux-3.1.1/net/mac80211/iface.c 2011-11-16 18:39:08.000000000 -0500
73528@@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
73529 break;
73530 }
73531
73532- if (local->open_count == 0) {
73533+ if (local_read(&local->open_count) == 0) {
73534 res = drv_start(local);
73535 if (res)
73536 goto err_del_bss;
73537@@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
73538 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
73539
73540 if (!is_valid_ether_addr(dev->dev_addr)) {
73541- if (!local->open_count)
73542+ if (!local_read(&local->open_count))
73543 drv_stop(local);
73544 return -EADDRNOTAVAIL;
73545 }
73546@@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
73547 mutex_unlock(&local->mtx);
73548
73549 if (coming_up)
73550- local->open_count++;
73551+ local_inc(&local->open_count);
73552
73553 if (hw_reconf_flags) {
73554 ieee80211_hw_config(local, hw_reconf_flags);
73555@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
73556 err_del_interface:
73557 drv_remove_interface(local, &sdata->vif);
73558 err_stop:
73559- if (!local->open_count)
73560+ if (!local_read(&local->open_count))
73561 drv_stop(local);
73562 err_del_bss:
73563 sdata->bss = NULL;
73564@@ -474,7 +474,7 @@ static void ieee80211_do_stop(struct iee
73565 }
73566
73567 if (going_down)
73568- local->open_count--;
73569+ local_dec(&local->open_count);
73570
73571 switch (sdata->vif.type) {
73572 case NL80211_IFTYPE_AP_VLAN:
73573@@ -533,7 +533,7 @@ static void ieee80211_do_stop(struct iee
73574
73575 ieee80211_recalc_ps(local, -1);
73576
73577- if (local->open_count == 0) {
73578+ if (local_read(&local->open_count) == 0) {
73579 if (local->ops->napi_poll)
73580 napi_disable(&local->napi);
73581 ieee80211_clear_tx_pending(local);
73582diff -urNp linux-3.1.1/net/mac80211/main.c linux-3.1.1/net/mac80211/main.c
73583--- linux-3.1.1/net/mac80211/main.c 2011-11-11 15:19:27.000000000 -0500
73584+++ linux-3.1.1/net/mac80211/main.c 2011-11-16 18:39:08.000000000 -0500
73585@@ -209,7 +209,7 @@ int ieee80211_hw_config(struct ieee80211
73586 local->hw.conf.power_level = power;
73587 }
73588
73589- if (changed && local->open_count) {
73590+ if (changed && local_read(&local->open_count)) {
73591 ret = drv_config(local, changed);
73592 /*
73593 * Goal:
73594diff -urNp linux-3.1.1/net/mac80211/mlme.c linux-3.1.1/net/mac80211/mlme.c
73595--- linux-3.1.1/net/mac80211/mlme.c 2011-11-11 15:19:27.000000000 -0500
73596+++ linux-3.1.1/net/mac80211/mlme.c 2011-11-16 18:40:44.000000000 -0500
73597@@ -1464,6 +1464,8 @@ static bool ieee80211_assoc_success(stru
73598 bool have_higher_than_11mbit = false;
73599 u16 ap_ht_cap_flags;
73600
73601+ pax_track_stack();
73602+
73603 /* AssocResp and ReassocResp have identical structure */
73604
73605 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
73606diff -urNp linux-3.1.1/net/mac80211/pm.c linux-3.1.1/net/mac80211/pm.c
73607--- linux-3.1.1/net/mac80211/pm.c 2011-11-11 15:19:27.000000000 -0500
73608+++ linux-3.1.1/net/mac80211/pm.c 2011-11-16 18:39:08.000000000 -0500
73609@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211
73610 struct ieee80211_sub_if_data *sdata;
73611 struct sta_info *sta;
73612
73613- if (!local->open_count)
73614+ if (!local_read(&local->open_count))
73615 goto suspend;
73616
73617 ieee80211_scan_cancel(local);
73618@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211
73619 cancel_work_sync(&local->dynamic_ps_enable_work);
73620 del_timer_sync(&local->dynamic_ps_timer);
73621
73622- local->wowlan = wowlan && local->open_count;
73623+ local->wowlan = wowlan && local_read(&local->open_count);
73624 if (local->wowlan) {
73625 int err = drv_suspend(local, wowlan);
73626 if (err < 0) {
73627@@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211
73628 }
73629
73630 /* stop hardware - this must stop RX */
73631- if (local->open_count)
73632+ if (local_read(&local->open_count))
73633 ieee80211_stop_device(local);
73634
73635 suspend:
73636diff -urNp linux-3.1.1/net/mac80211/rate.c linux-3.1.1/net/mac80211/rate.c
73637--- linux-3.1.1/net/mac80211/rate.c 2011-11-11 15:19:27.000000000 -0500
73638+++ linux-3.1.1/net/mac80211/rate.c 2011-11-16 18:39:08.000000000 -0500
73639@@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct
73640
73641 ASSERT_RTNL();
73642
73643- if (local->open_count)
73644+ if (local_read(&local->open_count))
73645 return -EBUSY;
73646
73647 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
73648diff -urNp linux-3.1.1/net/mac80211/rc80211_pid_debugfs.c linux-3.1.1/net/mac80211/rc80211_pid_debugfs.c
73649--- linux-3.1.1/net/mac80211/rc80211_pid_debugfs.c 2011-11-11 15:19:27.000000000 -0500
73650+++ linux-3.1.1/net/mac80211/rc80211_pid_debugfs.c 2011-11-16 18:39:08.000000000 -0500
73651@@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
73652
73653 spin_unlock_irqrestore(&events->lock, status);
73654
73655- if (copy_to_user(buf, pb, p))
73656+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
73657 return -EFAULT;
73658
73659 return p;
73660diff -urNp linux-3.1.1/net/mac80211/util.c linux-3.1.1/net/mac80211/util.c
73661--- linux-3.1.1/net/mac80211/util.c 2011-11-11 15:19:27.000000000 -0500
73662+++ linux-3.1.1/net/mac80211/util.c 2011-11-16 18:39:08.000000000 -0500
73663@@ -1166,7 +1166,7 @@ int ieee80211_reconfig(struct ieee80211_
73664 drv_set_coverage_class(local, hw->wiphy->coverage_class);
73665
73666 /* everything else happens only if HW was up & running */
73667- if (!local->open_count)
73668+ if (!local_read(&local->open_count))
73669 goto wake_up;
73670
73671 /*
73672diff -urNp linux-3.1.1/net/netfilter/ipvs/ip_vs_conn.c linux-3.1.1/net/netfilter/ipvs/ip_vs_conn.c
73673--- linux-3.1.1/net/netfilter/ipvs/ip_vs_conn.c 2011-11-11 15:19:27.000000000 -0500
73674+++ linux-3.1.1/net/netfilter/ipvs/ip_vs_conn.c 2011-11-16 18:39:08.000000000 -0500
73675@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
73676 /* Increase the refcnt counter of the dest */
73677 atomic_inc(&dest->refcnt);
73678
73679- conn_flags = atomic_read(&dest->conn_flags);
73680+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
73681 if (cp->protocol != IPPROTO_UDP)
73682 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
73683 /* Bind with the destination and its corresponding transmitter */
73684@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
73685 atomic_set(&cp->refcnt, 1);
73686
73687 atomic_set(&cp->n_control, 0);
73688- atomic_set(&cp->in_pkts, 0);
73689+ atomic_set_unchecked(&cp->in_pkts, 0);
73690
73691 atomic_inc(&ipvs->conn_count);
73692 if (flags & IP_VS_CONN_F_NO_CPORT)
73693@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
73694
73695 /* Don't drop the entry if its number of incoming packets is not
73696 located in [0, 8] */
73697- i = atomic_read(&cp->in_pkts);
73698+ i = atomic_read_unchecked(&cp->in_pkts);
73699 if (i > 8 || i < 0) return 0;
73700
73701 if (!todrop_rate[i]) return 0;
73702diff -urNp linux-3.1.1/net/netfilter/ipvs/ip_vs_core.c linux-3.1.1/net/netfilter/ipvs/ip_vs_core.c
73703--- linux-3.1.1/net/netfilter/ipvs/ip_vs_core.c 2011-11-11 15:19:27.000000000 -0500
73704+++ linux-3.1.1/net/netfilter/ipvs/ip_vs_core.c 2011-11-16 18:39:08.000000000 -0500
73705@@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
73706 ret = cp->packet_xmit(skb, cp, pd->pp);
73707 /* do not touch skb anymore */
73708
73709- atomic_inc(&cp->in_pkts);
73710+ atomic_inc_unchecked(&cp->in_pkts);
73711 ip_vs_conn_put(cp);
73712 return ret;
73713 }
73714@@ -1612,7 +1612,7 @@ ip_vs_in(unsigned int hooknum, struct sk
73715 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
73716 pkts = sysctl_sync_threshold(ipvs);
73717 else
73718- pkts = atomic_add_return(1, &cp->in_pkts);
73719+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
73720
73721 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
73722 cp->protocol == IPPROTO_SCTP) {
73723diff -urNp linux-3.1.1/net/netfilter/ipvs/ip_vs_ctl.c linux-3.1.1/net/netfilter/ipvs/ip_vs_ctl.c
73724--- linux-3.1.1/net/netfilter/ipvs/ip_vs_ctl.c 2011-11-11 15:19:27.000000000 -0500
73725+++ linux-3.1.1/net/netfilter/ipvs/ip_vs_ctl.c 2011-11-16 19:13:12.000000000 -0500
73726@@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
73727 ip_vs_rs_hash(ipvs, dest);
73728 write_unlock_bh(&ipvs->rs_lock);
73729 }
73730- atomic_set(&dest->conn_flags, conn_flags);
73731+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
73732
73733 /* bind the service */
73734 if (!dest->svc) {
73735@@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
73736 " %-7s %-6d %-10d %-10d\n",
73737 &dest->addr.in6,
73738 ntohs(dest->port),
73739- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
73740+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
73741 atomic_read(&dest->weight),
73742 atomic_read(&dest->activeconns),
73743 atomic_read(&dest->inactconns));
73744@@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
73745 "%-7s %-6d %-10d %-10d\n",
73746 ntohl(dest->addr.ip),
73747 ntohs(dest->port),
73748- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
73749+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
73750 atomic_read(&dest->weight),
73751 atomic_read(&dest->activeconns),
73752 atomic_read(&dest->inactconns));
73753@@ -2285,6 +2285,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
73754 struct ip_vs_dest_user_kern udest;
73755 struct netns_ipvs *ipvs = net_ipvs(net);
73756
73757+ pax_track_stack();
73758+
73759 if (!capable(CAP_NET_ADMIN))
73760 return -EPERM;
73761
73762@@ -2508,7 +2510,7 @@ __ip_vs_get_dest_entries(struct net *net
73763
73764 entry.addr = dest->addr.ip;
73765 entry.port = dest->port;
73766- entry.conn_flags = atomic_read(&dest->conn_flags);
73767+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
73768 entry.weight = atomic_read(&dest->weight);
73769 entry.u_threshold = dest->u_threshold;
73770 entry.l_threshold = dest->l_threshold;
73771@@ -3041,7 +3043,7 @@ static int ip_vs_genl_fill_dest(struct s
73772 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
73773
73774 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
73775- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
73776+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
73777 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
73778 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
73779 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
73780diff -urNp linux-3.1.1/net/netfilter/ipvs/ip_vs_sync.c linux-3.1.1/net/netfilter/ipvs/ip_vs_sync.c
73781--- linux-3.1.1/net/netfilter/ipvs/ip_vs_sync.c 2011-11-11 15:19:27.000000000 -0500
73782+++ linux-3.1.1/net/netfilter/ipvs/ip_vs_sync.c 2011-11-16 18:39:08.000000000 -0500
73783@@ -649,7 +649,7 @@ control:
73784 * i.e only increment in_pkts for Templates.
73785 */
73786 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
73787- int pkts = atomic_add_return(1, &cp->in_pkts);
73788+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
73789
73790 if (pkts % sysctl_sync_period(ipvs) != 1)
73791 return;
73792@@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *
73793
73794 if (opt)
73795 memcpy(&cp->in_seq, opt, sizeof(*opt));
73796- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
73797+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
73798 cp->state = state;
73799 cp->old_state = cp->state;
73800 /*
73801diff -urNp linux-3.1.1/net/netfilter/ipvs/ip_vs_xmit.c linux-3.1.1/net/netfilter/ipvs/ip_vs_xmit.c
73802--- linux-3.1.1/net/netfilter/ipvs/ip_vs_xmit.c 2011-11-11 15:19:27.000000000 -0500
73803+++ linux-3.1.1/net/netfilter/ipvs/ip_vs_xmit.c 2011-11-16 18:39:08.000000000 -0500
73804@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
73805 else
73806 rc = NF_ACCEPT;
73807 /* do not touch skb anymore */
73808- atomic_inc(&cp->in_pkts);
73809+ atomic_inc_unchecked(&cp->in_pkts);
73810 goto out;
73811 }
73812
73813@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
73814 else
73815 rc = NF_ACCEPT;
73816 /* do not touch skb anymore */
73817- atomic_inc(&cp->in_pkts);
73818+ atomic_inc_unchecked(&cp->in_pkts);
73819 goto out;
73820 }
73821
73822diff -urNp linux-3.1.1/net/netfilter/Kconfig linux-3.1.1/net/netfilter/Kconfig
73823--- linux-3.1.1/net/netfilter/Kconfig 2011-11-11 15:19:27.000000000 -0500
73824+++ linux-3.1.1/net/netfilter/Kconfig 2011-11-16 18:40:44.000000000 -0500
73825@@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
73826
73827 To compile it as a module, choose M here. If unsure, say N.
73828
73829+config NETFILTER_XT_MATCH_GRADM
73830+ tristate '"gradm" match support'
73831+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
73832+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
73833+ ---help---
73834+ The gradm match allows to match on grsecurity RBAC being enabled.
73835+ It is useful when iptables rules are applied early on bootup to
73836+ prevent connections to the machine (except from a trusted host)
73837+ while the RBAC system is disabled.
73838+
73839 config NETFILTER_XT_MATCH_HASHLIMIT
73840 tristate '"hashlimit" match support'
73841 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
73842diff -urNp linux-3.1.1/net/netfilter/Makefile linux-3.1.1/net/netfilter/Makefile
73843--- linux-3.1.1/net/netfilter/Makefile 2011-11-11 15:19:27.000000000 -0500
73844+++ linux-3.1.1/net/netfilter/Makefile 2011-11-16 18:40:44.000000000 -0500
73845@@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
73846 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
73847 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
73848 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
73849+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
73850 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
73851 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
73852 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
73853diff -urNp linux-3.1.1/net/netfilter/nfnetlink_log.c linux-3.1.1/net/netfilter/nfnetlink_log.c
73854--- linux-3.1.1/net/netfilter/nfnetlink_log.c 2011-11-11 15:19:27.000000000 -0500
73855+++ linux-3.1.1/net/netfilter/nfnetlink_log.c 2011-11-16 18:39:08.000000000 -0500
73856@@ -70,7 +70,7 @@ struct nfulnl_instance {
73857 };
73858
73859 static DEFINE_SPINLOCK(instances_lock);
73860-static atomic_t global_seq;
73861+static atomic_unchecked_t global_seq;
73862
73863 #define INSTANCE_BUCKETS 16
73864 static struct hlist_head instance_table[INSTANCE_BUCKETS];
73865@@ -505,7 +505,7 @@ __build_packet_message(struct nfulnl_ins
73866 /* global sequence number */
73867 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
73868 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
73869- htonl(atomic_inc_return(&global_seq)));
73870+ htonl(atomic_inc_return_unchecked(&global_seq)));
73871
73872 if (data_len) {
73873 struct nlattr *nla;
73874diff -urNp linux-3.1.1/net/netfilter/xt_gradm.c linux-3.1.1/net/netfilter/xt_gradm.c
73875--- linux-3.1.1/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
73876+++ linux-3.1.1/net/netfilter/xt_gradm.c 2011-11-16 18:40:44.000000000 -0500
73877@@ -0,0 +1,51 @@
73878+/*
73879+ * gradm match for netfilter
73880