]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.2.2-3.0.7-201110190635.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-3.0.7-201110190635.patch
CommitLineData
ca416633
PK
1diff -urNp linux-3.0.7/arch/alpha/include/asm/elf.h linux-3.0.7/arch/alpha/include/asm/elf.h
2--- linux-3.0.7/arch/alpha/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
3+++ linux-3.0.7/arch/alpha/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
4@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8+#ifdef CONFIG_PAX_ASLR
9+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10+
11+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13+#endif
14+
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18diff -urNp linux-3.0.7/arch/alpha/include/asm/pgtable.h linux-3.0.7/arch/alpha/include/asm/pgtable.h
19--- linux-3.0.7/arch/alpha/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
20+++ linux-3.0.7/arch/alpha/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
21@@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25+
26+#ifdef CONFIG_PAX_PAGEEXEC
27+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30+#else
31+# define PAGE_SHARED_NOEXEC PAGE_SHARED
32+# define PAGE_COPY_NOEXEC PAGE_COPY
33+# define PAGE_READONLY_NOEXEC PAGE_READONLY
34+#endif
35+
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39diff -urNp linux-3.0.7/arch/alpha/kernel/module.c linux-3.0.7/arch/alpha/kernel/module.c
40--- linux-3.0.7/arch/alpha/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
41+++ linux-3.0.7/arch/alpha/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
42@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46- gp = (u64)me->module_core + me->core_size - 0x8000;
47+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51diff -urNp linux-3.0.7/arch/alpha/kernel/osf_sys.c linux-3.0.7/arch/alpha/kernel/osf_sys.c
52--- linux-3.0.7/arch/alpha/kernel/osf_sys.c 2011-07-21 22:17:23.000000000 -0400
53+++ linux-3.0.7/arch/alpha/kernel/osf_sys.c 2011-08-23 21:47:55.000000000 -0400
54@@ -1145,7 +1145,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58- if (!vma || addr + len <= vma->vm_start)
59+ if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63@@ -1181,6 +1181,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67+#ifdef CONFIG_PAX_RANDMMAP
68+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69+#endif
70+
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74@@ -1188,8 +1192,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79- len, limit);
80+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81+
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85diff -urNp linux-3.0.7/arch/alpha/mm/fault.c linux-3.0.7/arch/alpha/mm/fault.c
86--- linux-3.0.7/arch/alpha/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
87+++ linux-3.0.7/arch/alpha/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
88@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92+#ifdef CONFIG_PAX_PAGEEXEC
93+/*
94+ * PaX: decide what to do with offenders (regs->pc = fault address)
95+ *
96+ * returns 1 when task should be killed
97+ * 2 when patched PLT trampoline was detected
98+ * 3 when unpatched PLT trampoline was detected
99+ */
100+static int pax_handle_fetch_fault(struct pt_regs *regs)
101+{
102+
103+#ifdef CONFIG_PAX_EMUPLT
104+ int err;
105+
106+ do { /* PaX: patched PLT emulation #1 */
107+ unsigned int ldah, ldq, jmp;
108+
109+ err = get_user(ldah, (unsigned int *)regs->pc);
110+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112+
113+ if (err)
114+ break;
115+
116+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118+ jmp == 0x6BFB0000U)
119+ {
120+ unsigned long r27, addr;
121+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123+
124+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125+ err = get_user(r27, (unsigned long *)addr);
126+ if (err)
127+ break;
128+
129+ regs->r27 = r27;
130+ regs->pc = r27;
131+ return 2;
132+ }
133+ } while (0);
134+
135+ do { /* PaX: patched PLT emulation #2 */
136+ unsigned int ldah, lda, br;
137+
138+ err = get_user(ldah, (unsigned int *)regs->pc);
139+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
140+ err |= get_user(br, (unsigned int *)(regs->pc+8));
141+
142+ if (err)
143+ break;
144+
145+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
147+ (br & 0xFFE00000U) == 0xC3E00000U)
148+ {
149+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152+
153+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155+ return 2;
156+ }
157+ } while (0);
158+
159+ do { /* PaX: unpatched PLT emulation */
160+ unsigned int br;
161+
162+ err = get_user(br, (unsigned int *)regs->pc);
163+
164+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165+ unsigned int br2, ldq, nop, jmp;
166+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167+
168+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169+ err = get_user(br2, (unsigned int *)addr);
170+ err |= get_user(ldq, (unsigned int *)(addr+4));
171+ err |= get_user(nop, (unsigned int *)(addr+8));
172+ err |= get_user(jmp, (unsigned int *)(addr+12));
173+ err |= get_user(resolver, (unsigned long *)(addr+16));
174+
175+ if (err)
176+ break;
177+
178+ if (br2 == 0xC3600000U &&
179+ ldq == 0xA77B000CU &&
180+ nop == 0x47FF041FU &&
181+ jmp == 0x6B7B0000U)
182+ {
183+ regs->r28 = regs->pc+4;
184+ regs->r27 = addr+16;
185+ regs->pc = resolver;
186+ return 3;
187+ }
188+ }
189+ } while (0);
190+#endif
191+
192+ return 1;
193+}
194+
195+void pax_report_insns(void *pc, void *sp)
196+{
197+ unsigned long i;
198+
199+ printk(KERN_ERR "PAX: bytes at PC: ");
200+ for (i = 0; i < 5; i++) {
201+ unsigned int c;
202+ if (get_user(c, (unsigned int *)pc+i))
203+ printk(KERN_CONT "???????? ");
204+ else
205+ printk(KERN_CONT "%08x ", c);
206+ }
207+ printk("\n");
208+}
209+#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217- if (!(vma->vm_flags & VM_EXEC))
218+ if (!(vma->vm_flags & VM_EXEC)) {
219+
220+#ifdef CONFIG_PAX_PAGEEXEC
221+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222+ goto bad_area;
223+
224+ up_read(&mm->mmap_sem);
225+ switch (pax_handle_fetch_fault(regs)) {
226+
227+#ifdef CONFIG_PAX_EMUPLT
228+ case 2:
229+ case 3:
230+ return;
231+#endif
232+
233+ }
234+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235+ do_group_exit(SIGKILL);
236+#else
237 goto bad_area;
238+#endif
239+
240+ }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244diff -urNp linux-3.0.7/arch/arm/include/asm/elf.h linux-3.0.7/arch/arm/include/asm/elf.h
245--- linux-3.0.7/arch/arm/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
246+++ linux-3.0.7/arch/arm/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
247@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253+
254+#ifdef CONFIG_PAX_ASLR
255+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256+
257+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259+#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t
264 extern void elf_set_personality(const struct elf32_hdr *);
265 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
266
267-struct mm_struct;
268-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
269-#define arch_randomize_brk arch_randomize_brk
270-
271 extern int vectors_user_mapping(void);
272 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
273 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
274diff -urNp linux-3.0.7/arch/arm/include/asm/kmap_types.h linux-3.0.7/arch/arm/include/asm/kmap_types.h
275--- linux-3.0.7/arch/arm/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
276+++ linux-3.0.7/arch/arm/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
277@@ -21,6 +21,7 @@ enum km_type {
278 KM_L1_CACHE,
279 KM_L2_CACHE,
280 KM_KDB,
281+ KM_CLEARPAGE,
282 KM_TYPE_NR
283 };
284
285diff -urNp linux-3.0.7/arch/arm/include/asm/uaccess.h linux-3.0.7/arch/arm/include/asm/uaccess.h
286--- linux-3.0.7/arch/arm/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
287+++ linux-3.0.7/arch/arm/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
288@@ -22,6 +22,8 @@
289 #define VERIFY_READ 0
290 #define VERIFY_WRITE 1
291
292+extern void check_object_size(const void *ptr, unsigned long n, bool to);
293+
294 /*
295 * The exception table consists of pairs of addresses: the first is the
296 * address of an instruction that is allowed to fault, and the second is
297@@ -387,8 +389,23 @@ do { \
298
299
300 #ifdef CONFIG_MMU
301-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
302-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
303+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
304+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
305+
306+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
307+{
308+ if (!__builtin_constant_p(n))
309+ check_object_size(to, n, false);
310+ return ___copy_from_user(to, from, n);
311+}
312+
313+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
314+{
315+ if (!__builtin_constant_p(n))
316+ check_object_size(from, n, true);
317+ return ___copy_to_user(to, from, n);
318+}
319+
320 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
321 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
322 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
323@@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
324
325 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
326 {
327+ if ((long)n < 0)
328+ return n;
329+
330 if (access_ok(VERIFY_READ, from, n))
331 n = __copy_from_user(to, from, n);
332 else /* security hole - plug it */
333@@ -412,6 +432,9 @@ static inline unsigned long __must_check
334
335 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
336 {
337+ if ((long)n < 0)
338+ return n;
339+
340 if (access_ok(VERIFY_WRITE, to, n))
341 n = __copy_to_user(to, from, n);
342 return n;
343diff -urNp linux-3.0.7/arch/arm/kernel/armksyms.c linux-3.0.7/arch/arm/kernel/armksyms.c
344--- linux-3.0.7/arch/arm/kernel/armksyms.c 2011-07-21 22:17:23.000000000 -0400
345+++ linux-3.0.7/arch/arm/kernel/armksyms.c 2011-08-23 21:47:55.000000000 -0400
346@@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
347 #ifdef CONFIG_MMU
348 EXPORT_SYMBOL(copy_page);
349
350-EXPORT_SYMBOL(__copy_from_user);
351-EXPORT_SYMBOL(__copy_to_user);
352+EXPORT_SYMBOL(___copy_from_user);
353+EXPORT_SYMBOL(___copy_to_user);
354 EXPORT_SYMBOL(__clear_user);
355
356 EXPORT_SYMBOL(__get_user_1);
357diff -urNp linux-3.0.7/arch/arm/kernel/process.c linux-3.0.7/arch/arm/kernel/process.c
358--- linux-3.0.7/arch/arm/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
359+++ linux-3.0.7/arch/arm/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
360@@ -28,7 +28,6 @@
361 #include <linux/tick.h>
362 #include <linux/utsname.h>
363 #include <linux/uaccess.h>
364-#include <linux/random.h>
365 #include <linux/hw_breakpoint.h>
366
367 #include <asm/cacheflush.h>
368@@ -479,12 +478,6 @@ unsigned long get_wchan(struct task_stru
369 return 0;
370 }
371
372-unsigned long arch_randomize_brk(struct mm_struct *mm)
373-{
374- unsigned long range_end = mm->brk + 0x02000000;
375- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
376-}
377-
378 #ifdef CONFIG_MMU
379 /*
380 * The vectors page is always readable from user space for the
381diff -urNp linux-3.0.7/arch/arm/kernel/traps.c linux-3.0.7/arch/arm/kernel/traps.c
382--- linux-3.0.7/arch/arm/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
383+++ linux-3.0.7/arch/arm/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
384@@ -257,6 +257,8 @@ static int __die(const char *str, int er
385
386 static DEFINE_SPINLOCK(die_lock);
387
388+extern void gr_handle_kernel_exploit(void);
389+
390 /*
391 * This function is protected against re-entrancy.
392 */
393@@ -284,6 +286,9 @@ void die(const char *str, struct pt_regs
394 panic("Fatal exception in interrupt");
395 if (panic_on_oops)
396 panic("Fatal exception");
397+
398+ gr_handle_kernel_exploit();
399+
400 if (ret != NOTIFY_STOP)
401 do_exit(SIGSEGV);
402 }
403diff -urNp linux-3.0.7/arch/arm/lib/copy_from_user.S linux-3.0.7/arch/arm/lib/copy_from_user.S
404--- linux-3.0.7/arch/arm/lib/copy_from_user.S 2011-07-21 22:17:23.000000000 -0400
405+++ linux-3.0.7/arch/arm/lib/copy_from_user.S 2011-08-23 21:47:55.000000000 -0400
406@@ -16,7 +16,7 @@
407 /*
408 * Prototype:
409 *
410- * size_t __copy_from_user(void *to, const void *from, size_t n)
411+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
412 *
413 * Purpose:
414 *
415@@ -84,11 +84,11 @@
416
417 .text
418
419-ENTRY(__copy_from_user)
420+ENTRY(___copy_from_user)
421
422 #include "copy_template.S"
423
424-ENDPROC(__copy_from_user)
425+ENDPROC(___copy_from_user)
426
427 .pushsection .fixup,"ax"
428 .align 0
429diff -urNp linux-3.0.7/arch/arm/lib/copy_to_user.S linux-3.0.7/arch/arm/lib/copy_to_user.S
430--- linux-3.0.7/arch/arm/lib/copy_to_user.S 2011-07-21 22:17:23.000000000 -0400
431+++ linux-3.0.7/arch/arm/lib/copy_to_user.S 2011-08-23 21:47:55.000000000 -0400
432@@ -16,7 +16,7 @@
433 /*
434 * Prototype:
435 *
436- * size_t __copy_to_user(void *to, const void *from, size_t n)
437+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
438 *
439 * Purpose:
440 *
441@@ -88,11 +88,11 @@
442 .text
443
444 ENTRY(__copy_to_user_std)
445-WEAK(__copy_to_user)
446+WEAK(___copy_to_user)
447
448 #include "copy_template.S"
449
450-ENDPROC(__copy_to_user)
451+ENDPROC(___copy_to_user)
452 ENDPROC(__copy_to_user_std)
453
454 .pushsection .fixup,"ax"
455diff -urNp linux-3.0.7/arch/arm/lib/uaccess.S linux-3.0.7/arch/arm/lib/uaccess.S
456--- linux-3.0.7/arch/arm/lib/uaccess.S 2011-07-21 22:17:23.000000000 -0400
457+++ linux-3.0.7/arch/arm/lib/uaccess.S 2011-08-23 21:47:55.000000000 -0400
458@@ -20,7 +20,7 @@
459
460 #define PAGE_SHIFT 12
461
462-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
463+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
464 * Purpose : copy a block to user memory from kernel memory
465 * Params : to - user memory
466 * : from - kernel memory
467@@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May f
468 sub r2, r2, ip
469 b .Lc2u_dest_aligned
470
471-ENTRY(__copy_to_user)
472+ENTRY(___copy_to_user)
473 stmfd sp!, {r2, r4 - r7, lr}
474 cmp r2, #4
475 blt .Lc2u_not_enough
476@@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May f
477 ldrgtb r3, [r1], #0
478 USER( T(strgtb) r3, [r0], #1) @ May fault
479 b .Lc2u_finished
480-ENDPROC(__copy_to_user)
481+ENDPROC(___copy_to_user)
482
483 .pushsection .fixup,"ax"
484 .align 0
485 9001: ldmfd sp!, {r0, r4 - r7, pc}
486 .popsection
487
488-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
489+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
490 * Purpose : copy a block from user memory to kernel memory
491 * Params : to - kernel memory
492 * : from - user memory
493@@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May f
494 sub r2, r2, ip
495 b .Lcfu_dest_aligned
496
497-ENTRY(__copy_from_user)
498+ENTRY(___copy_from_user)
499 stmfd sp!, {r0, r2, r4 - r7, lr}
500 cmp r2, #4
501 blt .Lcfu_not_enough
502@@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May f
503 USER( T(ldrgtb) r3, [r1], #1) @ May fault
504 strgtb r3, [r0], #1
505 b .Lcfu_finished
506-ENDPROC(__copy_from_user)
507+ENDPROC(___copy_from_user)
508
509 .pushsection .fixup,"ax"
510 .align 0
511diff -urNp linux-3.0.7/arch/arm/lib/uaccess_with_memcpy.c linux-3.0.7/arch/arm/lib/uaccess_with_memcpy.c
512--- linux-3.0.7/arch/arm/lib/uaccess_with_memcpy.c 2011-07-21 22:17:23.000000000 -0400
513+++ linux-3.0.7/arch/arm/lib/uaccess_with_memcpy.c 2011-08-23 21:47:55.000000000 -0400
514@@ -103,7 +103,7 @@ out:
515 }
516
517 unsigned long
518-__copy_to_user(void __user *to, const void *from, unsigned long n)
519+___copy_to_user(void __user *to, const void *from, unsigned long n)
520 {
521 /*
522 * This test is stubbed out of the main function above to keep
523diff -urNp linux-3.0.7/arch/arm/mach-ux500/mbox-db5500.c linux-3.0.7/arch/arm/mach-ux500/mbox-db5500.c
524--- linux-3.0.7/arch/arm/mach-ux500/mbox-db5500.c 2011-07-21 22:17:23.000000000 -0400
525+++ linux-3.0.7/arch/arm/mach-ux500/mbox-db5500.c 2011-08-23 21:48:14.000000000 -0400
526@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
527 return sprintf(buf, "0x%X\n", mbox_value);
528 }
529
530-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
531+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
532
533 static int mbox_show(struct seq_file *s, void *data)
534 {
535diff -urNp linux-3.0.7/arch/arm/mm/fault.c linux-3.0.7/arch/arm/mm/fault.c
536--- linux-3.0.7/arch/arm/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
537+++ linux-3.0.7/arch/arm/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
538@@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
539 }
540 #endif
541
542+#ifdef CONFIG_PAX_PAGEEXEC
543+ if (fsr & FSR_LNX_PF) {
544+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
545+ do_group_exit(SIGKILL);
546+ }
547+#endif
548+
549 tsk->thread.address = addr;
550 tsk->thread.error_code = fsr;
551 tsk->thread.trap_no = 14;
552@@ -379,6 +386,33 @@ do_page_fault(unsigned long addr, unsign
553 }
554 #endif /* CONFIG_MMU */
555
556+#ifdef CONFIG_PAX_PAGEEXEC
557+void pax_report_insns(void *pc, void *sp)
558+{
559+ long i;
560+
561+ printk(KERN_ERR "PAX: bytes at PC: ");
562+ for (i = 0; i < 20; i++) {
563+ unsigned char c;
564+ if (get_user(c, (__force unsigned char __user *)pc+i))
565+ printk(KERN_CONT "?? ");
566+ else
567+ printk(KERN_CONT "%02x ", c);
568+ }
569+ printk("\n");
570+
571+ printk(KERN_ERR "PAX: bytes at SP-4: ");
572+ for (i = -1; i < 20; i++) {
573+ unsigned long c;
574+ if (get_user(c, (__force unsigned long __user *)sp+i))
575+ printk(KERN_CONT "???????? ");
576+ else
577+ printk(KERN_CONT "%08lx ", c);
578+ }
579+ printk("\n");
580+}
581+#endif
582+
583 /*
584 * First Level Translation Fault Handler
585 *
586diff -urNp linux-3.0.7/arch/arm/mm/mmap.c linux-3.0.7/arch/arm/mm/mmap.c
587--- linux-3.0.7/arch/arm/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
588+++ linux-3.0.7/arch/arm/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
589@@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
590 if (len > TASK_SIZE)
591 return -ENOMEM;
592
593+#ifdef CONFIG_PAX_RANDMMAP
594+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
595+#endif
596+
597 if (addr) {
598 if (do_align)
599 addr = COLOUR_ALIGN(addr, pgoff);
600@@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
601 addr = PAGE_ALIGN(addr);
602
603 vma = find_vma(mm, addr);
604- if (TASK_SIZE - len >= addr &&
605- (!vma || addr + len <= vma->vm_start))
606+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
607 return addr;
608 }
609 if (len > mm->cached_hole_size) {
610- start_addr = addr = mm->free_area_cache;
611+ start_addr = addr = mm->free_area_cache;
612 } else {
613- start_addr = addr = TASK_UNMAPPED_BASE;
614- mm->cached_hole_size = 0;
615+ start_addr = addr = mm->mmap_base;
616+ mm->cached_hole_size = 0;
617 }
618 /* 8 bits of randomness in 20 address space bits */
619 if ((current->flags & PF_RANDOMIZE) &&
620@@ -100,14 +103,14 @@ full_search:
621 * Start a new search - just in case we missed
622 * some holes.
623 */
624- if (start_addr != TASK_UNMAPPED_BASE) {
625- start_addr = addr = TASK_UNMAPPED_BASE;
626+ if (start_addr != mm->mmap_base) {
627+ start_addr = addr = mm->mmap_base;
628 mm->cached_hole_size = 0;
629 goto full_search;
630 }
631 return -ENOMEM;
632 }
633- if (!vma || addr + len <= vma->vm_start) {
634+ if (check_heap_stack_gap(vma, addr, len)) {
635 /*
636 * Remember the place where we stopped the search:
637 */
638diff -urNp linux-3.0.7/arch/avr32/include/asm/elf.h linux-3.0.7/arch/avr32/include/asm/elf.h
639--- linux-3.0.7/arch/avr32/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
640+++ linux-3.0.7/arch/avr32/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
641@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
642 the loader. We need to make sure that it is out of the way of the program
643 that it will "exec", and that there is sufficient room for the brk. */
644
645-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
646+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
647
648+#ifdef CONFIG_PAX_ASLR
649+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
650+
651+#define PAX_DELTA_MMAP_LEN 15
652+#define PAX_DELTA_STACK_LEN 15
653+#endif
654
655 /* This yields a mask that user programs can use to figure out what
656 instruction set this CPU supports. This could be done in user space,
657diff -urNp linux-3.0.7/arch/avr32/include/asm/kmap_types.h linux-3.0.7/arch/avr32/include/asm/kmap_types.h
658--- linux-3.0.7/arch/avr32/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
659+++ linux-3.0.7/arch/avr32/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
660@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
661 D(11) KM_IRQ1,
662 D(12) KM_SOFTIRQ0,
663 D(13) KM_SOFTIRQ1,
664-D(14) KM_TYPE_NR
665+D(14) KM_CLEARPAGE,
666+D(15) KM_TYPE_NR
667 };
668
669 #undef D
670diff -urNp linux-3.0.7/arch/avr32/mm/fault.c linux-3.0.7/arch/avr32/mm/fault.c
671--- linux-3.0.7/arch/avr32/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
672+++ linux-3.0.7/arch/avr32/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
673@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
674
675 int exception_trace = 1;
676
677+#ifdef CONFIG_PAX_PAGEEXEC
678+void pax_report_insns(void *pc, void *sp)
679+{
680+ unsigned long i;
681+
682+ printk(KERN_ERR "PAX: bytes at PC: ");
683+ for (i = 0; i < 20; i++) {
684+ unsigned char c;
685+ if (get_user(c, (unsigned char *)pc+i))
686+ printk(KERN_CONT "???????? ");
687+ else
688+ printk(KERN_CONT "%02x ", c);
689+ }
690+ printk("\n");
691+}
692+#endif
693+
694 /*
695 * This routine handles page faults. It determines the address and the
696 * problem, and then passes it off to one of the appropriate routines.
697@@ -156,6 +173,16 @@ bad_area:
698 up_read(&mm->mmap_sem);
699
700 if (user_mode(regs)) {
701+
702+#ifdef CONFIG_PAX_PAGEEXEC
703+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
704+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
705+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
706+ do_group_exit(SIGKILL);
707+ }
708+ }
709+#endif
710+
711 if (exception_trace && printk_ratelimit())
712 printk("%s%s[%d]: segfault at %08lx pc %08lx "
713 "sp %08lx ecr %lu\n",
714diff -urNp linux-3.0.7/arch/frv/include/asm/kmap_types.h linux-3.0.7/arch/frv/include/asm/kmap_types.h
715--- linux-3.0.7/arch/frv/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
716+++ linux-3.0.7/arch/frv/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
717@@ -23,6 +23,7 @@ enum km_type {
718 KM_IRQ1,
719 KM_SOFTIRQ0,
720 KM_SOFTIRQ1,
721+ KM_CLEARPAGE,
722 KM_TYPE_NR
723 };
724
725diff -urNp linux-3.0.7/arch/frv/mm/elf-fdpic.c linux-3.0.7/arch/frv/mm/elf-fdpic.c
726--- linux-3.0.7/arch/frv/mm/elf-fdpic.c 2011-07-21 22:17:23.000000000 -0400
727+++ linux-3.0.7/arch/frv/mm/elf-fdpic.c 2011-08-23 21:47:55.000000000 -0400
728@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
729 if (addr) {
730 addr = PAGE_ALIGN(addr);
731 vma = find_vma(current->mm, addr);
732- if (TASK_SIZE - len >= addr &&
733- (!vma || addr + len <= vma->vm_start))
734+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
735 goto success;
736 }
737
738@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
739 for (; vma; vma = vma->vm_next) {
740 if (addr > limit)
741 break;
742- if (addr + len <= vma->vm_start)
743+ if (check_heap_stack_gap(vma, addr, len))
744 goto success;
745 addr = vma->vm_end;
746 }
747@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
748 for (; vma; vma = vma->vm_next) {
749 if (addr > limit)
750 break;
751- if (addr + len <= vma->vm_start)
752+ if (check_heap_stack_gap(vma, addr, len))
753 goto success;
754 addr = vma->vm_end;
755 }
756diff -urNp linux-3.0.7/arch/ia64/include/asm/elf.h linux-3.0.7/arch/ia64/include/asm/elf.h
757--- linux-3.0.7/arch/ia64/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
758+++ linux-3.0.7/arch/ia64/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
759@@ -42,6 +42,13 @@
760 */
761 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
762
763+#ifdef CONFIG_PAX_ASLR
764+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
765+
766+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
767+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
768+#endif
769+
770 #define PT_IA_64_UNWIND 0x70000001
771
772 /* IA-64 relocations: */
773diff -urNp linux-3.0.7/arch/ia64/include/asm/pgtable.h linux-3.0.7/arch/ia64/include/asm/pgtable.h
774--- linux-3.0.7/arch/ia64/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
775+++ linux-3.0.7/arch/ia64/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
776@@ -12,7 +12,7 @@
777 * David Mosberger-Tang <davidm@hpl.hp.com>
778 */
779
780-
781+#include <linux/const.h>
782 #include <asm/mman.h>
783 #include <asm/page.h>
784 #include <asm/processor.h>
785@@ -143,6 +143,17 @@
786 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
787 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
788 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
789+
790+#ifdef CONFIG_PAX_PAGEEXEC
791+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
792+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
793+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
794+#else
795+# define PAGE_SHARED_NOEXEC PAGE_SHARED
796+# define PAGE_READONLY_NOEXEC PAGE_READONLY
797+# define PAGE_COPY_NOEXEC PAGE_COPY
798+#endif
799+
800 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
801 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
802 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
803diff -urNp linux-3.0.7/arch/ia64/include/asm/spinlock.h linux-3.0.7/arch/ia64/include/asm/spinlock.h
804--- linux-3.0.7/arch/ia64/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
805+++ linux-3.0.7/arch/ia64/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
806@@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
807 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
808
809 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
810- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
811+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
812 }
813
814 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
815diff -urNp linux-3.0.7/arch/ia64/include/asm/uaccess.h linux-3.0.7/arch/ia64/include/asm/uaccess.h
816--- linux-3.0.7/arch/ia64/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
817+++ linux-3.0.7/arch/ia64/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
818@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
819 const void *__cu_from = (from); \
820 long __cu_len = (n); \
821 \
822- if (__access_ok(__cu_to, __cu_len, get_fs())) \
823+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
824 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
825 __cu_len; \
826 })
827@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
828 long __cu_len = (n); \
829 \
830 __chk_user_ptr(__cu_from); \
831- if (__access_ok(__cu_from, __cu_len, get_fs())) \
832+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
833 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
834 __cu_len; \
835 })
836diff -urNp linux-3.0.7/arch/ia64/kernel/module.c linux-3.0.7/arch/ia64/kernel/module.c
837--- linux-3.0.7/arch/ia64/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
838+++ linux-3.0.7/arch/ia64/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
839@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
840 void
841 module_free (struct module *mod, void *module_region)
842 {
843- if (mod && mod->arch.init_unw_table &&
844- module_region == mod->module_init) {
845+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
846 unw_remove_unwind_table(mod->arch.init_unw_table);
847 mod->arch.init_unw_table = NULL;
848 }
849@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
850 }
851
852 static inline int
853+in_init_rx (const struct module *mod, uint64_t addr)
854+{
855+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
856+}
857+
858+static inline int
859+in_init_rw (const struct module *mod, uint64_t addr)
860+{
861+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
862+}
863+
864+static inline int
865 in_init (const struct module *mod, uint64_t addr)
866 {
867- return addr - (uint64_t) mod->module_init < mod->init_size;
868+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
869+}
870+
871+static inline int
872+in_core_rx (const struct module *mod, uint64_t addr)
873+{
874+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
875+}
876+
877+static inline int
878+in_core_rw (const struct module *mod, uint64_t addr)
879+{
880+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
881 }
882
883 static inline int
884 in_core (const struct module *mod, uint64_t addr)
885 {
886- return addr - (uint64_t) mod->module_core < mod->core_size;
887+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
888 }
889
890 static inline int
891@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
892 break;
893
894 case RV_BDREL:
895- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
896+ if (in_init_rx(mod, val))
897+ val -= (uint64_t) mod->module_init_rx;
898+ else if (in_init_rw(mod, val))
899+ val -= (uint64_t) mod->module_init_rw;
900+ else if (in_core_rx(mod, val))
901+ val -= (uint64_t) mod->module_core_rx;
902+ else if (in_core_rw(mod, val))
903+ val -= (uint64_t) mod->module_core_rw;
904 break;
905
906 case RV_LTV:
907@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
908 * addresses have been selected...
909 */
910 uint64_t gp;
911- if (mod->core_size > MAX_LTOFF)
912+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
913 /*
914 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
915 * at the end of the module.
916 */
917- gp = mod->core_size - MAX_LTOFF / 2;
918+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
919 else
920- gp = mod->core_size / 2;
921- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
922+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
923+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
924 mod->arch.gp = gp;
925 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
926 }
927diff -urNp linux-3.0.7/arch/ia64/kernel/sys_ia64.c linux-3.0.7/arch/ia64/kernel/sys_ia64.c
928--- linux-3.0.7/arch/ia64/kernel/sys_ia64.c 2011-07-21 22:17:23.000000000 -0400
929+++ linux-3.0.7/arch/ia64/kernel/sys_ia64.c 2011-08-23 21:47:55.000000000 -0400
930@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
931 if (REGION_NUMBER(addr) == RGN_HPAGE)
932 addr = 0;
933 #endif
934+
935+#ifdef CONFIG_PAX_RANDMMAP
936+ if (mm->pax_flags & MF_PAX_RANDMMAP)
937+ addr = mm->free_area_cache;
938+ else
939+#endif
940+
941 if (!addr)
942 addr = mm->free_area_cache;
943
944@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
945 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
946 /* At this point: (!vma || addr < vma->vm_end). */
947 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
948- if (start_addr != TASK_UNMAPPED_BASE) {
949+ if (start_addr != mm->mmap_base) {
950 /* Start a new search --- just in case we missed some holes. */
951- addr = TASK_UNMAPPED_BASE;
952+ addr = mm->mmap_base;
953 goto full_search;
954 }
955 return -ENOMEM;
956 }
957- if (!vma || addr + len <= vma->vm_start) {
958+ if (check_heap_stack_gap(vma, addr, len)) {
959 /* Remember the address where we stopped this search: */
960 mm->free_area_cache = addr + len;
961 return addr;
962diff -urNp linux-3.0.7/arch/ia64/kernel/vmlinux.lds.S linux-3.0.7/arch/ia64/kernel/vmlinux.lds.S
963--- linux-3.0.7/arch/ia64/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
964+++ linux-3.0.7/arch/ia64/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
965@@ -199,7 +199,7 @@ SECTIONS {
966 /* Per-cpu data: */
967 . = ALIGN(PERCPU_PAGE_SIZE);
968 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
969- __phys_per_cpu_start = __per_cpu_load;
970+ __phys_per_cpu_start = per_cpu_load;
971 /*
972 * ensure percpu data fits
973 * into percpu page size
974diff -urNp linux-3.0.7/arch/ia64/mm/fault.c linux-3.0.7/arch/ia64/mm/fault.c
975--- linux-3.0.7/arch/ia64/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
976+++ linux-3.0.7/arch/ia64/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
977@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned
978 return pte_present(pte);
979 }
980
981+#ifdef CONFIG_PAX_PAGEEXEC
982+void pax_report_insns(void *pc, void *sp)
983+{
984+ unsigned long i;
985+
986+ printk(KERN_ERR "PAX: bytes at PC: ");
987+ for (i = 0; i < 8; i++) {
988+ unsigned int c;
989+ if (get_user(c, (unsigned int *)pc+i))
990+ printk(KERN_CONT "???????? ");
991+ else
992+ printk(KERN_CONT "%08x ", c);
993+ }
994+ printk("\n");
995+}
996+#endif
997+
998 void __kprobes
999 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1000 {
1001@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long addres
1002 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1003 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1004
1005- if ((vma->vm_flags & mask) != mask)
1006+ if ((vma->vm_flags & mask) != mask) {
1007+
1008+#ifdef CONFIG_PAX_PAGEEXEC
1009+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1010+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1011+ goto bad_area;
1012+
1013+ up_read(&mm->mmap_sem);
1014+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1015+ do_group_exit(SIGKILL);
1016+ }
1017+#endif
1018+
1019 goto bad_area;
1020
1021+ }
1022+
1023 /*
1024 * If for any reason at all we couldn't handle the fault, make
1025 * sure we exit gracefully rather than endlessly redo the
1026diff -urNp linux-3.0.7/arch/ia64/mm/hugetlbpage.c linux-3.0.7/arch/ia64/mm/hugetlbpage.c
1027--- linux-3.0.7/arch/ia64/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
1028+++ linux-3.0.7/arch/ia64/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
1029@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1030 /* At this point: (!vmm || addr < vmm->vm_end). */
1031 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1032 return -ENOMEM;
1033- if (!vmm || (addr + len) <= vmm->vm_start)
1034+ if (check_heap_stack_gap(vmm, addr, len))
1035 return addr;
1036 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1037 }
1038diff -urNp linux-3.0.7/arch/ia64/mm/init.c linux-3.0.7/arch/ia64/mm/init.c
1039--- linux-3.0.7/arch/ia64/mm/init.c 2011-07-21 22:17:23.000000000 -0400
1040+++ linux-3.0.7/arch/ia64/mm/init.c 2011-08-23 21:47:55.000000000 -0400
1041@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1042 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1043 vma->vm_end = vma->vm_start + PAGE_SIZE;
1044 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1045+
1046+#ifdef CONFIG_PAX_PAGEEXEC
1047+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1048+ vma->vm_flags &= ~VM_EXEC;
1049+
1050+#ifdef CONFIG_PAX_MPROTECT
1051+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1052+ vma->vm_flags &= ~VM_MAYEXEC;
1053+#endif
1054+
1055+ }
1056+#endif
1057+
1058 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1059 down_write(&current->mm->mmap_sem);
1060 if (insert_vm_struct(current->mm, vma)) {
1061diff -urNp linux-3.0.7/arch/m32r/lib/usercopy.c linux-3.0.7/arch/m32r/lib/usercopy.c
1062--- linux-3.0.7/arch/m32r/lib/usercopy.c 2011-07-21 22:17:23.000000000 -0400
1063+++ linux-3.0.7/arch/m32r/lib/usercopy.c 2011-08-23 21:47:55.000000000 -0400
1064@@ -14,6 +14,9 @@
1065 unsigned long
1066 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1067 {
1068+ if ((long)n < 0)
1069+ return n;
1070+
1071 prefetch(from);
1072 if (access_ok(VERIFY_WRITE, to, n))
1073 __copy_user(to,from,n);
1074@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1075 unsigned long
1076 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1077 {
1078+ if ((long)n < 0)
1079+ return n;
1080+
1081 prefetchw(to);
1082 if (access_ok(VERIFY_READ, from, n))
1083 __copy_user_zeroing(to,from,n);
1084diff -urNp linux-3.0.7/arch/mips/include/asm/elf.h linux-3.0.7/arch/mips/include/asm/elf.h
1085--- linux-3.0.7/arch/mips/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1086+++ linux-3.0.7/arch/mips/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1087@@ -372,13 +372,16 @@ extern const char *__elf_platform;
1088 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1089 #endif
1090
1091+#ifdef CONFIG_PAX_ASLR
1092+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1093+
1094+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1095+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1096+#endif
1097+
1098 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1099 struct linux_binprm;
1100 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1101 int uses_interp);
1102
1103-struct mm_struct;
1104-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1105-#define arch_randomize_brk arch_randomize_brk
1106-
1107 #endif /* _ASM_ELF_H */
1108diff -urNp linux-3.0.7/arch/mips/include/asm/page.h linux-3.0.7/arch/mips/include/asm/page.h
1109--- linux-3.0.7/arch/mips/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1110+++ linux-3.0.7/arch/mips/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1111@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1112 #ifdef CONFIG_CPU_MIPS32
1113 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1114 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1115- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1116+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1117 #else
1118 typedef struct { unsigned long long pte; } pte_t;
1119 #define pte_val(x) ((x).pte)
1120diff -urNp linux-3.0.7/arch/mips/include/asm/system.h linux-3.0.7/arch/mips/include/asm/system.h
1121--- linux-3.0.7/arch/mips/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1122+++ linux-3.0.7/arch/mips/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1123@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1124 */
1125 #define __ARCH_WANT_UNLOCKED_CTXSW
1126
1127-extern unsigned long arch_align_stack(unsigned long sp);
1128+#define arch_align_stack(x) ((x) & ~0xfUL)
1129
1130 #endif /* _ASM_SYSTEM_H */
1131diff -urNp linux-3.0.7/arch/mips/kernel/binfmt_elfn32.c linux-3.0.7/arch/mips/kernel/binfmt_elfn32.c
1132--- linux-3.0.7/arch/mips/kernel/binfmt_elfn32.c 2011-07-21 22:17:23.000000000 -0400
1133+++ linux-3.0.7/arch/mips/kernel/binfmt_elfn32.c 2011-08-23 21:47:55.000000000 -0400
1134@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1135 #undef ELF_ET_DYN_BASE
1136 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1137
1138+#ifdef CONFIG_PAX_ASLR
1139+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1140+
1141+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1142+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1143+#endif
1144+
1145 #include <asm/processor.h>
1146 #include <linux/module.h>
1147 #include <linux/elfcore.h>
1148diff -urNp linux-3.0.7/arch/mips/kernel/binfmt_elfo32.c linux-3.0.7/arch/mips/kernel/binfmt_elfo32.c
1149--- linux-3.0.7/arch/mips/kernel/binfmt_elfo32.c 2011-07-21 22:17:23.000000000 -0400
1150+++ linux-3.0.7/arch/mips/kernel/binfmt_elfo32.c 2011-08-23 21:47:55.000000000 -0400
1151@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1152 #undef ELF_ET_DYN_BASE
1153 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1154
1155+#ifdef CONFIG_PAX_ASLR
1156+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1157+
1158+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1159+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1160+#endif
1161+
1162 #include <asm/processor.h>
1163
1164 /*
1165diff -urNp linux-3.0.7/arch/mips/kernel/process.c linux-3.0.7/arch/mips/kernel/process.c
1166--- linux-3.0.7/arch/mips/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
1167+++ linux-3.0.7/arch/mips/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
1168@@ -473,15 +473,3 @@ unsigned long get_wchan(struct task_stru
1169 out:
1170 return pc;
1171 }
1172-
1173-/*
1174- * Don't forget that the stack pointer must be aligned on a 8 bytes
1175- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1176- */
1177-unsigned long arch_align_stack(unsigned long sp)
1178-{
1179- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1180- sp -= get_random_int() & ~PAGE_MASK;
1181-
1182- return sp & ALMASK;
1183-}
1184diff -urNp linux-3.0.7/arch/mips/mm/fault.c linux-3.0.7/arch/mips/mm/fault.c
1185--- linux-3.0.7/arch/mips/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1186+++ linux-3.0.7/arch/mips/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1187@@ -28,6 +28,23 @@
1188 #include <asm/highmem.h> /* For VMALLOC_END */
1189 #include <linux/kdebug.h>
1190
1191+#ifdef CONFIG_PAX_PAGEEXEC
1192+void pax_report_insns(void *pc, void *sp)
1193+{
1194+ unsigned long i;
1195+
1196+ printk(KERN_ERR "PAX: bytes at PC: ");
1197+ for (i = 0; i < 5; i++) {
1198+ unsigned int c;
1199+ if (get_user(c, (unsigned int *)pc+i))
1200+ printk(KERN_CONT "???????? ");
1201+ else
1202+ printk(KERN_CONT "%08x ", c);
1203+ }
1204+ printk("\n");
1205+}
1206+#endif
1207+
1208 /*
1209 * This routine handles page faults. It determines the address,
1210 * and the problem, and then passes it off to one of the appropriate
1211diff -urNp linux-3.0.7/arch/mips/mm/mmap.c linux-3.0.7/arch/mips/mm/mmap.c
1212--- linux-3.0.7/arch/mips/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
1213+++ linux-3.0.7/arch/mips/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
1214@@ -48,14 +48,18 @@ unsigned long arch_get_unmapped_area(str
1215 do_color_align = 0;
1216 if (filp || (flags & MAP_SHARED))
1217 do_color_align = 1;
1218+
1219+#ifdef CONFIG_PAX_RANDMMAP
1220+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1221+#endif
1222+
1223 if (addr) {
1224 if (do_color_align)
1225 addr = COLOUR_ALIGN(addr, pgoff);
1226 else
1227 addr = PAGE_ALIGN(addr);
1228 vmm = find_vma(current->mm, addr);
1229- if (TASK_SIZE - len >= addr &&
1230- (!vmm || addr + len <= vmm->vm_start))
1231+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1232 return addr;
1233 }
1234 addr = current->mm->mmap_base;
1235@@ -68,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
1236 /* At this point: (!vmm || addr < vmm->vm_end). */
1237 if (TASK_SIZE - len < addr)
1238 return -ENOMEM;
1239- if (!vmm || addr + len <= vmm->vm_start)
1240+ if (check_heap_stack_gap(vmm, addr, len))
1241 return addr;
1242 addr = vmm->vm_end;
1243 if (do_color_align)
1244@@ -93,30 +97,3 @@ void arch_pick_mmap_layout(struct mm_str
1245 mm->get_unmapped_area = arch_get_unmapped_area;
1246 mm->unmap_area = arch_unmap_area;
1247 }
1248-
1249-static inline unsigned long brk_rnd(void)
1250-{
1251- unsigned long rnd = get_random_int();
1252-
1253- rnd = rnd << PAGE_SHIFT;
1254- /* 8MB for 32bit, 256MB for 64bit */
1255- if (TASK_IS_32BIT_ADDR)
1256- rnd = rnd & 0x7ffffful;
1257- else
1258- rnd = rnd & 0xffffffful;
1259-
1260- return rnd;
1261-}
1262-
1263-unsigned long arch_randomize_brk(struct mm_struct *mm)
1264-{
1265- unsigned long base = mm->brk;
1266- unsigned long ret;
1267-
1268- ret = PAGE_ALIGN(base + brk_rnd());
1269-
1270- if (ret < mm->brk)
1271- return mm->brk;
1272-
1273- return ret;
1274-}
1275diff -urNp linux-3.0.7/arch/parisc/include/asm/elf.h linux-3.0.7/arch/parisc/include/asm/elf.h
1276--- linux-3.0.7/arch/parisc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1277+++ linux-3.0.7/arch/parisc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1278@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1279
1280 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1281
1282+#ifdef CONFIG_PAX_ASLR
1283+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1284+
1285+#define PAX_DELTA_MMAP_LEN 16
1286+#define PAX_DELTA_STACK_LEN 16
1287+#endif
1288+
1289 /* This yields a mask that user programs can use to figure out what
1290 instruction set this CPU supports. This could be done in user space,
1291 but it's not easy, and we've already done it here. */
1292diff -urNp linux-3.0.7/arch/parisc/include/asm/pgtable.h linux-3.0.7/arch/parisc/include/asm/pgtable.h
1293--- linux-3.0.7/arch/parisc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1294+++ linux-3.0.7/arch/parisc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1295@@ -210,6 +210,17 @@ struct vm_area_struct;
1296 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1297 #define PAGE_COPY PAGE_EXECREAD
1298 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1299+
1300+#ifdef CONFIG_PAX_PAGEEXEC
1301+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1302+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1303+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1304+#else
1305+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1306+# define PAGE_COPY_NOEXEC PAGE_COPY
1307+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1308+#endif
1309+
1310 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1311 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1312 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1313diff -urNp linux-3.0.7/arch/parisc/kernel/module.c linux-3.0.7/arch/parisc/kernel/module.c
1314--- linux-3.0.7/arch/parisc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
1315+++ linux-3.0.7/arch/parisc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
1316@@ -98,16 +98,38 @@
1317
1318 /* three functions to determine where in the module core
1319 * or init pieces the location is */
1320+static inline int in_init_rx(struct module *me, void *loc)
1321+{
1322+ return (loc >= me->module_init_rx &&
1323+ loc < (me->module_init_rx + me->init_size_rx));
1324+}
1325+
1326+static inline int in_init_rw(struct module *me, void *loc)
1327+{
1328+ return (loc >= me->module_init_rw &&
1329+ loc < (me->module_init_rw + me->init_size_rw));
1330+}
1331+
1332 static inline int in_init(struct module *me, void *loc)
1333 {
1334- return (loc >= me->module_init &&
1335- loc <= (me->module_init + me->init_size));
1336+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1337+}
1338+
1339+static inline int in_core_rx(struct module *me, void *loc)
1340+{
1341+ return (loc >= me->module_core_rx &&
1342+ loc < (me->module_core_rx + me->core_size_rx));
1343+}
1344+
1345+static inline int in_core_rw(struct module *me, void *loc)
1346+{
1347+ return (loc >= me->module_core_rw &&
1348+ loc < (me->module_core_rw + me->core_size_rw));
1349 }
1350
1351 static inline int in_core(struct module *me, void *loc)
1352 {
1353- return (loc >= me->module_core &&
1354- loc <= (me->module_core + me->core_size));
1355+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1356 }
1357
1358 static inline int in_local(struct module *me, void *loc)
1359@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_
1360 }
1361
1362 /* align things a bit */
1363- me->core_size = ALIGN(me->core_size, 16);
1364- me->arch.got_offset = me->core_size;
1365- me->core_size += gots * sizeof(struct got_entry);
1366-
1367- me->core_size = ALIGN(me->core_size, 16);
1368- me->arch.fdesc_offset = me->core_size;
1369- me->core_size += fdescs * sizeof(Elf_Fdesc);
1370+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1371+ me->arch.got_offset = me->core_size_rw;
1372+ me->core_size_rw += gots * sizeof(struct got_entry);
1373+
1374+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1375+ me->arch.fdesc_offset = me->core_size_rw;
1376+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1377
1378 me->arch.got_max = gots;
1379 me->arch.fdesc_max = fdescs;
1380@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module
1381
1382 BUG_ON(value == 0);
1383
1384- got = me->module_core + me->arch.got_offset;
1385+ got = me->module_core_rw + me->arch.got_offset;
1386 for (i = 0; got[i].addr; i++)
1387 if (got[i].addr == value)
1388 goto out;
1389@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module
1390 #ifdef CONFIG_64BIT
1391 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1392 {
1393- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1394+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1395
1396 if (!value) {
1397 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1398@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module
1399
1400 /* Create new one */
1401 fdesc->addr = value;
1402- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1403+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1404 return (Elf_Addr)fdesc;
1405 }
1406 #endif /* CONFIG_64BIT */
1407@@ -857,7 +879,7 @@ register_unwind_table(struct module *me,
1408
1409 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1410 end = table + sechdrs[me->arch.unwind_section].sh_size;
1411- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1412+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1413
1414 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1415 me->arch.unwind_section, table, end, gp);
1416diff -urNp linux-3.0.7/arch/parisc/kernel/sys_parisc.c linux-3.0.7/arch/parisc/kernel/sys_parisc.c
1417--- linux-3.0.7/arch/parisc/kernel/sys_parisc.c 2011-07-21 22:17:23.000000000 -0400
1418+++ linux-3.0.7/arch/parisc/kernel/sys_parisc.c 2011-08-23 21:47:55.000000000 -0400
1419@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1420 /* At this point: (!vma || addr < vma->vm_end). */
1421 if (TASK_SIZE - len < addr)
1422 return -ENOMEM;
1423- if (!vma || addr + len <= vma->vm_start)
1424+ if (check_heap_stack_gap(vma, addr, len))
1425 return addr;
1426 addr = vma->vm_end;
1427 }
1428@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1429 /* At this point: (!vma || addr < vma->vm_end). */
1430 if (TASK_SIZE - len < addr)
1431 return -ENOMEM;
1432- if (!vma || addr + len <= vma->vm_start)
1433+ if (check_heap_stack_gap(vma, addr, len))
1434 return addr;
1435 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1436 if (addr < vma->vm_end) /* handle wraparound */
1437@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1438 if (flags & MAP_FIXED)
1439 return addr;
1440 if (!addr)
1441- addr = TASK_UNMAPPED_BASE;
1442+ addr = current->mm->mmap_base;
1443
1444 if (filp) {
1445 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1446diff -urNp linux-3.0.7/arch/parisc/kernel/traps.c linux-3.0.7/arch/parisc/kernel/traps.c
1447--- linux-3.0.7/arch/parisc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
1448+++ linux-3.0.7/arch/parisc/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
1449@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1450
1451 down_read(&current->mm->mmap_sem);
1452 vma = find_vma(current->mm,regs->iaoq[0]);
1453- if (vma && (regs->iaoq[0] >= vma->vm_start)
1454- && (vma->vm_flags & VM_EXEC)) {
1455-
1456+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1457 fault_address = regs->iaoq[0];
1458 fault_space = regs->iasq[0];
1459
1460diff -urNp linux-3.0.7/arch/parisc/mm/fault.c linux-3.0.7/arch/parisc/mm/fault.c
1461--- linux-3.0.7/arch/parisc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
1462+++ linux-3.0.7/arch/parisc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
1463@@ -15,6 +15,7 @@
1464 #include <linux/sched.h>
1465 #include <linux/interrupt.h>
1466 #include <linux/module.h>
1467+#include <linux/unistd.h>
1468
1469 #include <asm/uaccess.h>
1470 #include <asm/traps.h>
1471@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1472 static unsigned long
1473 parisc_acctyp(unsigned long code, unsigned int inst)
1474 {
1475- if (code == 6 || code == 16)
1476+ if (code == 6 || code == 7 || code == 16)
1477 return VM_EXEC;
1478
1479 switch (inst & 0xf0000000) {
1480@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1481 }
1482 #endif
1483
1484+#ifdef CONFIG_PAX_PAGEEXEC
1485+/*
1486+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1487+ *
1488+ * returns 1 when task should be killed
1489+ * 2 when rt_sigreturn trampoline was detected
1490+ * 3 when unpatched PLT trampoline was detected
1491+ */
1492+static int pax_handle_fetch_fault(struct pt_regs *regs)
1493+{
1494+
1495+#ifdef CONFIG_PAX_EMUPLT
1496+ int err;
1497+
1498+ do { /* PaX: unpatched PLT emulation */
1499+ unsigned int bl, depwi;
1500+
1501+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1502+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1503+
1504+ if (err)
1505+ break;
1506+
1507+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1508+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1509+
1510+ err = get_user(ldw, (unsigned int *)addr);
1511+ err |= get_user(bv, (unsigned int *)(addr+4));
1512+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1513+
1514+ if (err)
1515+ break;
1516+
1517+ if (ldw == 0x0E801096U &&
1518+ bv == 0xEAC0C000U &&
1519+ ldw2 == 0x0E881095U)
1520+ {
1521+ unsigned int resolver, map;
1522+
1523+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1524+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1525+ if (err)
1526+ break;
1527+
1528+ regs->gr[20] = instruction_pointer(regs)+8;
1529+ regs->gr[21] = map;
1530+ regs->gr[22] = resolver;
1531+ regs->iaoq[0] = resolver | 3UL;
1532+ regs->iaoq[1] = regs->iaoq[0] + 4;
1533+ return 3;
1534+ }
1535+ }
1536+ } while (0);
1537+#endif
1538+
1539+#ifdef CONFIG_PAX_EMUTRAMP
1540+
1541+#ifndef CONFIG_PAX_EMUSIGRT
1542+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1543+ return 1;
1544+#endif
1545+
1546+ do { /* PaX: rt_sigreturn emulation */
1547+ unsigned int ldi1, ldi2, bel, nop;
1548+
1549+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1550+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1551+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1552+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1553+
1554+ if (err)
1555+ break;
1556+
1557+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1558+ ldi2 == 0x3414015AU &&
1559+ bel == 0xE4008200U &&
1560+ nop == 0x08000240U)
1561+ {
1562+ regs->gr[25] = (ldi1 & 2) >> 1;
1563+ regs->gr[20] = __NR_rt_sigreturn;
1564+ regs->gr[31] = regs->iaoq[1] + 16;
1565+ regs->sr[0] = regs->iasq[1];
1566+ regs->iaoq[0] = 0x100UL;
1567+ regs->iaoq[1] = regs->iaoq[0] + 4;
1568+ regs->iasq[0] = regs->sr[2];
1569+ regs->iasq[1] = regs->sr[2];
1570+ return 2;
1571+ }
1572+ } while (0);
1573+#endif
1574+
1575+ return 1;
1576+}
1577+
1578+void pax_report_insns(void *pc, void *sp)
1579+{
1580+ unsigned long i;
1581+
1582+ printk(KERN_ERR "PAX: bytes at PC: ");
1583+ for (i = 0; i < 5; i++) {
1584+ unsigned int c;
1585+ if (get_user(c, (unsigned int *)pc+i))
1586+ printk(KERN_CONT "???????? ");
1587+ else
1588+ printk(KERN_CONT "%08x ", c);
1589+ }
1590+ printk("\n");
1591+}
1592+#endif
1593+
1594 int fixup_exception(struct pt_regs *regs)
1595 {
1596 const struct exception_table_entry *fix;
1597@@ -192,8 +303,33 @@ good_area:
1598
1599 acc_type = parisc_acctyp(code,regs->iir);
1600
1601- if ((vma->vm_flags & acc_type) != acc_type)
1602+ if ((vma->vm_flags & acc_type) != acc_type) {
1603+
1604+#ifdef CONFIG_PAX_PAGEEXEC
1605+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1606+ (address & ~3UL) == instruction_pointer(regs))
1607+ {
1608+ up_read(&mm->mmap_sem);
1609+ switch (pax_handle_fetch_fault(regs)) {
1610+
1611+#ifdef CONFIG_PAX_EMUPLT
1612+ case 3:
1613+ return;
1614+#endif
1615+
1616+#ifdef CONFIG_PAX_EMUTRAMP
1617+ case 2:
1618+ return;
1619+#endif
1620+
1621+ }
1622+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1623+ do_group_exit(SIGKILL);
1624+ }
1625+#endif
1626+
1627 goto bad_area;
1628+ }
1629
1630 /*
1631 * If for any reason at all we couldn't handle the fault, make
1632diff -urNp linux-3.0.7/arch/powerpc/include/asm/elf.h linux-3.0.7/arch/powerpc/include/asm/elf.h
1633--- linux-3.0.7/arch/powerpc/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
1634+++ linux-3.0.7/arch/powerpc/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
1635@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1636 the loader. We need to make sure that it is out of the way of the program
1637 that it will "exec", and that there is sufficient room for the brk. */
1638
1639-extern unsigned long randomize_et_dyn(unsigned long base);
1640-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1641+#define ELF_ET_DYN_BASE (0x20000000)
1642+
1643+#ifdef CONFIG_PAX_ASLR
1644+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1645+
1646+#ifdef __powerpc64__
1647+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
1648+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
1649+#else
1650+#define PAX_DELTA_MMAP_LEN 15
1651+#define PAX_DELTA_STACK_LEN 15
1652+#endif
1653+#endif
1654
1655 /*
1656 * Our registers are always unsigned longs, whether we're a 32 bit
1657@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
1658 (0x7ff >> (PAGE_SHIFT - 12)) : \
1659 (0x3ffff >> (PAGE_SHIFT - 12)))
1660
1661-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1662-#define arch_randomize_brk arch_randomize_brk
1663-
1664 #endif /* __KERNEL__ */
1665
1666 /*
1667diff -urNp linux-3.0.7/arch/powerpc/include/asm/kmap_types.h linux-3.0.7/arch/powerpc/include/asm/kmap_types.h
1668--- linux-3.0.7/arch/powerpc/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
1669+++ linux-3.0.7/arch/powerpc/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
1670@@ -27,6 +27,7 @@ enum km_type {
1671 KM_PPC_SYNC_PAGE,
1672 KM_PPC_SYNC_ICACHE,
1673 KM_KDB,
1674+ KM_CLEARPAGE,
1675 KM_TYPE_NR
1676 };
1677
1678diff -urNp linux-3.0.7/arch/powerpc/include/asm/mman.h linux-3.0.7/arch/powerpc/include/asm/mman.h
1679--- linux-3.0.7/arch/powerpc/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
1680+++ linux-3.0.7/arch/powerpc/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
1681@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm
1682 }
1683 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
1684
1685-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
1686+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
1687 {
1688 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
1689 }
1690diff -urNp linux-3.0.7/arch/powerpc/include/asm/page_64.h linux-3.0.7/arch/powerpc/include/asm/page_64.h
1691--- linux-3.0.7/arch/powerpc/include/asm/page_64.h 2011-07-21 22:17:23.000000000 -0400
1692+++ linux-3.0.7/arch/powerpc/include/asm/page_64.h 2011-08-23 21:47:55.000000000 -0400
1693@@ -155,15 +155,18 @@ do { \
1694 * stack by default, so in the absence of a PT_GNU_STACK program header
1695 * we turn execute permission off.
1696 */
1697-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1698- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1699+#define VM_STACK_DEFAULT_FLAGS32 \
1700+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1701+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1702
1703 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1704 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1705
1706+#ifndef CONFIG_PAX_PAGEEXEC
1707 #define VM_STACK_DEFAULT_FLAGS \
1708 (is_32bit_task() ? \
1709 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1710+#endif
1711
1712 #include <asm-generic/getorder.h>
1713
1714diff -urNp linux-3.0.7/arch/powerpc/include/asm/page.h linux-3.0.7/arch/powerpc/include/asm/page.h
1715--- linux-3.0.7/arch/powerpc/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
1716+++ linux-3.0.7/arch/powerpc/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
1717@@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
1718 * and needs to be executable. This means the whole heap ends
1719 * up being executable.
1720 */
1721-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1722- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1723+#define VM_DATA_DEFAULT_FLAGS32 \
1724+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1725+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1726
1727 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1728 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1729@@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
1730 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1731 #endif
1732
1733+#define ktla_ktva(addr) (addr)
1734+#define ktva_ktla(addr) (addr)
1735+
1736 #ifndef __ASSEMBLY__
1737
1738 #undef STRICT_MM_TYPECHECKS
1739diff -urNp linux-3.0.7/arch/powerpc/include/asm/pgtable.h linux-3.0.7/arch/powerpc/include/asm/pgtable.h
1740--- linux-3.0.7/arch/powerpc/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
1741+++ linux-3.0.7/arch/powerpc/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
1742@@ -2,6 +2,7 @@
1743 #define _ASM_POWERPC_PGTABLE_H
1744 #ifdef __KERNEL__
1745
1746+#include <linux/const.h>
1747 #ifndef __ASSEMBLY__
1748 #include <asm/processor.h> /* For TASK_SIZE */
1749 #include <asm/mmu.h>
1750diff -urNp linux-3.0.7/arch/powerpc/include/asm/pte-hash32.h linux-3.0.7/arch/powerpc/include/asm/pte-hash32.h
1751--- linux-3.0.7/arch/powerpc/include/asm/pte-hash32.h 2011-07-21 22:17:23.000000000 -0400
1752+++ linux-3.0.7/arch/powerpc/include/asm/pte-hash32.h 2011-08-23 21:47:55.000000000 -0400
1753@@ -21,6 +21,7 @@
1754 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
1755 #define _PAGE_USER 0x004 /* usermode access allowed */
1756 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
1757+#define _PAGE_EXEC _PAGE_GUARDED
1758 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
1759 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
1760 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
1761diff -urNp linux-3.0.7/arch/powerpc/include/asm/reg.h linux-3.0.7/arch/powerpc/include/asm/reg.h
1762--- linux-3.0.7/arch/powerpc/include/asm/reg.h 2011-07-21 22:17:23.000000000 -0400
1763+++ linux-3.0.7/arch/powerpc/include/asm/reg.h 2011-08-23 21:47:55.000000000 -0400
1764@@ -209,6 +209,7 @@
1765 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
1766 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
1767 #define DSISR_NOHPTE 0x40000000 /* no translation found */
1768+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
1769 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
1770 #define DSISR_ISSTORE 0x02000000 /* access was a store */
1771 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
1772diff -urNp linux-3.0.7/arch/powerpc/include/asm/system.h linux-3.0.7/arch/powerpc/include/asm/system.h
1773--- linux-3.0.7/arch/powerpc/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
1774+++ linux-3.0.7/arch/powerpc/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
1775@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
1776 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
1777 #endif
1778
1779-extern unsigned long arch_align_stack(unsigned long sp);
1780+#define arch_align_stack(x) ((x) & ~0xfUL)
1781
1782 /* Used in very early kernel initialization. */
1783 extern unsigned long reloc_offset(void);
1784diff -urNp linux-3.0.7/arch/powerpc/include/asm/uaccess.h linux-3.0.7/arch/powerpc/include/asm/uaccess.h
1785--- linux-3.0.7/arch/powerpc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
1786+++ linux-3.0.7/arch/powerpc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
1787@@ -13,6 +13,8 @@
1788 #define VERIFY_READ 0
1789 #define VERIFY_WRITE 1
1790
1791+extern void check_object_size(const void *ptr, unsigned long n, bool to);
1792+
1793 /*
1794 * The fs value determines whether argument validity checking should be
1795 * performed or not. If get_fs() == USER_DS, checking is performed, with
1796@@ -327,52 +329,6 @@ do { \
1797 extern unsigned long __copy_tofrom_user(void __user *to,
1798 const void __user *from, unsigned long size);
1799
1800-#ifndef __powerpc64__
1801-
1802-static inline unsigned long copy_from_user(void *to,
1803- const void __user *from, unsigned long n)
1804-{
1805- unsigned long over;
1806-
1807- if (access_ok(VERIFY_READ, from, n))
1808- return __copy_tofrom_user((__force void __user *)to, from, n);
1809- if ((unsigned long)from < TASK_SIZE) {
1810- over = (unsigned long)from + n - TASK_SIZE;
1811- return __copy_tofrom_user((__force void __user *)to, from,
1812- n - over) + over;
1813- }
1814- return n;
1815-}
1816-
1817-static inline unsigned long copy_to_user(void __user *to,
1818- const void *from, unsigned long n)
1819-{
1820- unsigned long over;
1821-
1822- if (access_ok(VERIFY_WRITE, to, n))
1823- return __copy_tofrom_user(to, (__force void __user *)from, n);
1824- if ((unsigned long)to < TASK_SIZE) {
1825- over = (unsigned long)to + n - TASK_SIZE;
1826- return __copy_tofrom_user(to, (__force void __user *)from,
1827- n - over) + over;
1828- }
1829- return n;
1830-}
1831-
1832-#else /* __powerpc64__ */
1833-
1834-#define __copy_in_user(to, from, size) \
1835- __copy_tofrom_user((to), (from), (size))
1836-
1837-extern unsigned long copy_from_user(void *to, const void __user *from,
1838- unsigned long n);
1839-extern unsigned long copy_to_user(void __user *to, const void *from,
1840- unsigned long n);
1841-extern unsigned long copy_in_user(void __user *to, const void __user *from,
1842- unsigned long n);
1843-
1844-#endif /* __powerpc64__ */
1845-
1846 static inline unsigned long __copy_from_user_inatomic(void *to,
1847 const void __user *from, unsigned long n)
1848 {
1849@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
1850 if (ret == 0)
1851 return 0;
1852 }
1853+
1854+ if (!__builtin_constant_p(n))
1855+ check_object_size(to, n, false);
1856+
1857 return __copy_tofrom_user((__force void __user *)to, from, n);
1858 }
1859
1860@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
1861 if (ret == 0)
1862 return 0;
1863 }
1864+
1865+ if (!__builtin_constant_p(n))
1866+ check_object_size(from, n, true);
1867+
1868 return __copy_tofrom_user(to, (__force const void __user *)from, n);
1869 }
1870
1871@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
1872 return __copy_to_user_inatomic(to, from, size);
1873 }
1874
1875+#ifndef __powerpc64__
1876+
1877+static inline unsigned long __must_check copy_from_user(void *to,
1878+ const void __user *from, unsigned long n)
1879+{
1880+ unsigned long over;
1881+
1882+ if ((long)n < 0)
1883+ return n;
1884+
1885+ if (access_ok(VERIFY_READ, from, n)) {
1886+ if (!__builtin_constant_p(n))
1887+ check_object_size(to, n, false);
1888+ return __copy_tofrom_user((__force void __user *)to, from, n);
1889+ }
1890+ if ((unsigned long)from < TASK_SIZE) {
1891+ over = (unsigned long)from + n - TASK_SIZE;
1892+ if (!__builtin_constant_p(n - over))
1893+ check_object_size(to, n - over, false);
1894+ return __copy_tofrom_user((__force void __user *)to, from,
1895+ n - over) + over;
1896+ }
1897+ return n;
1898+}
1899+
1900+static inline unsigned long __must_check copy_to_user(void __user *to,
1901+ const void *from, unsigned long n)
1902+{
1903+ unsigned long over;
1904+
1905+ if ((long)n < 0)
1906+ return n;
1907+
1908+ if (access_ok(VERIFY_WRITE, to, n)) {
1909+ if (!__builtin_constant_p(n))
1910+ check_object_size(from, n, true);
1911+ return __copy_tofrom_user(to, (__force void __user *)from, n);
1912+ }
1913+ if ((unsigned long)to < TASK_SIZE) {
1914+ over = (unsigned long)to + n - TASK_SIZE;
1915+ if (!__builtin_constant_p(n))
1916+ check_object_size(from, n - over, true);
1917+ return __copy_tofrom_user(to, (__force void __user *)from,
1918+ n - over) + over;
1919+ }
1920+ return n;
1921+}
1922+
1923+#else /* __powerpc64__ */
1924+
1925+#define __copy_in_user(to, from, size) \
1926+ __copy_tofrom_user((to), (from), (size))
1927+
1928+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1929+{
1930+ if ((long)n < 0 || n > INT_MAX)
1931+ return n;
1932+
1933+ if (!__builtin_constant_p(n))
1934+ check_object_size(to, n, false);
1935+
1936+ if (likely(access_ok(VERIFY_READ, from, n)))
1937+ n = __copy_from_user(to, from, n);
1938+ else
1939+ memset(to, 0, n);
1940+ return n;
1941+}
1942+
1943+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1944+{
1945+ if ((long)n < 0 || n > INT_MAX)
1946+ return n;
1947+
1948+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
1949+ if (!__builtin_constant_p(n))
1950+ check_object_size(from, n, true);
1951+ n = __copy_to_user(to, from, n);
1952+ }
1953+ return n;
1954+}
1955+
1956+extern unsigned long copy_in_user(void __user *to, const void __user *from,
1957+ unsigned long n);
1958+
1959+#endif /* __powerpc64__ */
1960+
1961 extern unsigned long __clear_user(void __user *addr, unsigned long size);
1962
1963 static inline unsigned long clear_user(void __user *addr, unsigned long size)
1964diff -urNp linux-3.0.7/arch/powerpc/kernel/exceptions-64e.S linux-3.0.7/arch/powerpc/kernel/exceptions-64e.S
1965--- linux-3.0.7/arch/powerpc/kernel/exceptions-64e.S 2011-07-21 22:17:23.000000000 -0400
1966+++ linux-3.0.7/arch/powerpc/kernel/exceptions-64e.S 2011-08-23 21:47:55.000000000 -0400
1967@@ -567,6 +567,7 @@ storage_fault_common:
1968 std r14,_DAR(r1)
1969 std r15,_DSISR(r1)
1970 addi r3,r1,STACK_FRAME_OVERHEAD
1971+ bl .save_nvgprs
1972 mr r4,r14
1973 mr r5,r15
1974 ld r14,PACA_EXGEN+EX_R14(r13)
1975@@ -576,8 +577,7 @@ storage_fault_common:
1976 cmpdi r3,0
1977 bne- 1f
1978 b .ret_from_except_lite
1979-1: bl .save_nvgprs
1980- mr r5,r3
1981+1: mr r5,r3
1982 addi r3,r1,STACK_FRAME_OVERHEAD
1983 ld r4,_DAR(r1)
1984 bl .bad_page_fault
1985diff -urNp linux-3.0.7/arch/powerpc/kernel/exceptions-64s.S linux-3.0.7/arch/powerpc/kernel/exceptions-64s.S
1986--- linux-3.0.7/arch/powerpc/kernel/exceptions-64s.S 2011-07-21 22:17:23.000000000 -0400
1987+++ linux-3.0.7/arch/powerpc/kernel/exceptions-64s.S 2011-08-23 21:47:55.000000000 -0400
1988@@ -956,10 +956,10 @@ handle_page_fault:
1989 11: ld r4,_DAR(r1)
1990 ld r5,_DSISR(r1)
1991 addi r3,r1,STACK_FRAME_OVERHEAD
1992+ bl .save_nvgprs
1993 bl .do_page_fault
1994 cmpdi r3,0
1995 beq+ 13f
1996- bl .save_nvgprs
1997 mr r5,r3
1998 addi r3,r1,STACK_FRAME_OVERHEAD
1999 lwz r4,_DAR(r1)
2000diff -urNp linux-3.0.7/arch/powerpc/kernel/module_32.c linux-3.0.7/arch/powerpc/kernel/module_32.c
2001--- linux-3.0.7/arch/powerpc/kernel/module_32.c 2011-07-21 22:17:23.000000000 -0400
2002+++ linux-3.0.7/arch/powerpc/kernel/module_32.c 2011-08-23 21:47:55.000000000 -0400
2003@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2004 me->arch.core_plt_section = i;
2005 }
2006 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2007- printk("Module doesn't contain .plt or .init.plt sections.\n");
2008+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2009 return -ENOEXEC;
2010 }
2011
2012@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2013
2014 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2015 /* Init, or core PLT? */
2016- if (location >= mod->module_core
2017- && location < mod->module_core + mod->core_size)
2018+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2019+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2020 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2021- else
2022+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2023+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2024 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2025+ else {
2026+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2027+ return ~0UL;
2028+ }
2029
2030 /* Find this entry, or if that fails, the next avail. entry */
2031 while (entry->jump[0]) {
2032diff -urNp linux-3.0.7/arch/powerpc/kernel/module.c linux-3.0.7/arch/powerpc/kernel/module.c
2033--- linux-3.0.7/arch/powerpc/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2034+++ linux-3.0.7/arch/powerpc/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2035@@ -31,11 +31,24 @@
2036
2037 LIST_HEAD(module_bug_list);
2038
2039+#ifdef CONFIG_PAX_KERNEXEC
2040 void *module_alloc(unsigned long size)
2041 {
2042 if (size == 0)
2043 return NULL;
2044
2045+ return vmalloc(size);
2046+}
2047+
2048+void *module_alloc_exec(unsigned long size)
2049+#else
2050+void *module_alloc(unsigned long size)
2051+#endif
2052+
2053+{
2054+ if (size == 0)
2055+ return NULL;
2056+
2057 return vmalloc_exec(size);
2058 }
2059
2060@@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2061 vfree(module_region);
2062 }
2063
2064+#ifdef CONFIG_PAX_KERNEXEC
2065+void module_free_exec(struct module *mod, void *module_region)
2066+{
2067+ module_free(mod, module_region);
2068+}
2069+#endif
2070+
2071 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2072 const Elf_Shdr *sechdrs,
2073 const char *name)
2074diff -urNp linux-3.0.7/arch/powerpc/kernel/process.c linux-3.0.7/arch/powerpc/kernel/process.c
2075--- linux-3.0.7/arch/powerpc/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2076+++ linux-3.0.7/arch/powerpc/kernel/process.c 2011-08-23 21:48:14.000000000 -0400
2077@@ -676,8 +676,8 @@ void show_regs(struct pt_regs * regs)
2078 * Lookup NIP late so we have the best change of getting the
2079 * above info out without failing
2080 */
2081- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2082- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2083+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2084+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2085 #endif
2086 show_stack(current, (unsigned long *) regs->gpr[1]);
2087 if (!user_mode(regs))
2088@@ -1183,10 +1183,10 @@ void show_stack(struct task_struct *tsk,
2089 newsp = stack[0];
2090 ip = stack[STACK_FRAME_LR_SAVE];
2091 if (!firstframe || ip != lr) {
2092- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2093+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2094 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2095 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2096- printk(" (%pS)",
2097+ printk(" (%pA)",
2098 (void *)current->ret_stack[curr_frame].ret);
2099 curr_frame--;
2100 }
2101@@ -1206,7 +1206,7 @@ void show_stack(struct task_struct *tsk,
2102 struct pt_regs *regs = (struct pt_regs *)
2103 (sp + STACK_FRAME_OVERHEAD);
2104 lr = regs->link;
2105- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2106+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2107 regs->trap, (void *)regs->nip, (void *)lr);
2108 firstframe = 1;
2109 }
2110@@ -1281,58 +1281,3 @@ void thread_info_cache_init(void)
2111 }
2112
2113 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2114-
2115-unsigned long arch_align_stack(unsigned long sp)
2116-{
2117- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2118- sp -= get_random_int() & ~PAGE_MASK;
2119- return sp & ~0xf;
2120-}
2121-
2122-static inline unsigned long brk_rnd(void)
2123-{
2124- unsigned long rnd = 0;
2125-
2126- /* 8MB for 32bit, 1GB for 64bit */
2127- if (is_32bit_task())
2128- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2129- else
2130- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2131-
2132- return rnd << PAGE_SHIFT;
2133-}
2134-
2135-unsigned long arch_randomize_brk(struct mm_struct *mm)
2136-{
2137- unsigned long base = mm->brk;
2138- unsigned long ret;
2139-
2140-#ifdef CONFIG_PPC_STD_MMU_64
2141- /*
2142- * If we are using 1TB segments and we are allowed to randomise
2143- * the heap, we can put it above 1TB so it is backed by a 1TB
2144- * segment. Otherwise the heap will be in the bottom 1TB
2145- * which always uses 256MB segments and this may result in a
2146- * performance penalty.
2147- */
2148- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2149- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2150-#endif
2151-
2152- ret = PAGE_ALIGN(base + brk_rnd());
2153-
2154- if (ret < mm->brk)
2155- return mm->brk;
2156-
2157- return ret;
2158-}
2159-
2160-unsigned long randomize_et_dyn(unsigned long base)
2161-{
2162- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2163-
2164- if (ret < base)
2165- return base;
2166-
2167- return ret;
2168-}
2169diff -urNp linux-3.0.7/arch/powerpc/kernel/signal_32.c linux-3.0.7/arch/powerpc/kernel/signal_32.c
2170--- linux-3.0.7/arch/powerpc/kernel/signal_32.c 2011-07-21 22:17:23.000000000 -0400
2171+++ linux-3.0.7/arch/powerpc/kernel/signal_32.c 2011-08-23 21:47:55.000000000 -0400
2172@@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig
2173 /* Save user registers on the stack */
2174 frame = &rt_sf->uc.uc_mcontext;
2175 addr = frame;
2176- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2177+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2178 if (save_user_regs(regs, frame, 0, 1))
2179 goto badframe;
2180 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2181diff -urNp linux-3.0.7/arch/powerpc/kernel/signal_64.c linux-3.0.7/arch/powerpc/kernel/signal_64.c
2182--- linux-3.0.7/arch/powerpc/kernel/signal_64.c 2011-07-21 22:17:23.000000000 -0400
2183+++ linux-3.0.7/arch/powerpc/kernel/signal_64.c 2011-08-23 21:47:55.000000000 -0400
2184@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct
2185 current->thread.fpscr.val = 0;
2186
2187 /* Set up to return from userspace. */
2188- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2189+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2190 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2191 } else {
2192 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2193diff -urNp linux-3.0.7/arch/powerpc/kernel/traps.c linux-3.0.7/arch/powerpc/kernel/traps.c
2194--- linux-3.0.7/arch/powerpc/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
2195+++ linux-3.0.7/arch/powerpc/kernel/traps.c 2011-08-23 21:48:14.000000000 -0400
2196@@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2197 static inline void pmac_backlight_unblank(void) { }
2198 #endif
2199
2200+extern void gr_handle_kernel_exploit(void);
2201+
2202 int die(const char *str, struct pt_regs *regs, long err)
2203 {
2204 static struct {
2205@@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs
2206 if (panic_on_oops)
2207 panic("Fatal exception");
2208
2209+ gr_handle_kernel_exploit();
2210+
2211 oops_exit();
2212 do_exit(err);
2213
2214diff -urNp linux-3.0.7/arch/powerpc/kernel/vdso.c linux-3.0.7/arch/powerpc/kernel/vdso.c
2215--- linux-3.0.7/arch/powerpc/kernel/vdso.c 2011-07-21 22:17:23.000000000 -0400
2216+++ linux-3.0.7/arch/powerpc/kernel/vdso.c 2011-08-23 21:47:55.000000000 -0400
2217@@ -36,6 +36,7 @@
2218 #include <asm/firmware.h>
2219 #include <asm/vdso.h>
2220 #include <asm/vdso_datapage.h>
2221+#include <asm/mman.h>
2222
2223 #include "setup.h"
2224
2225@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2226 vdso_base = VDSO32_MBASE;
2227 #endif
2228
2229- current->mm->context.vdso_base = 0;
2230+ current->mm->context.vdso_base = ~0UL;
2231
2232 /* vDSO has a problem and was disabled, just don't "enable" it for the
2233 * process
2234@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2235 vdso_base = get_unmapped_area(NULL, vdso_base,
2236 (vdso_pages << PAGE_SHIFT) +
2237 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2238- 0, 0);
2239+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2240 if (IS_ERR_VALUE(vdso_base)) {
2241 rc = vdso_base;
2242 goto fail_mmapsem;
2243diff -urNp linux-3.0.7/arch/powerpc/lib/usercopy_64.c linux-3.0.7/arch/powerpc/lib/usercopy_64.c
2244--- linux-3.0.7/arch/powerpc/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
2245+++ linux-3.0.7/arch/powerpc/lib/usercopy_64.c 2011-08-23 21:47:55.000000000 -0400
2246@@ -9,22 +9,6 @@
2247 #include <linux/module.h>
2248 #include <asm/uaccess.h>
2249
2250-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2251-{
2252- if (likely(access_ok(VERIFY_READ, from, n)))
2253- n = __copy_from_user(to, from, n);
2254- else
2255- memset(to, 0, n);
2256- return n;
2257-}
2258-
2259-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2260-{
2261- if (likely(access_ok(VERIFY_WRITE, to, n)))
2262- n = __copy_to_user(to, from, n);
2263- return n;
2264-}
2265-
2266 unsigned long copy_in_user(void __user *to, const void __user *from,
2267 unsigned long n)
2268 {
2269@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2270 return n;
2271 }
2272
2273-EXPORT_SYMBOL(copy_from_user);
2274-EXPORT_SYMBOL(copy_to_user);
2275 EXPORT_SYMBOL(copy_in_user);
2276
2277diff -urNp linux-3.0.7/arch/powerpc/mm/fault.c linux-3.0.7/arch/powerpc/mm/fault.c
2278--- linux-3.0.7/arch/powerpc/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
2279+++ linux-3.0.7/arch/powerpc/mm/fault.c 2011-08-23 21:47:55.000000000 -0400
2280@@ -32,6 +32,10 @@
2281 #include <linux/perf_event.h>
2282 #include <linux/magic.h>
2283 #include <linux/ratelimit.h>
2284+#include <linux/slab.h>
2285+#include <linux/pagemap.h>
2286+#include <linux/compiler.h>
2287+#include <linux/unistd.h>
2288
2289 #include <asm/firmware.h>
2290 #include <asm/page.h>
2291@@ -43,6 +47,7 @@
2292 #include <asm/tlbflush.h>
2293 #include <asm/siginfo.h>
2294 #include <mm/mmu_decl.h>
2295+#include <asm/ptrace.h>
2296
2297 #ifdef CONFIG_KPROBES
2298 static inline int notify_page_fault(struct pt_regs *regs)
2299@@ -66,6 +71,33 @@ static inline int notify_page_fault(stru
2300 }
2301 #endif
2302
2303+#ifdef CONFIG_PAX_PAGEEXEC
2304+/*
2305+ * PaX: decide what to do with offenders (regs->nip = fault address)
2306+ *
2307+ * returns 1 when task should be killed
2308+ */
2309+static int pax_handle_fetch_fault(struct pt_regs *regs)
2310+{
2311+ return 1;
2312+}
2313+
2314+void pax_report_insns(void *pc, void *sp)
2315+{
2316+ unsigned long i;
2317+
2318+ printk(KERN_ERR "PAX: bytes at PC: ");
2319+ for (i = 0; i < 5; i++) {
2320+ unsigned int c;
2321+ if (get_user(c, (unsigned int __user *)pc+i))
2322+ printk(KERN_CONT "???????? ");
2323+ else
2324+ printk(KERN_CONT "%08x ", c);
2325+ }
2326+ printk("\n");
2327+}
2328+#endif
2329+
2330 /*
2331 * Check whether the instruction at regs->nip is a store using
2332 * an update addressing form which will update r1.
2333@@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_re
2334 * indicate errors in DSISR but can validly be set in SRR1.
2335 */
2336 if (trap == 0x400)
2337- error_code &= 0x48200000;
2338+ error_code &= 0x58200000;
2339 else
2340 is_write = error_code & DSISR_ISSTORE;
2341 #else
2342@@ -259,7 +291,7 @@ good_area:
2343 * "undefined". Of those that can be set, this is the only
2344 * one which seems bad.
2345 */
2346- if (error_code & 0x10000000)
2347+ if (error_code & DSISR_GUARDED)
2348 /* Guarded storage error. */
2349 goto bad_area;
2350 #endif /* CONFIG_8xx */
2351@@ -274,7 +306,7 @@ good_area:
2352 * processors use the same I/D cache coherency mechanism
2353 * as embedded.
2354 */
2355- if (error_code & DSISR_PROTFAULT)
2356+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2357 goto bad_area;
2358 #endif /* CONFIG_PPC_STD_MMU */
2359
2360@@ -343,6 +375,23 @@ bad_area:
2361 bad_area_nosemaphore:
2362 /* User mode accesses cause a SIGSEGV */
2363 if (user_mode(regs)) {
2364+
2365+#ifdef CONFIG_PAX_PAGEEXEC
2366+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2367+#ifdef CONFIG_PPC_STD_MMU
2368+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2369+#else
2370+ if (is_exec && regs->nip == address) {
2371+#endif
2372+ switch (pax_handle_fetch_fault(regs)) {
2373+ }
2374+
2375+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2376+ do_group_exit(SIGKILL);
2377+ }
2378+ }
2379+#endif
2380+
2381 _exception(SIGSEGV, regs, code, address);
2382 return 0;
2383 }
2384diff -urNp linux-3.0.7/arch/powerpc/mm/mmap_64.c linux-3.0.7/arch/powerpc/mm/mmap_64.c
2385--- linux-3.0.7/arch/powerpc/mm/mmap_64.c 2011-07-21 22:17:23.000000000 -0400
2386+++ linux-3.0.7/arch/powerpc/mm/mmap_64.c 2011-08-23 21:47:55.000000000 -0400
2387@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2388 */
2389 if (mmap_is_legacy()) {
2390 mm->mmap_base = TASK_UNMAPPED_BASE;
2391+
2392+#ifdef CONFIG_PAX_RANDMMAP
2393+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2394+ mm->mmap_base += mm->delta_mmap;
2395+#endif
2396+
2397 mm->get_unmapped_area = arch_get_unmapped_area;
2398 mm->unmap_area = arch_unmap_area;
2399 } else {
2400 mm->mmap_base = mmap_base();
2401+
2402+#ifdef CONFIG_PAX_RANDMMAP
2403+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2404+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2405+#endif
2406+
2407 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2408 mm->unmap_area = arch_unmap_area_topdown;
2409 }
2410diff -urNp linux-3.0.7/arch/powerpc/mm/slice.c linux-3.0.7/arch/powerpc/mm/slice.c
2411--- linux-3.0.7/arch/powerpc/mm/slice.c 2011-07-21 22:17:23.000000000 -0400
2412+++ linux-3.0.7/arch/powerpc/mm/slice.c 2011-08-23 21:47:55.000000000 -0400
2413@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2414 if ((mm->task_size - len) < addr)
2415 return 0;
2416 vma = find_vma(mm, addr);
2417- return (!vma || (addr + len) <= vma->vm_start);
2418+ return check_heap_stack_gap(vma, addr, len);
2419 }
2420
2421 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2422@@ -256,7 +256,7 @@ full_search:
2423 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2424 continue;
2425 }
2426- if (!vma || addr + len <= vma->vm_start) {
2427+ if (check_heap_stack_gap(vma, addr, len)) {
2428 /*
2429 * Remember the place where we stopped the search:
2430 */
2431@@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2432 }
2433 }
2434
2435- addr = mm->mmap_base;
2436- while (addr > len) {
2437+ if (mm->mmap_base < len)
2438+ addr = -ENOMEM;
2439+ else
2440+ addr = mm->mmap_base - len;
2441+
2442+ while (!IS_ERR_VALUE(addr)) {
2443 /* Go down by chunk size */
2444- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2445+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
2446
2447 /* Check for hit with different page size */
2448 mask = slice_range_to_mask(addr, len);
2449@@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2450 * return with success:
2451 */
2452 vma = find_vma(mm, addr);
2453- if (!vma || (addr + len) <= vma->vm_start) {
2454+ if (check_heap_stack_gap(vma, addr, len)) {
2455 /* remember the address as a hint for next time */
2456 if (use_cache)
2457 mm->free_area_cache = addr;
2458@@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2459 mm->cached_hole_size = vma->vm_start - addr;
2460
2461 /* try just below the current vma->vm_start */
2462- addr = vma->vm_start;
2463+ addr = skip_heap_stack_gap(vma, len);
2464 }
2465
2466 /*
2467@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2468 if (fixed && addr > (mm->task_size - len))
2469 return -EINVAL;
2470
2471+#ifdef CONFIG_PAX_RANDMMAP
2472+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2473+ addr = 0;
2474+#endif
2475+
2476 /* If hint, make sure it matches our alignment restrictions */
2477 if (!fixed && addr) {
2478 addr = _ALIGN_UP(addr, 1ul << pshift);
2479diff -urNp linux-3.0.7/arch/s390/include/asm/elf.h linux-3.0.7/arch/s390/include/asm/elf.h
2480--- linux-3.0.7/arch/s390/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
2481+++ linux-3.0.7/arch/s390/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
2482@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2483 the loader. We need to make sure that it is out of the way of the program
2484 that it will "exec", and that there is sufficient room for the brk. */
2485
2486-extern unsigned long randomize_et_dyn(unsigned long base);
2487-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2488+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2489+
2490+#ifdef CONFIG_PAX_ASLR
2491+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2492+
2493+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2494+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2495+#endif
2496
2497 /* This yields a mask that user programs can use to figure out what
2498 instruction set this CPU supports. */
2499@@ -210,7 +216,4 @@ struct linux_binprm;
2500 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2501 int arch_setup_additional_pages(struct linux_binprm *, int);
2502
2503-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2504-#define arch_randomize_brk arch_randomize_brk
2505-
2506 #endif
2507diff -urNp linux-3.0.7/arch/s390/include/asm/system.h linux-3.0.7/arch/s390/include/asm/system.h
2508--- linux-3.0.7/arch/s390/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2509+++ linux-3.0.7/arch/s390/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2510@@ -255,7 +255,7 @@ extern void (*_machine_restart)(char *co
2511 extern void (*_machine_halt)(void);
2512 extern void (*_machine_power_off)(void);
2513
2514-extern unsigned long arch_align_stack(unsigned long sp);
2515+#define arch_align_stack(x) ((x) & ~0xfUL)
2516
2517 static inline int tprot(unsigned long addr)
2518 {
2519diff -urNp linux-3.0.7/arch/s390/include/asm/uaccess.h linux-3.0.7/arch/s390/include/asm/uaccess.h
2520--- linux-3.0.7/arch/s390/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
2521+++ linux-3.0.7/arch/s390/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
2522@@ -235,6 +235,10 @@ static inline unsigned long __must_check
2523 copy_to_user(void __user *to, const void *from, unsigned long n)
2524 {
2525 might_fault();
2526+
2527+ if ((long)n < 0)
2528+ return n;
2529+
2530 if (access_ok(VERIFY_WRITE, to, n))
2531 n = __copy_to_user(to, from, n);
2532 return n;
2533@@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void
2534 static inline unsigned long __must_check
2535 __copy_from_user(void *to, const void __user *from, unsigned long n)
2536 {
2537+ if ((long)n < 0)
2538+ return n;
2539+
2540 if (__builtin_constant_p(n) && (n <= 256))
2541 return uaccess.copy_from_user_small(n, from, to);
2542 else
2543@@ -294,6 +301,10 @@ copy_from_user(void *to, const void __us
2544 unsigned int sz = __compiletime_object_size(to);
2545
2546 might_fault();
2547+
2548+ if ((long)n < 0)
2549+ return n;
2550+
2551 if (unlikely(sz != -1 && sz < n)) {
2552 copy_from_user_overflow();
2553 return n;
2554diff -urNp linux-3.0.7/arch/s390/kernel/module.c linux-3.0.7/arch/s390/kernel/module.c
2555--- linux-3.0.7/arch/s390/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
2556+++ linux-3.0.7/arch/s390/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
2557@@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
2558
2559 /* Increase core size by size of got & plt and set start
2560 offsets for got and plt. */
2561- me->core_size = ALIGN(me->core_size, 4);
2562- me->arch.got_offset = me->core_size;
2563- me->core_size += me->arch.got_size;
2564- me->arch.plt_offset = me->core_size;
2565- me->core_size += me->arch.plt_size;
2566+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
2567+ me->arch.got_offset = me->core_size_rw;
2568+ me->core_size_rw += me->arch.got_size;
2569+ me->arch.plt_offset = me->core_size_rx;
2570+ me->core_size_rx += me->arch.plt_size;
2571 return 0;
2572 }
2573
2574@@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2575 if (info->got_initialized == 0) {
2576 Elf_Addr *gotent;
2577
2578- gotent = me->module_core + me->arch.got_offset +
2579+ gotent = me->module_core_rw + me->arch.got_offset +
2580 info->got_offset;
2581 *gotent = val;
2582 info->got_initialized = 1;
2583@@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2584 else if (r_type == R_390_GOTENT ||
2585 r_type == R_390_GOTPLTENT)
2586 *(unsigned int *) loc =
2587- (val + (Elf_Addr) me->module_core - loc) >> 1;
2588+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
2589 else if (r_type == R_390_GOT64 ||
2590 r_type == R_390_GOTPLT64)
2591 *(unsigned long *) loc = val;
2592@@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2593 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
2594 if (info->plt_initialized == 0) {
2595 unsigned int *ip;
2596- ip = me->module_core + me->arch.plt_offset +
2597+ ip = me->module_core_rx + me->arch.plt_offset +
2598 info->plt_offset;
2599 #ifndef CONFIG_64BIT
2600 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
2601@@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2602 val - loc + 0xffffUL < 0x1ffffeUL) ||
2603 (r_type == R_390_PLT32DBL &&
2604 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
2605- val = (Elf_Addr) me->module_core +
2606+ val = (Elf_Addr) me->module_core_rx +
2607 me->arch.plt_offset +
2608 info->plt_offset;
2609 val += rela->r_addend - loc;
2610@@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2611 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
2612 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
2613 val = val + rela->r_addend -
2614- ((Elf_Addr) me->module_core + me->arch.got_offset);
2615+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
2616 if (r_type == R_390_GOTOFF16)
2617 *(unsigned short *) loc = val;
2618 else if (r_type == R_390_GOTOFF32)
2619@@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2620 break;
2621 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
2622 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
2623- val = (Elf_Addr) me->module_core + me->arch.got_offset +
2624+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
2625 rela->r_addend - loc;
2626 if (r_type == R_390_GOTPC)
2627 *(unsigned int *) loc = val;
2628diff -urNp linux-3.0.7/arch/s390/kernel/process.c linux-3.0.7/arch/s390/kernel/process.c
2629--- linux-3.0.7/arch/s390/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2630+++ linux-3.0.7/arch/s390/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2631@@ -319,39 +319,3 @@ unsigned long get_wchan(struct task_stru
2632 }
2633 return 0;
2634 }
2635-
2636-unsigned long arch_align_stack(unsigned long sp)
2637-{
2638- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2639- sp -= get_random_int() & ~PAGE_MASK;
2640- return sp & ~0xf;
2641-}
2642-
2643-static inline unsigned long brk_rnd(void)
2644-{
2645- /* 8MB for 32bit, 1GB for 64bit */
2646- if (is_32bit_task())
2647- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
2648- else
2649- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
2650-}
2651-
2652-unsigned long arch_randomize_brk(struct mm_struct *mm)
2653-{
2654- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
2655-
2656- if (ret < mm->brk)
2657- return mm->brk;
2658- return ret;
2659-}
2660-
2661-unsigned long randomize_et_dyn(unsigned long base)
2662-{
2663- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2664-
2665- if (!(current->flags & PF_RANDOMIZE))
2666- return base;
2667- if (ret < base)
2668- return base;
2669- return ret;
2670-}
2671diff -urNp linux-3.0.7/arch/s390/kernel/setup.c linux-3.0.7/arch/s390/kernel/setup.c
2672--- linux-3.0.7/arch/s390/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
2673+++ linux-3.0.7/arch/s390/kernel/setup.c 2011-08-23 21:47:55.000000000 -0400
2674@@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
2675 }
2676 early_param("mem", early_parse_mem);
2677
2678-unsigned int user_mode = HOME_SPACE_MODE;
2679+unsigned int user_mode = SECONDARY_SPACE_MODE;
2680 EXPORT_SYMBOL_GPL(user_mode);
2681
2682 static int set_amode_and_uaccess(unsigned long user_amode,
2683diff -urNp linux-3.0.7/arch/s390/mm/mmap.c linux-3.0.7/arch/s390/mm/mmap.c
2684--- linux-3.0.7/arch/s390/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2685+++ linux-3.0.7/arch/s390/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2686@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
2687 */
2688 if (mmap_is_legacy()) {
2689 mm->mmap_base = TASK_UNMAPPED_BASE;
2690+
2691+#ifdef CONFIG_PAX_RANDMMAP
2692+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2693+ mm->mmap_base += mm->delta_mmap;
2694+#endif
2695+
2696 mm->get_unmapped_area = arch_get_unmapped_area;
2697 mm->unmap_area = arch_unmap_area;
2698 } else {
2699 mm->mmap_base = mmap_base();
2700+
2701+#ifdef CONFIG_PAX_RANDMMAP
2702+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2703+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2704+#endif
2705+
2706 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2707 mm->unmap_area = arch_unmap_area_topdown;
2708 }
2709@@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
2710 */
2711 if (mmap_is_legacy()) {
2712 mm->mmap_base = TASK_UNMAPPED_BASE;
2713+
2714+#ifdef CONFIG_PAX_RANDMMAP
2715+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2716+ mm->mmap_base += mm->delta_mmap;
2717+#endif
2718+
2719 mm->get_unmapped_area = s390_get_unmapped_area;
2720 mm->unmap_area = arch_unmap_area;
2721 } else {
2722 mm->mmap_base = mmap_base();
2723+
2724+#ifdef CONFIG_PAX_RANDMMAP
2725+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2726+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2727+#endif
2728+
2729 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
2730 mm->unmap_area = arch_unmap_area_topdown;
2731 }
2732diff -urNp linux-3.0.7/arch/score/include/asm/system.h linux-3.0.7/arch/score/include/asm/system.h
2733--- linux-3.0.7/arch/score/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
2734+++ linux-3.0.7/arch/score/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
2735@@ -17,7 +17,7 @@ do { \
2736 #define finish_arch_switch(prev) do {} while (0)
2737
2738 typedef void (*vi_handler_t)(void);
2739-extern unsigned long arch_align_stack(unsigned long sp);
2740+#define arch_align_stack(x) (x)
2741
2742 #define mb() barrier()
2743 #define rmb() barrier()
2744diff -urNp linux-3.0.7/arch/score/kernel/process.c linux-3.0.7/arch/score/kernel/process.c
2745--- linux-3.0.7/arch/score/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
2746+++ linux-3.0.7/arch/score/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
2747@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
2748
2749 return task_pt_regs(task)->cp0_epc;
2750 }
2751-
2752-unsigned long arch_align_stack(unsigned long sp)
2753-{
2754- return sp;
2755-}
2756diff -urNp linux-3.0.7/arch/sh/mm/mmap.c linux-3.0.7/arch/sh/mm/mmap.c
2757--- linux-3.0.7/arch/sh/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
2758+++ linux-3.0.7/arch/sh/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
2759@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
2760 addr = PAGE_ALIGN(addr);
2761
2762 vma = find_vma(mm, addr);
2763- if (TASK_SIZE - len >= addr &&
2764- (!vma || addr + len <= vma->vm_start))
2765+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2766 return addr;
2767 }
2768
2769@@ -106,7 +105,7 @@ full_search:
2770 }
2771 return -ENOMEM;
2772 }
2773- if (likely(!vma || addr + len <= vma->vm_start)) {
2774+ if (likely(check_heap_stack_gap(vma, addr, len))) {
2775 /*
2776 * Remember the place where we stopped the search:
2777 */
2778@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
2779 addr = PAGE_ALIGN(addr);
2780
2781 vma = find_vma(mm, addr);
2782- if (TASK_SIZE - len >= addr &&
2783- (!vma || addr + len <= vma->vm_start))
2784+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2785 return addr;
2786 }
2787
2788@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
2789 /* make sure it can fit in the remaining address space */
2790 if (likely(addr > len)) {
2791 vma = find_vma(mm, addr-len);
2792- if (!vma || addr <= vma->vm_start) {
2793+ if (check_heap_stack_gap(vma, addr - len, len)) {
2794 /* remember the address as a hint for next time */
2795 return (mm->free_area_cache = addr-len);
2796 }
2797@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
2798 if (unlikely(mm->mmap_base < len))
2799 goto bottomup;
2800
2801- addr = mm->mmap_base-len;
2802- if (do_colour_align)
2803- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2804+ addr = mm->mmap_base - len;
2805
2806 do {
2807+ if (do_colour_align)
2808+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2809 /*
2810 * Lookup failure means no vma is above this address,
2811 * else if new region fits below vma->vm_start,
2812 * return with success:
2813 */
2814 vma = find_vma(mm, addr);
2815- if (likely(!vma || addr+len <= vma->vm_start)) {
2816+ if (likely(check_heap_stack_gap(vma, addr, len))) {
2817 /* remember the address as a hint for next time */
2818 return (mm->free_area_cache = addr);
2819 }
2820@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
2821 mm->cached_hole_size = vma->vm_start - addr;
2822
2823 /* try just below the current vma->vm_start */
2824- addr = vma->vm_start-len;
2825- if (do_colour_align)
2826- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2827- } while (likely(len < vma->vm_start));
2828+ addr = skip_heap_stack_gap(vma, len);
2829+ } while (!IS_ERR_VALUE(addr));
2830
2831 bottomup:
2832 /*
2833diff -urNp linux-3.0.7/arch/sparc/include/asm/atomic_64.h linux-3.0.7/arch/sparc/include/asm/atomic_64.h
2834--- linux-3.0.7/arch/sparc/include/asm/atomic_64.h 2011-07-21 22:17:23.000000000 -0400
2835+++ linux-3.0.7/arch/sparc/include/asm/atomic_64.h 2011-08-23 21:48:14.000000000 -0400
2836@@ -14,18 +14,40 @@
2837 #define ATOMIC64_INIT(i) { (i) }
2838
2839 #define atomic_read(v) (*(volatile int *)&(v)->counter)
2840+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
2841+{
2842+ return v->counter;
2843+}
2844 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
2845+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
2846+{
2847+ return v->counter;
2848+}
2849
2850 #define atomic_set(v, i) (((v)->counter) = i)
2851+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
2852+{
2853+ v->counter = i;
2854+}
2855 #define atomic64_set(v, i) (((v)->counter) = i)
2856+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
2857+{
2858+ v->counter = i;
2859+}
2860
2861 extern void atomic_add(int, atomic_t *);
2862+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
2863 extern void atomic64_add(long, atomic64_t *);
2864+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
2865 extern void atomic_sub(int, atomic_t *);
2866+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
2867 extern void atomic64_sub(long, atomic64_t *);
2868+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
2869
2870 extern int atomic_add_ret(int, atomic_t *);
2871+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
2872 extern long atomic64_add_ret(long, atomic64_t *);
2873+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
2874 extern int atomic_sub_ret(int, atomic_t *);
2875 extern long atomic64_sub_ret(long, atomic64_t *);
2876
2877@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
2878 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
2879
2880 #define atomic_inc_return(v) atomic_add_ret(1, v)
2881+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
2882+{
2883+ return atomic_add_ret_unchecked(1, v);
2884+}
2885 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
2886+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
2887+{
2888+ return atomic64_add_ret_unchecked(1, v);
2889+}
2890
2891 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
2892 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
2893
2894 #define atomic_add_return(i, v) atomic_add_ret(i, v)
2895+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
2896+{
2897+ return atomic_add_ret_unchecked(i, v);
2898+}
2899 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
2900+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
2901+{
2902+ return atomic64_add_ret_unchecked(i, v);
2903+}
2904
2905 /*
2906 * atomic_inc_and_test - increment and test
2907@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
2908 * other cases.
2909 */
2910 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
2911+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
2912+{
2913+ return atomic_inc_return_unchecked(v) == 0;
2914+}
2915 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2916
2917 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
2918@@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomi
2919 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
2920
2921 #define atomic_inc(v) atomic_add(1, v)
2922+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
2923+{
2924+ atomic_add_unchecked(1, v);
2925+}
2926 #define atomic64_inc(v) atomic64_add(1, v)
2927+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
2928+{
2929+ atomic64_add_unchecked(1, v);
2930+}
2931
2932 #define atomic_dec(v) atomic_sub(1, v)
2933+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
2934+{
2935+ atomic_sub_unchecked(1, v);
2936+}
2937 #define atomic64_dec(v) atomic64_sub(1, v)
2938+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
2939+{
2940+ atomic64_sub_unchecked(1, v);
2941+}
2942
2943 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
2944 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
2945
2946 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2947+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
2948+{
2949+ return cmpxchg(&v->counter, old, new);
2950+}
2951 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
2952+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
2953+{
2954+ return xchg(&v->counter, new);
2955+}
2956
2957 static inline int atomic_add_unless(atomic_t *v, int a, int u)
2958 {
2959- int c, old;
2960+ int c, old, new;
2961 c = atomic_read(v);
2962 for (;;) {
2963- if (unlikely(c == (u)))
2964+ if (unlikely(c == u))
2965 break;
2966- old = atomic_cmpxchg((v), c, c + (a));
2967+
2968+ asm volatile("addcc %2, %0, %0\n"
2969+
2970+#ifdef CONFIG_PAX_REFCOUNT
2971+ "tvs %%icc, 6\n"
2972+#endif
2973+
2974+ : "=r" (new)
2975+ : "0" (c), "ir" (a)
2976+ : "cc");
2977+
2978+ old = atomic_cmpxchg(v, c, new);
2979 if (likely(old == c))
2980 break;
2981 c = old;
2982 }
2983- return c != (u);
2984+ return c != u;
2985 }
2986
2987 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
2988@@ -90,20 +167,35 @@ static inline int atomic_add_unless(atom
2989 #define atomic64_cmpxchg(v, o, n) \
2990 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
2991 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
2992+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
2993+{
2994+ return xchg(&v->counter, new);
2995+}
2996
2997 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
2998 {
2999- long c, old;
3000+ long c, old, new;
3001 c = atomic64_read(v);
3002 for (;;) {
3003- if (unlikely(c == (u)))
3004+ if (unlikely(c == u))
3005 break;
3006- old = atomic64_cmpxchg((v), c, c + (a));
3007+
3008+ asm volatile("addcc %2, %0, %0\n"
3009+
3010+#ifdef CONFIG_PAX_REFCOUNT
3011+ "tvs %%xcc, 6\n"
3012+#endif
3013+
3014+ : "=r" (new)
3015+ : "0" (c), "ir" (a)
3016+ : "cc");
3017+
3018+ old = atomic64_cmpxchg(v, c, new);
3019 if (likely(old == c))
3020 break;
3021 c = old;
3022 }
3023- return c != (u);
3024+ return c != u;
3025 }
3026
3027 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3028diff -urNp linux-3.0.7/arch/sparc/include/asm/cache.h linux-3.0.7/arch/sparc/include/asm/cache.h
3029--- linux-3.0.7/arch/sparc/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
3030+++ linux-3.0.7/arch/sparc/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
3031@@ -10,7 +10,7 @@
3032 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3033
3034 #define L1_CACHE_SHIFT 5
3035-#define L1_CACHE_BYTES 32
3036+#define L1_CACHE_BYTES 32UL
3037
3038 #ifdef CONFIG_SPARC32
3039 #define SMP_CACHE_BYTES_SHIFT 5
3040diff -urNp linux-3.0.7/arch/sparc/include/asm/elf_32.h linux-3.0.7/arch/sparc/include/asm/elf_32.h
3041--- linux-3.0.7/arch/sparc/include/asm/elf_32.h 2011-07-21 22:17:23.000000000 -0400
3042+++ linux-3.0.7/arch/sparc/include/asm/elf_32.h 2011-08-23 21:47:55.000000000 -0400
3043@@ -114,6 +114,13 @@ typedef struct {
3044
3045 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3046
3047+#ifdef CONFIG_PAX_ASLR
3048+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3049+
3050+#define PAX_DELTA_MMAP_LEN 16
3051+#define PAX_DELTA_STACK_LEN 16
3052+#endif
3053+
3054 /* This yields a mask that user programs can use to figure out what
3055 instruction set this cpu supports. This can NOT be done in userspace
3056 on Sparc. */
3057diff -urNp linux-3.0.7/arch/sparc/include/asm/elf_64.h linux-3.0.7/arch/sparc/include/asm/elf_64.h
3058--- linux-3.0.7/arch/sparc/include/asm/elf_64.h 2011-09-02 18:11:21.000000000 -0400
3059+++ linux-3.0.7/arch/sparc/include/asm/elf_64.h 2011-08-23 21:47:55.000000000 -0400
3060@@ -180,6 +180,13 @@ typedef struct {
3061 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3062 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3063
3064+#ifdef CONFIG_PAX_ASLR
3065+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3066+
3067+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3068+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3069+#endif
3070+
3071 extern unsigned long sparc64_elf_hwcap;
3072 #define ELF_HWCAP sparc64_elf_hwcap
3073
3074diff -urNp linux-3.0.7/arch/sparc/include/asm/pgtable_32.h linux-3.0.7/arch/sparc/include/asm/pgtable_32.h
3075--- linux-3.0.7/arch/sparc/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
3076+++ linux-3.0.7/arch/sparc/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
3077@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3078 BTFIXUPDEF_INT(page_none)
3079 BTFIXUPDEF_INT(page_copy)
3080 BTFIXUPDEF_INT(page_readonly)
3081+
3082+#ifdef CONFIG_PAX_PAGEEXEC
3083+BTFIXUPDEF_INT(page_shared_noexec)
3084+BTFIXUPDEF_INT(page_copy_noexec)
3085+BTFIXUPDEF_INT(page_readonly_noexec)
3086+#endif
3087+
3088 BTFIXUPDEF_INT(page_kernel)
3089
3090 #define PMD_SHIFT SUN4C_PMD_SHIFT
3091@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3092 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3093 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3094
3095+#ifdef CONFIG_PAX_PAGEEXEC
3096+extern pgprot_t PAGE_SHARED_NOEXEC;
3097+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3098+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3099+#else
3100+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3101+# define PAGE_COPY_NOEXEC PAGE_COPY
3102+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3103+#endif
3104+
3105 extern unsigned long page_kernel;
3106
3107 #ifdef MODULE
3108diff -urNp linux-3.0.7/arch/sparc/include/asm/pgtsrmmu.h linux-3.0.7/arch/sparc/include/asm/pgtsrmmu.h
3109--- linux-3.0.7/arch/sparc/include/asm/pgtsrmmu.h 2011-07-21 22:17:23.000000000 -0400
3110+++ linux-3.0.7/arch/sparc/include/asm/pgtsrmmu.h 2011-08-23 21:47:55.000000000 -0400
3111@@ -115,6 +115,13 @@
3112 SRMMU_EXEC | SRMMU_REF)
3113 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3114 SRMMU_EXEC | SRMMU_REF)
3115+
3116+#ifdef CONFIG_PAX_PAGEEXEC
3117+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3118+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3119+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3120+#endif
3121+
3122 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3123 SRMMU_DIRTY | SRMMU_REF)
3124
3125diff -urNp linux-3.0.7/arch/sparc/include/asm/spinlock_64.h linux-3.0.7/arch/sparc/include/asm/spinlock_64.h
3126--- linux-3.0.7/arch/sparc/include/asm/spinlock_64.h 2011-10-16 21:54:53.000000000 -0400
3127+++ linux-3.0.7/arch/sparc/include/asm/spinlock_64.h 2011-10-16 21:55:27.000000000 -0400
3128@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
3129
3130 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3131
3132-static void inline arch_read_lock(arch_rwlock_t *lock)
3133+static inline void arch_read_lock(arch_rwlock_t *lock)
3134 {
3135 unsigned long tmp1, tmp2;
3136
3137 __asm__ __volatile__ (
3138 "1: ldsw [%2], %0\n"
3139 " brlz,pn %0, 2f\n"
3140-"4: add %0, 1, %1\n"
3141+"4: addcc %0, 1, %1\n"
3142+
3143+#ifdef CONFIG_PAX_REFCOUNT
3144+" tvs %%icc, 6\n"
3145+#endif
3146+
3147 " cas [%2], %0, %1\n"
3148 " cmp %0, %1\n"
3149 " bne,pn %%icc, 1b\n"
3150@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
3151 " .previous"
3152 : "=&r" (tmp1), "=&r" (tmp2)
3153 : "r" (lock)
3154- : "memory");
3155+ : "memory", "cc");
3156 }
3157
3158-static int inline arch_read_trylock(arch_rwlock_t *lock)
3159+static inline int arch_read_trylock(arch_rwlock_t *lock)
3160 {
3161 int tmp1, tmp2;
3162
3163@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3164 "1: ldsw [%2], %0\n"
3165 " brlz,a,pn %0, 2f\n"
3166 " mov 0, %0\n"
3167-" add %0, 1, %1\n"
3168+" addcc %0, 1, %1\n"
3169+
3170+#ifdef CONFIG_PAX_REFCOUNT
3171+" tvs %%icc, 6\n"
3172+#endif
3173+
3174 " cas [%2], %0, %1\n"
3175 " cmp %0, %1\n"
3176 " bne,pn %%icc, 1b\n"
3177@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
3178 return tmp1;
3179 }
3180
3181-static void inline arch_read_unlock(arch_rwlock_t *lock)
3182+static inline void arch_read_unlock(arch_rwlock_t *lock)
3183 {
3184 unsigned long tmp1, tmp2;
3185
3186 __asm__ __volatile__(
3187 "1: lduw [%2], %0\n"
3188-" sub %0, 1, %1\n"
3189+" subcc %0, 1, %1\n"
3190+
3191+#ifdef CONFIG_PAX_REFCOUNT
3192+" tvs %%icc, 6\n"
3193+#endif
3194+
3195 " cas [%2], %0, %1\n"
3196 " cmp %0, %1\n"
3197 " bne,pn %%xcc, 1b\n"
3198@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
3199 : "memory");
3200 }
3201
3202-static void inline arch_write_lock(arch_rwlock_t *lock)
3203+static inline void arch_write_lock(arch_rwlock_t *lock)
3204 {
3205 unsigned long mask, tmp1, tmp2;
3206
3207@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
3208 : "memory");
3209 }
3210
3211-static void inline arch_write_unlock(arch_rwlock_t *lock)
3212+static inline void arch_write_unlock(arch_rwlock_t *lock)
3213 {
3214 __asm__ __volatile__(
3215 " stw %%g0, [%0]"
3216@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
3217 : "memory");
3218 }
3219
3220-static int inline arch_write_trylock(arch_rwlock_t *lock)
3221+static inline int arch_write_trylock(arch_rwlock_t *lock)
3222 {
3223 unsigned long mask, tmp1, tmp2, result;
3224
3225diff -urNp linux-3.0.7/arch/sparc/include/asm/thread_info_32.h linux-3.0.7/arch/sparc/include/asm/thread_info_32.h
3226--- linux-3.0.7/arch/sparc/include/asm/thread_info_32.h 2011-07-21 22:17:23.000000000 -0400
3227+++ linux-3.0.7/arch/sparc/include/asm/thread_info_32.h 2011-08-23 21:47:55.000000000 -0400
3228@@ -50,6 +50,8 @@ struct thread_info {
3229 unsigned long w_saved;
3230
3231 struct restart_block restart_block;
3232+
3233+ unsigned long lowest_stack;
3234 };
3235
3236 /*
3237diff -urNp linux-3.0.7/arch/sparc/include/asm/thread_info_64.h linux-3.0.7/arch/sparc/include/asm/thread_info_64.h
3238--- linux-3.0.7/arch/sparc/include/asm/thread_info_64.h 2011-07-21 22:17:23.000000000 -0400
3239+++ linux-3.0.7/arch/sparc/include/asm/thread_info_64.h 2011-08-23 21:47:55.000000000 -0400
3240@@ -63,6 +63,8 @@ struct thread_info {
3241 struct pt_regs *kern_una_regs;
3242 unsigned int kern_una_insn;
3243
3244+ unsigned long lowest_stack;
3245+
3246 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3247 };
3248
3249diff -urNp linux-3.0.7/arch/sparc/include/asm/uaccess_32.h linux-3.0.7/arch/sparc/include/asm/uaccess_32.h
3250--- linux-3.0.7/arch/sparc/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
3251+++ linux-3.0.7/arch/sparc/include/asm/uaccess_32.h 2011-08-23 21:47:55.000000000 -0400
3252@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3253
3254 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3255 {
3256- if (n && __access_ok((unsigned long) to, n))
3257+ if ((long)n < 0)
3258+ return n;
3259+
3260+ if (n && __access_ok((unsigned long) to, n)) {
3261+ if (!__builtin_constant_p(n))
3262+ check_object_size(from, n, true);
3263 return __copy_user(to, (__force void __user *) from, n);
3264- else
3265+ } else
3266 return n;
3267 }
3268
3269 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3270 {
3271+ if ((long)n < 0)
3272+ return n;
3273+
3274+ if (!__builtin_constant_p(n))
3275+ check_object_size(from, n, true);
3276+
3277 return __copy_user(to, (__force void __user *) from, n);
3278 }
3279
3280 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3281 {
3282- if (n && __access_ok((unsigned long) from, n))
3283+ if ((long)n < 0)
3284+ return n;
3285+
3286+ if (n && __access_ok((unsigned long) from, n)) {
3287+ if (!__builtin_constant_p(n))
3288+ check_object_size(to, n, false);
3289 return __copy_user((__force void __user *) to, from, n);
3290- else
3291+ } else
3292 return n;
3293 }
3294
3295 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3296 {
3297+ if ((long)n < 0)
3298+ return n;
3299+
3300 return __copy_user((__force void __user *) to, from, n);
3301 }
3302
3303diff -urNp linux-3.0.7/arch/sparc/include/asm/uaccess_64.h linux-3.0.7/arch/sparc/include/asm/uaccess_64.h
3304--- linux-3.0.7/arch/sparc/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
3305+++ linux-3.0.7/arch/sparc/include/asm/uaccess_64.h 2011-08-23 21:47:55.000000000 -0400
3306@@ -10,6 +10,7 @@
3307 #include <linux/compiler.h>
3308 #include <linux/string.h>
3309 #include <linux/thread_info.h>
3310+#include <linux/kernel.h>
3311 #include <asm/asi.h>
3312 #include <asm/system.h>
3313 #include <asm/spitfire.h>
3314@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
3315 static inline unsigned long __must_check
3316 copy_from_user(void *to, const void __user *from, unsigned long size)
3317 {
3318- unsigned long ret = ___copy_from_user(to, from, size);
3319+ unsigned long ret;
3320
3321+ if ((long)size < 0 || size > INT_MAX)
3322+ return size;
3323+
3324+ if (!__builtin_constant_p(size))
3325+ check_object_size(to, size, false);
3326+
3327+ ret = ___copy_from_user(to, from, size);
3328 if (unlikely(ret))
3329 ret = copy_from_user_fixup(to, from, size);
3330
3331@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
3332 static inline unsigned long __must_check
3333 copy_to_user(void __user *to, const void *from, unsigned long size)
3334 {
3335- unsigned long ret = ___copy_to_user(to, from, size);
3336+ unsigned long ret;
3337+
3338+ if ((long)size < 0 || size > INT_MAX)
3339+ return size;
3340+
3341+ if (!__builtin_constant_p(size))
3342+ check_object_size(from, size, true);
3343
3344+ ret = ___copy_to_user(to, from, size);
3345 if (unlikely(ret))
3346 ret = copy_to_user_fixup(to, from, size);
3347 return ret;
3348diff -urNp linux-3.0.7/arch/sparc/include/asm/uaccess.h linux-3.0.7/arch/sparc/include/asm/uaccess.h
3349--- linux-3.0.7/arch/sparc/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
3350+++ linux-3.0.7/arch/sparc/include/asm/uaccess.h 2011-08-23 21:47:55.000000000 -0400
3351@@ -1,5 +1,13 @@
3352 #ifndef ___ASM_SPARC_UACCESS_H
3353 #define ___ASM_SPARC_UACCESS_H
3354+
3355+#ifdef __KERNEL__
3356+#ifndef __ASSEMBLY__
3357+#include <linux/types.h>
3358+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3359+#endif
3360+#endif
3361+
3362 #if defined(__sparc__) && defined(__arch64__)
3363 #include <asm/uaccess_64.h>
3364 #else
3365diff -urNp linux-3.0.7/arch/sparc/kernel/Makefile linux-3.0.7/arch/sparc/kernel/Makefile
3366--- linux-3.0.7/arch/sparc/kernel/Makefile 2011-10-16 21:54:53.000000000 -0400
3367+++ linux-3.0.7/arch/sparc/kernel/Makefile 2011-10-16 21:55:27.000000000 -0400
3368@@ -3,7 +3,7 @@
3369 #
3370
3371 asflags-y := -ansi
3372-ccflags-y := -Werror
3373+#ccflags-y := -Werror
3374
3375 extra-y := head_$(BITS).o
3376 extra-y += init_task.o
3377diff -urNp linux-3.0.7/arch/sparc/kernel/process_32.c linux-3.0.7/arch/sparc/kernel/process_32.c
3378--- linux-3.0.7/arch/sparc/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
3379+++ linux-3.0.7/arch/sparc/kernel/process_32.c 2011-08-23 21:48:14.000000000 -0400
3380@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3381 rw->ins[4], rw->ins[5],
3382 rw->ins[6],
3383 rw->ins[7]);
3384- printk("%pS\n", (void *) rw->ins[7]);
3385+ printk("%pA\n", (void *) rw->ins[7]);
3386 rw = (struct reg_window32 *) rw->ins[6];
3387 }
3388 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3389@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3390
3391 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3392 r->psr, r->pc, r->npc, r->y, print_tainted());
3393- printk("PC: <%pS>\n", (void *) r->pc);
3394+ printk("PC: <%pA>\n", (void *) r->pc);
3395 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3396 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3397 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3398 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3399 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3400 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3401- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3402+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3403
3404 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3405 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3406@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk,
3407 rw = (struct reg_window32 *) fp;
3408 pc = rw->ins[7];
3409 printk("[%08lx : ", pc);
3410- printk("%pS ] ", (void *) pc);
3411+ printk("%pA ] ", (void *) pc);
3412 fp = rw->ins[6];
3413 } while (++count < 16);
3414 printk("\n");
3415diff -urNp linux-3.0.7/arch/sparc/kernel/process_64.c linux-3.0.7/arch/sparc/kernel/process_64.c
3416--- linux-3.0.7/arch/sparc/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
3417+++ linux-3.0.7/arch/sparc/kernel/process_64.c 2011-08-23 21:48:14.000000000 -0400
3418@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
3419 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3420 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3421 if (regs->tstate & TSTATE_PRIV)
3422- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3423+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3424 }
3425
3426 void show_regs(struct pt_regs *regs)
3427 {
3428 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3429 regs->tpc, regs->tnpc, regs->y, print_tainted());
3430- printk("TPC: <%pS>\n", (void *) regs->tpc);
3431+ printk("TPC: <%pA>\n", (void *) regs->tpc);
3432 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3433 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3434 regs->u_regs[3]);
3435@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3436 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3437 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3438 regs->u_regs[15]);
3439- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3440+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3441 show_regwindow(regs);
3442 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3443 }
3444@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
3445 ((tp && tp->task) ? tp->task->pid : -1));
3446
3447 if (gp->tstate & TSTATE_PRIV) {
3448- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3449+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3450 (void *) gp->tpc,
3451 (void *) gp->o7,
3452 (void *) gp->i7,
3453diff -urNp linux-3.0.7/arch/sparc/kernel/sys_sparc_32.c linux-3.0.7/arch/sparc/kernel/sys_sparc_32.c
3454--- linux-3.0.7/arch/sparc/kernel/sys_sparc_32.c 2011-07-21 22:17:23.000000000 -0400
3455+++ linux-3.0.7/arch/sparc/kernel/sys_sparc_32.c 2011-08-23 21:47:55.000000000 -0400
3456@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
3457 if (ARCH_SUN4C && len > 0x20000000)
3458 return -ENOMEM;
3459 if (!addr)
3460- addr = TASK_UNMAPPED_BASE;
3461+ addr = current->mm->mmap_base;
3462
3463 if (flags & MAP_SHARED)
3464 addr = COLOUR_ALIGN(addr);
3465@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
3466 }
3467 if (TASK_SIZE - PAGE_SIZE - len < addr)
3468 return -ENOMEM;
3469- if (!vmm || addr + len <= vmm->vm_start)
3470+ if (check_heap_stack_gap(vmm, addr, len))
3471 return addr;
3472 addr = vmm->vm_end;
3473 if (flags & MAP_SHARED)
3474diff -urNp linux-3.0.7/arch/sparc/kernel/sys_sparc_64.c linux-3.0.7/arch/sparc/kernel/sys_sparc_64.c
3475--- linux-3.0.7/arch/sparc/kernel/sys_sparc_64.c 2011-07-21 22:17:23.000000000 -0400
3476+++ linux-3.0.7/arch/sparc/kernel/sys_sparc_64.c 2011-08-23 21:47:55.000000000 -0400
3477@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3478 /* We do not accept a shared mapping if it would violate
3479 * cache aliasing constraints.
3480 */
3481- if ((flags & MAP_SHARED) &&
3482+ if ((filp || (flags & MAP_SHARED)) &&
3483 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3484 return -EINVAL;
3485 return addr;
3486@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3487 if (filp || (flags & MAP_SHARED))
3488 do_color_align = 1;
3489
3490+#ifdef CONFIG_PAX_RANDMMAP
3491+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3492+#endif
3493+
3494 if (addr) {
3495 if (do_color_align)
3496 addr = COLOUR_ALIGN(addr, pgoff);
3497@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
3498 addr = PAGE_ALIGN(addr);
3499
3500 vma = find_vma(mm, addr);
3501- if (task_size - len >= addr &&
3502- (!vma || addr + len <= vma->vm_start))
3503+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3504 return addr;
3505 }
3506
3507 if (len > mm->cached_hole_size) {
3508- start_addr = addr = mm->free_area_cache;
3509+ start_addr = addr = mm->free_area_cache;
3510 } else {
3511- start_addr = addr = TASK_UNMAPPED_BASE;
3512+ start_addr = addr = mm->mmap_base;
3513 mm->cached_hole_size = 0;
3514 }
3515
3516@@ -174,14 +177,14 @@ full_search:
3517 vma = find_vma(mm, VA_EXCLUDE_END);
3518 }
3519 if (unlikely(task_size < addr)) {
3520- if (start_addr != TASK_UNMAPPED_BASE) {
3521- start_addr = addr = TASK_UNMAPPED_BASE;
3522+ if (start_addr != mm->mmap_base) {
3523+ start_addr = addr = mm->mmap_base;
3524 mm->cached_hole_size = 0;
3525 goto full_search;
3526 }
3527 return -ENOMEM;
3528 }
3529- if (likely(!vma || addr + len <= vma->vm_start)) {
3530+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3531 /*
3532 * Remember the place where we stopped the search:
3533 */
3534@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
3535 /* We do not accept a shared mapping if it would violate
3536 * cache aliasing constraints.
3537 */
3538- if ((flags & MAP_SHARED) &&
3539+ if ((filp || (flags & MAP_SHARED)) &&
3540 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3541 return -EINVAL;
3542 return addr;
3543@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
3544 addr = PAGE_ALIGN(addr);
3545
3546 vma = find_vma(mm, addr);
3547- if (task_size - len >= addr &&
3548- (!vma || addr + len <= vma->vm_start))
3549+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3550 return addr;
3551 }
3552
3553@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
3554 /* make sure it can fit in the remaining address space */
3555 if (likely(addr > len)) {
3556 vma = find_vma(mm, addr-len);
3557- if (!vma || addr <= vma->vm_start) {
3558+ if (check_heap_stack_gap(vma, addr - len, len)) {
3559 /* remember the address as a hint for next time */
3560 return (mm->free_area_cache = addr-len);
3561 }
3562@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
3563 if (unlikely(mm->mmap_base < len))
3564 goto bottomup;
3565
3566- addr = mm->mmap_base-len;
3567- if (do_color_align)
3568- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3569+ addr = mm->mmap_base - len;
3570
3571 do {
3572+ if (do_color_align)
3573+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3574 /*
3575 * Lookup failure means no vma is above this address,
3576 * else if new region fits below vma->vm_start,
3577 * return with success:
3578 */
3579 vma = find_vma(mm, addr);
3580- if (likely(!vma || addr+len <= vma->vm_start)) {
3581+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3582 /* remember the address as a hint for next time */
3583 return (mm->free_area_cache = addr);
3584 }
3585@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
3586 mm->cached_hole_size = vma->vm_start - addr;
3587
3588 /* try just below the current vma->vm_start */
3589- addr = vma->vm_start-len;
3590- if (do_color_align)
3591- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3592- } while (likely(len < vma->vm_start));
3593+ addr = skip_heap_stack_gap(vma, len);
3594+ } while (!IS_ERR_VALUE(addr));
3595
3596 bottomup:
3597 /*
3598@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
3599 gap == RLIM_INFINITY ||
3600 sysctl_legacy_va_layout) {
3601 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3602+
3603+#ifdef CONFIG_PAX_RANDMMAP
3604+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3605+ mm->mmap_base += mm->delta_mmap;
3606+#endif
3607+
3608 mm->get_unmapped_area = arch_get_unmapped_area;
3609 mm->unmap_area = arch_unmap_area;
3610 } else {
3611@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
3612 gap = (task_size / 6 * 5);
3613
3614 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
3615+
3616+#ifdef CONFIG_PAX_RANDMMAP
3617+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3618+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3619+#endif
3620+
3621 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3622 mm->unmap_area = arch_unmap_area_topdown;
3623 }
3624diff -urNp linux-3.0.7/arch/sparc/kernel/traps_32.c linux-3.0.7/arch/sparc/kernel/traps_32.c
3625--- linux-3.0.7/arch/sparc/kernel/traps_32.c 2011-07-21 22:17:23.000000000 -0400
3626+++ linux-3.0.7/arch/sparc/kernel/traps_32.c 2011-08-23 21:48:14.000000000 -0400
3627@@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
3628 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
3629 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
3630
3631+extern void gr_handle_kernel_exploit(void);
3632+
3633 void die_if_kernel(char *str, struct pt_regs *regs)
3634 {
3635 static int die_counter;
3636@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
3637 count++ < 30 &&
3638 (((unsigned long) rw) >= PAGE_OFFSET) &&
3639 !(((unsigned long) rw) & 0x7)) {
3640- printk("Caller[%08lx]: %pS\n", rw->ins[7],
3641+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
3642 (void *) rw->ins[7]);
3643 rw = (struct reg_window32 *)rw->ins[6];
3644 }
3645 }
3646 printk("Instruction DUMP:");
3647 instruction_dump ((unsigned long *) regs->pc);
3648- if(regs->psr & PSR_PS)
3649+ if(regs->psr & PSR_PS) {
3650+ gr_handle_kernel_exploit();
3651 do_exit(SIGKILL);
3652+ }
3653 do_exit(SIGSEGV);
3654 }
3655
3656diff -urNp linux-3.0.7/arch/sparc/kernel/traps_64.c linux-3.0.7/arch/sparc/kernel/traps_64.c
3657--- linux-3.0.7/arch/sparc/kernel/traps_64.c 2011-07-21 22:17:23.000000000 -0400
3658+++ linux-3.0.7/arch/sparc/kernel/traps_64.c 2011-08-23 21:48:14.000000000 -0400
3659@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
3660 i + 1,
3661 p->trapstack[i].tstate, p->trapstack[i].tpc,
3662 p->trapstack[i].tnpc, p->trapstack[i].tt);
3663- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
3664+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
3665 }
3666 }
3667
3668@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
3669
3670 lvl -= 0x100;
3671 if (regs->tstate & TSTATE_PRIV) {
3672+
3673+#ifdef CONFIG_PAX_REFCOUNT
3674+ if (lvl == 6)
3675+ pax_report_refcount_overflow(regs);
3676+#endif
3677+
3678 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
3679 die_if_kernel(buffer, regs);
3680 }
3681@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
3682 void bad_trap_tl1(struct pt_regs *regs, long lvl)
3683 {
3684 char buffer[32];
3685-
3686+
3687 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
3688 0, lvl, SIGTRAP) == NOTIFY_STOP)
3689 return;
3690
3691+#ifdef CONFIG_PAX_REFCOUNT
3692+ if (lvl == 6)
3693+ pax_report_refcount_overflow(regs);
3694+#endif
3695+
3696 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
3697
3698 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
3699@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
3700 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
3701 printk("%s" "ERROR(%d): ",
3702 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
3703- printk("TPC<%pS>\n", (void *) regs->tpc);
3704+ printk("TPC<%pA>\n", (void *) regs->tpc);
3705 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
3706 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
3707 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
3708@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
3709 smp_processor_id(),
3710 (type & 0x1) ? 'I' : 'D',
3711 regs->tpc);
3712- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
3713+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
3714 panic("Irrecoverable Cheetah+ parity error.");
3715 }
3716
3717@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
3718 smp_processor_id(),
3719 (type & 0x1) ? 'I' : 'D',
3720 regs->tpc);
3721- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
3722+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
3723 }
3724
3725 struct sun4v_error_entry {
3726@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
3727
3728 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
3729 regs->tpc, tl);
3730- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
3731+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
3732 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3733- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
3734+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
3735 (void *) regs->u_regs[UREG_I7]);
3736 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
3737 "pte[%lx] error[%lx]\n",
3738@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
3739
3740 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
3741 regs->tpc, tl);
3742- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
3743+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
3744 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3745- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
3746+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
3747 (void *) regs->u_regs[UREG_I7]);
3748 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
3749 "pte[%lx] error[%lx]\n",
3750@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
3751 fp = (unsigned long)sf->fp + STACK_BIAS;
3752 }
3753
3754- printk(" [%016lx] %pS\n", pc, (void *) pc);
3755+ printk(" [%016lx] %pA\n", pc, (void *) pc);
3756 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3757 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
3758 int index = tsk->curr_ret_stack;
3759 if (tsk->ret_stack && index >= graph) {
3760 pc = tsk->ret_stack[index - graph].ret;
3761- printk(" [%016lx] %pS\n", pc, (void *) pc);
3762+ printk(" [%016lx] %pA\n", pc, (void *) pc);
3763 graph++;
3764 }
3765 }
3766@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
3767 return (struct reg_window *) (fp + STACK_BIAS);
3768 }
3769
3770+extern void gr_handle_kernel_exploit(void);
3771+
3772 void die_if_kernel(char *str, struct pt_regs *regs)
3773 {
3774 static int die_counter;
3775@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
3776 while (rw &&
3777 count++ < 30 &&
3778 kstack_valid(tp, (unsigned long) rw)) {
3779- printk("Caller[%016lx]: %pS\n", rw->ins[7],
3780+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
3781 (void *) rw->ins[7]);
3782
3783 rw = kernel_stack_up(rw);
3784@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
3785 }
3786 user_instruction_dump ((unsigned int __user *) regs->tpc);
3787 }
3788- if (regs->tstate & TSTATE_PRIV)
3789+ if (regs->tstate & TSTATE_PRIV) {
3790+ gr_handle_kernel_exploit();
3791 do_exit(SIGKILL);
3792+ }
3793 do_exit(SIGSEGV);
3794 }
3795 EXPORT_SYMBOL(die_if_kernel);
3796diff -urNp linux-3.0.7/arch/sparc/kernel/unaligned_64.c linux-3.0.7/arch/sparc/kernel/unaligned_64.c
3797--- linux-3.0.7/arch/sparc/kernel/unaligned_64.c 2011-09-02 18:11:21.000000000 -0400
3798+++ linux-3.0.7/arch/sparc/kernel/unaligned_64.c 2011-08-23 21:48:14.000000000 -0400
3799@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs
3800 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
3801
3802 if (__ratelimit(&ratelimit)) {
3803- printk("Kernel unaligned access at TPC[%lx] %pS\n",
3804+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
3805 regs->tpc, (void *) regs->tpc);
3806 }
3807 }
3808diff -urNp linux-3.0.7/arch/sparc/lib/atomic_64.S linux-3.0.7/arch/sparc/lib/atomic_64.S
3809--- linux-3.0.7/arch/sparc/lib/atomic_64.S 2011-07-21 22:17:23.000000000 -0400
3810+++ linux-3.0.7/arch/sparc/lib/atomic_64.S 2011-08-23 21:47:55.000000000 -0400
3811@@ -18,7 +18,12 @@
3812 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
3813 BACKOFF_SETUP(%o2)
3814 1: lduw [%o1], %g1
3815- add %g1, %o0, %g7
3816+ addcc %g1, %o0, %g7
3817+
3818+#ifdef CONFIG_PAX_REFCOUNT
3819+ tvs %icc, 6
3820+#endif
3821+
3822 cas [%o1], %g1, %g7
3823 cmp %g1, %g7
3824 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3825@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
3826 2: BACKOFF_SPIN(%o2, %o3, 1b)
3827 .size atomic_add, .-atomic_add
3828
3829+ .globl atomic_add_unchecked
3830+ .type atomic_add_unchecked,#function
3831+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3832+ BACKOFF_SETUP(%o2)
3833+1: lduw [%o1], %g1
3834+ add %g1, %o0, %g7
3835+ cas [%o1], %g1, %g7
3836+ cmp %g1, %g7
3837+ bne,pn %icc, 2f
3838+ nop
3839+ retl
3840+ nop
3841+2: BACKOFF_SPIN(%o2, %o3, 1b)
3842+ .size atomic_add_unchecked, .-atomic_add_unchecked
3843+
3844 .globl atomic_sub
3845 .type atomic_sub,#function
3846 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3847 BACKOFF_SETUP(%o2)
3848 1: lduw [%o1], %g1
3849- sub %g1, %o0, %g7
3850+ subcc %g1, %o0, %g7
3851+
3852+#ifdef CONFIG_PAX_REFCOUNT
3853+ tvs %icc, 6
3854+#endif
3855+
3856 cas [%o1], %g1, %g7
3857 cmp %g1, %g7
3858 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3859@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
3860 2: BACKOFF_SPIN(%o2, %o3, 1b)
3861 .size atomic_sub, .-atomic_sub
3862
3863+ .globl atomic_sub_unchecked
3864+ .type atomic_sub_unchecked,#function
3865+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3866+ BACKOFF_SETUP(%o2)
3867+1: lduw [%o1], %g1
3868+ sub %g1, %o0, %g7
3869+ cas [%o1], %g1, %g7
3870+ cmp %g1, %g7
3871+ bne,pn %icc, 2f
3872+ nop
3873+ retl
3874+ nop
3875+2: BACKOFF_SPIN(%o2, %o3, 1b)
3876+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
3877+
3878 .globl atomic_add_ret
3879 .type atomic_add_ret,#function
3880 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3881 BACKOFF_SETUP(%o2)
3882 1: lduw [%o1], %g1
3883- add %g1, %o0, %g7
3884+ addcc %g1, %o0, %g7
3885+
3886+#ifdef CONFIG_PAX_REFCOUNT
3887+ tvs %icc, 6
3888+#endif
3889+
3890 cas [%o1], %g1, %g7
3891 cmp %g1, %g7
3892 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3893@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
3894 2: BACKOFF_SPIN(%o2, %o3, 1b)
3895 .size atomic_add_ret, .-atomic_add_ret
3896
3897+ .globl atomic_add_ret_unchecked
3898+ .type atomic_add_ret_unchecked,#function
3899+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3900+ BACKOFF_SETUP(%o2)
3901+1: lduw [%o1], %g1
3902+ addcc %g1, %o0, %g7
3903+ cas [%o1], %g1, %g7
3904+ cmp %g1, %g7
3905+ bne,pn %icc, 2f
3906+ add %g7, %o0, %g7
3907+ sra %g7, 0, %o0
3908+ retl
3909+ nop
3910+2: BACKOFF_SPIN(%o2, %o3, 1b)
3911+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
3912+
3913 .globl atomic_sub_ret
3914 .type atomic_sub_ret,#function
3915 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
3916 BACKOFF_SETUP(%o2)
3917 1: lduw [%o1], %g1
3918- sub %g1, %o0, %g7
3919+ subcc %g1, %o0, %g7
3920+
3921+#ifdef CONFIG_PAX_REFCOUNT
3922+ tvs %icc, 6
3923+#endif
3924+
3925 cas [%o1], %g1, %g7
3926 cmp %g1, %g7
3927 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3928@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
3929 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
3930 BACKOFF_SETUP(%o2)
3931 1: ldx [%o1], %g1
3932- add %g1, %o0, %g7
3933+ addcc %g1, %o0, %g7
3934+
3935+#ifdef CONFIG_PAX_REFCOUNT
3936+ tvs %xcc, 6
3937+#endif
3938+
3939 casx [%o1], %g1, %g7
3940 cmp %g1, %g7
3941 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3942@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
3943 2: BACKOFF_SPIN(%o2, %o3, 1b)
3944 .size atomic64_add, .-atomic64_add
3945
3946+ .globl atomic64_add_unchecked
3947+ .type atomic64_add_unchecked,#function
3948+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3949+ BACKOFF_SETUP(%o2)
3950+1: ldx [%o1], %g1
3951+ addcc %g1, %o0, %g7
3952+ casx [%o1], %g1, %g7
3953+ cmp %g1, %g7
3954+ bne,pn %xcc, 2f
3955+ nop
3956+ retl
3957+ nop
3958+2: BACKOFF_SPIN(%o2, %o3, 1b)
3959+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
3960+
3961 .globl atomic64_sub
3962 .type atomic64_sub,#function
3963 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3964 BACKOFF_SETUP(%o2)
3965 1: ldx [%o1], %g1
3966- sub %g1, %o0, %g7
3967+ subcc %g1, %o0, %g7
3968+
3969+#ifdef CONFIG_PAX_REFCOUNT
3970+ tvs %xcc, 6
3971+#endif
3972+
3973 casx [%o1], %g1, %g7
3974 cmp %g1, %g7
3975 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3976@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
3977 2: BACKOFF_SPIN(%o2, %o3, 1b)
3978 .size atomic64_sub, .-atomic64_sub
3979
3980+ .globl atomic64_sub_unchecked
3981+ .type atomic64_sub_unchecked,#function
3982+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3983+ BACKOFF_SETUP(%o2)
3984+1: ldx [%o1], %g1
3985+ subcc %g1, %o0, %g7
3986+ casx [%o1], %g1, %g7
3987+ cmp %g1, %g7
3988+ bne,pn %xcc, 2f
3989+ nop
3990+ retl
3991+ nop
3992+2: BACKOFF_SPIN(%o2, %o3, 1b)
3993+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
3994+
3995 .globl atomic64_add_ret
3996 .type atomic64_add_ret,#function
3997 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3998 BACKOFF_SETUP(%o2)
3999 1: ldx [%o1], %g1
4000- add %g1, %o0, %g7
4001+ addcc %g1, %o0, %g7
4002+
4003+#ifdef CONFIG_PAX_REFCOUNT
4004+ tvs %xcc, 6
4005+#endif
4006+
4007 casx [%o1], %g1, %g7
4008 cmp %g1, %g7
4009 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4010@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4011 2: BACKOFF_SPIN(%o2, %o3, 1b)
4012 .size atomic64_add_ret, .-atomic64_add_ret
4013
4014+ .globl atomic64_add_ret_unchecked
4015+ .type atomic64_add_ret_unchecked,#function
4016+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4017+ BACKOFF_SETUP(%o2)
4018+1: ldx [%o1], %g1
4019+ addcc %g1, %o0, %g7
4020+ casx [%o1], %g1, %g7
4021+ cmp %g1, %g7
4022+ bne,pn %xcc, 2f
4023+ add %g7, %o0, %g7
4024+ mov %g7, %o0
4025+ retl
4026+ nop
4027+2: BACKOFF_SPIN(%o2, %o3, 1b)
4028+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4029+
4030 .globl atomic64_sub_ret
4031 .type atomic64_sub_ret,#function
4032 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4033 BACKOFF_SETUP(%o2)
4034 1: ldx [%o1], %g1
4035- sub %g1, %o0, %g7
4036+ subcc %g1, %o0, %g7
4037+
4038+#ifdef CONFIG_PAX_REFCOUNT
4039+ tvs %xcc, 6
4040+#endif
4041+
4042 casx [%o1], %g1, %g7
4043 cmp %g1, %g7
4044 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4045diff -urNp linux-3.0.7/arch/sparc/lib/ksyms.c linux-3.0.7/arch/sparc/lib/ksyms.c
4046--- linux-3.0.7/arch/sparc/lib/ksyms.c 2011-07-21 22:17:23.000000000 -0400
4047+++ linux-3.0.7/arch/sparc/lib/ksyms.c 2011-08-23 21:48:14.000000000 -0400
4048@@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4049
4050 /* Atomic counter implementation. */
4051 EXPORT_SYMBOL(atomic_add);
4052+EXPORT_SYMBOL(atomic_add_unchecked);
4053 EXPORT_SYMBOL(atomic_add_ret);
4054+EXPORT_SYMBOL(atomic_add_ret_unchecked);
4055 EXPORT_SYMBOL(atomic_sub);
4056+EXPORT_SYMBOL(atomic_sub_unchecked);
4057 EXPORT_SYMBOL(atomic_sub_ret);
4058 EXPORT_SYMBOL(atomic64_add);
4059+EXPORT_SYMBOL(atomic64_add_unchecked);
4060 EXPORT_SYMBOL(atomic64_add_ret);
4061+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4062 EXPORT_SYMBOL(atomic64_sub);
4063+EXPORT_SYMBOL(atomic64_sub_unchecked);
4064 EXPORT_SYMBOL(atomic64_sub_ret);
4065
4066 /* Atomic bit operations. */
4067diff -urNp linux-3.0.7/arch/sparc/lib/Makefile linux-3.0.7/arch/sparc/lib/Makefile
4068--- linux-3.0.7/arch/sparc/lib/Makefile 2011-09-02 18:11:21.000000000 -0400
4069+++ linux-3.0.7/arch/sparc/lib/Makefile 2011-08-23 21:47:55.000000000 -0400
4070@@ -2,7 +2,7 @@
4071 #
4072
4073 asflags-y := -ansi -DST_DIV0=0x02
4074-ccflags-y := -Werror
4075+#ccflags-y := -Werror
4076
4077 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4078 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4079diff -urNp linux-3.0.7/arch/sparc/Makefile linux-3.0.7/arch/sparc/Makefile
4080--- linux-3.0.7/arch/sparc/Makefile 2011-07-21 22:17:23.000000000 -0400
4081+++ linux-3.0.7/arch/sparc/Makefile 2011-08-23 21:48:14.000000000 -0400
4082@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4083 # Export what is needed by arch/sparc/boot/Makefile
4084 export VMLINUX_INIT VMLINUX_MAIN
4085 VMLINUX_INIT := $(head-y) $(init-y)
4086-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4087+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4088 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4089 VMLINUX_MAIN += $(drivers-y) $(net-y)
4090
4091diff -urNp linux-3.0.7/arch/sparc/mm/fault_32.c linux-3.0.7/arch/sparc/mm/fault_32.c
4092--- linux-3.0.7/arch/sparc/mm/fault_32.c 2011-07-21 22:17:23.000000000 -0400
4093+++ linux-3.0.7/arch/sparc/mm/fault_32.c 2011-08-23 21:47:55.000000000 -0400
4094@@ -22,6 +22,9 @@
4095 #include <linux/interrupt.h>
4096 #include <linux/module.h>
4097 #include <linux/kdebug.h>
4098+#include <linux/slab.h>
4099+#include <linux/pagemap.h>
4100+#include <linux/compiler.h>
4101
4102 #include <asm/system.h>
4103 #include <asm/page.h>
4104@@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4105 return safe_compute_effective_address(regs, insn);
4106 }
4107
4108+#ifdef CONFIG_PAX_PAGEEXEC
4109+#ifdef CONFIG_PAX_DLRESOLVE
4110+static void pax_emuplt_close(struct vm_area_struct *vma)
4111+{
4112+ vma->vm_mm->call_dl_resolve = 0UL;
4113+}
4114+
4115+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4116+{
4117+ unsigned int *kaddr;
4118+
4119+ vmf->page = alloc_page(GFP_HIGHUSER);
4120+ if (!vmf->page)
4121+ return VM_FAULT_OOM;
4122+
4123+ kaddr = kmap(vmf->page);
4124+ memset(kaddr, 0, PAGE_SIZE);
4125+ kaddr[0] = 0x9DE3BFA8U; /* save */
4126+ flush_dcache_page(vmf->page);
4127+ kunmap(vmf->page);
4128+ return VM_FAULT_MAJOR;
4129+}
4130+
4131+static const struct vm_operations_struct pax_vm_ops = {
4132+ .close = pax_emuplt_close,
4133+ .fault = pax_emuplt_fault
4134+};
4135+
4136+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4137+{
4138+ int ret;
4139+
4140+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4141+ vma->vm_mm = current->mm;
4142+ vma->vm_start = addr;
4143+ vma->vm_end = addr + PAGE_SIZE;
4144+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4145+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4146+ vma->vm_ops = &pax_vm_ops;
4147+
4148+ ret = insert_vm_struct(current->mm, vma);
4149+ if (ret)
4150+ return ret;
4151+
4152+ ++current->mm->total_vm;
4153+ return 0;
4154+}
4155+#endif
4156+
4157+/*
4158+ * PaX: decide what to do with offenders (regs->pc = fault address)
4159+ *
4160+ * returns 1 when task should be killed
4161+ * 2 when patched PLT trampoline was detected
4162+ * 3 when unpatched PLT trampoline was detected
4163+ */
4164+static int pax_handle_fetch_fault(struct pt_regs *regs)
4165+{
4166+
4167+#ifdef CONFIG_PAX_EMUPLT
4168+ int err;
4169+
4170+ do { /* PaX: patched PLT emulation #1 */
4171+ unsigned int sethi1, sethi2, jmpl;
4172+
4173+ err = get_user(sethi1, (unsigned int *)regs->pc);
4174+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4175+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4176+
4177+ if (err)
4178+ break;
4179+
4180+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4181+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4182+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4183+ {
4184+ unsigned int addr;
4185+
4186+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4187+ addr = regs->u_regs[UREG_G1];
4188+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4189+ regs->pc = addr;
4190+ regs->npc = addr+4;
4191+ return 2;
4192+ }
4193+ } while (0);
4194+
4195+ { /* PaX: patched PLT emulation #2 */
4196+ unsigned int ba;
4197+
4198+ err = get_user(ba, (unsigned int *)regs->pc);
4199+
4200+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4201+ unsigned int addr;
4202+
4203+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4204+ regs->pc = addr;
4205+ regs->npc = addr+4;
4206+ return 2;
4207+ }
4208+ }
4209+
4210+ do { /* PaX: patched PLT emulation #3 */
4211+ unsigned int sethi, jmpl, nop;
4212+
4213+ err = get_user(sethi, (unsigned int *)regs->pc);
4214+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4215+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4216+
4217+ if (err)
4218+ break;
4219+
4220+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4221+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4222+ nop == 0x01000000U)
4223+ {
4224+ unsigned int addr;
4225+
4226+ addr = (sethi & 0x003FFFFFU) << 10;
4227+ regs->u_regs[UREG_G1] = addr;
4228+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4229+ regs->pc = addr;
4230+ regs->npc = addr+4;
4231+ return 2;
4232+ }
4233+ } while (0);
4234+
4235+ do { /* PaX: unpatched PLT emulation step 1 */
4236+ unsigned int sethi, ba, nop;
4237+
4238+ err = get_user(sethi, (unsigned int *)regs->pc);
4239+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
4240+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4241+
4242+ if (err)
4243+ break;
4244+
4245+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4246+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4247+ nop == 0x01000000U)
4248+ {
4249+ unsigned int addr, save, call;
4250+
4251+ if ((ba & 0xFFC00000U) == 0x30800000U)
4252+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4253+ else
4254+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4255+
4256+ err = get_user(save, (unsigned int *)addr);
4257+ err |= get_user(call, (unsigned int *)(addr+4));
4258+ err |= get_user(nop, (unsigned int *)(addr+8));
4259+ if (err)
4260+ break;
4261+
4262+#ifdef CONFIG_PAX_DLRESOLVE
4263+ if (save == 0x9DE3BFA8U &&
4264+ (call & 0xC0000000U) == 0x40000000U &&
4265+ nop == 0x01000000U)
4266+ {
4267+ struct vm_area_struct *vma;
4268+ unsigned long call_dl_resolve;
4269+
4270+ down_read(&current->mm->mmap_sem);
4271+ call_dl_resolve = current->mm->call_dl_resolve;
4272+ up_read(&current->mm->mmap_sem);
4273+ if (likely(call_dl_resolve))
4274+ goto emulate;
4275+
4276+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4277+
4278+ down_write(&current->mm->mmap_sem);
4279+ if (current->mm->call_dl_resolve) {
4280+ call_dl_resolve = current->mm->call_dl_resolve;
4281+ up_write(&current->mm->mmap_sem);
4282+ if (vma)
4283+ kmem_cache_free(vm_area_cachep, vma);
4284+ goto emulate;
4285+ }
4286+
4287+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4288+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4289+ up_write(&current->mm->mmap_sem);
4290+ if (vma)
4291+ kmem_cache_free(vm_area_cachep, vma);
4292+ return 1;
4293+ }
4294+
4295+ if (pax_insert_vma(vma, call_dl_resolve)) {
4296+ up_write(&current->mm->mmap_sem);
4297+ kmem_cache_free(vm_area_cachep, vma);
4298+ return 1;
4299+ }
4300+
4301+ current->mm->call_dl_resolve = call_dl_resolve;
4302+ up_write(&current->mm->mmap_sem);
4303+
4304+emulate:
4305+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4306+ regs->pc = call_dl_resolve;
4307+ regs->npc = addr+4;
4308+ return 3;
4309+ }
4310+#endif
4311+
4312+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4313+ if ((save & 0xFFC00000U) == 0x05000000U &&
4314+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4315+ nop == 0x01000000U)
4316+ {
4317+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4318+ regs->u_regs[UREG_G2] = addr + 4;
4319+ addr = (save & 0x003FFFFFU) << 10;
4320+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4321+ regs->pc = addr;
4322+ regs->npc = addr+4;
4323+ return 3;
4324+ }
4325+ }
4326+ } while (0);
4327+
4328+ do { /* PaX: unpatched PLT emulation step 2 */
4329+ unsigned int save, call, nop;
4330+
4331+ err = get_user(save, (unsigned int *)(regs->pc-4));
4332+ err |= get_user(call, (unsigned int *)regs->pc);
4333+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
4334+ if (err)
4335+ break;
4336+
4337+ if (save == 0x9DE3BFA8U &&
4338+ (call & 0xC0000000U) == 0x40000000U &&
4339+ nop == 0x01000000U)
4340+ {
4341+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4342+
4343+ regs->u_regs[UREG_RETPC] = regs->pc;
4344+ regs->pc = dl_resolve;
4345+ regs->npc = dl_resolve+4;
4346+ return 3;
4347+ }
4348+ } while (0);
4349+#endif
4350+
4351+ return 1;
4352+}
4353+
4354+void pax_report_insns(void *pc, void *sp)
4355+{
4356+ unsigned long i;
4357+
4358+ printk(KERN_ERR "PAX: bytes at PC: ");
4359+ for (i = 0; i < 8; i++) {
4360+ unsigned int c;
4361+ if (get_user(c, (unsigned int *)pc+i))
4362+ printk(KERN_CONT "???????? ");
4363+ else
4364+ printk(KERN_CONT "%08x ", c);
4365+ }
4366+ printk("\n");
4367+}
4368+#endif
4369+
4370 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4371 int text_fault)
4372 {
4373@@ -281,6 +546,24 @@ good_area:
4374 if(!(vma->vm_flags & VM_WRITE))
4375 goto bad_area;
4376 } else {
4377+
4378+#ifdef CONFIG_PAX_PAGEEXEC
4379+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4380+ up_read(&mm->mmap_sem);
4381+ switch (pax_handle_fetch_fault(regs)) {
4382+
4383+#ifdef CONFIG_PAX_EMUPLT
4384+ case 2:
4385+ case 3:
4386+ return;
4387+#endif
4388+
4389+ }
4390+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4391+ do_group_exit(SIGKILL);
4392+ }
4393+#endif
4394+
4395 /* Allow reads even for write-only mappings */
4396 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4397 goto bad_area;
4398diff -urNp linux-3.0.7/arch/sparc/mm/fault_64.c linux-3.0.7/arch/sparc/mm/fault_64.c
4399--- linux-3.0.7/arch/sparc/mm/fault_64.c 2011-07-21 22:17:23.000000000 -0400
4400+++ linux-3.0.7/arch/sparc/mm/fault_64.c 2011-08-23 21:48:14.000000000 -0400
4401@@ -21,6 +21,9 @@
4402 #include <linux/kprobes.h>
4403 #include <linux/kdebug.h>
4404 #include <linux/percpu.h>
4405+#include <linux/slab.h>
4406+#include <linux/pagemap.h>
4407+#include <linux/compiler.h>
4408
4409 #include <asm/page.h>
4410 #include <asm/pgtable.h>
4411@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
4412 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4413 regs->tpc);
4414 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4415- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4416+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4417 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4418 dump_stack();
4419 unhandled_fault(regs->tpc, current, regs);
4420@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4421 show_regs(regs);
4422 }
4423
4424+#ifdef CONFIG_PAX_PAGEEXEC
4425+#ifdef CONFIG_PAX_DLRESOLVE
4426+static void pax_emuplt_close(struct vm_area_struct *vma)
4427+{
4428+ vma->vm_mm->call_dl_resolve = 0UL;
4429+}
4430+
4431+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4432+{
4433+ unsigned int *kaddr;
4434+
4435+ vmf->page = alloc_page(GFP_HIGHUSER);
4436+ if (!vmf->page)
4437+ return VM_FAULT_OOM;
4438+
4439+ kaddr = kmap(vmf->page);
4440+ memset(kaddr, 0, PAGE_SIZE);
4441+ kaddr[0] = 0x9DE3BFA8U; /* save */
4442+ flush_dcache_page(vmf->page);
4443+ kunmap(vmf->page);
4444+ return VM_FAULT_MAJOR;
4445+}
4446+
4447+static const struct vm_operations_struct pax_vm_ops = {
4448+ .close = pax_emuplt_close,
4449+ .fault = pax_emuplt_fault
4450+};
4451+
4452+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4453+{
4454+ int ret;
4455+
4456+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4457+ vma->vm_mm = current->mm;
4458+ vma->vm_start = addr;
4459+ vma->vm_end = addr + PAGE_SIZE;
4460+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4461+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4462+ vma->vm_ops = &pax_vm_ops;
4463+
4464+ ret = insert_vm_struct(current->mm, vma);
4465+ if (ret)
4466+ return ret;
4467+
4468+ ++current->mm->total_vm;
4469+ return 0;
4470+}
4471+#endif
4472+
4473+/*
4474+ * PaX: decide what to do with offenders (regs->tpc = fault address)
4475+ *
4476+ * returns 1 when task should be killed
4477+ * 2 when patched PLT trampoline was detected
4478+ * 3 when unpatched PLT trampoline was detected
4479+ */
4480+static int pax_handle_fetch_fault(struct pt_regs *regs)
4481+{
4482+
4483+#ifdef CONFIG_PAX_EMUPLT
4484+ int err;
4485+
4486+ do { /* PaX: patched PLT emulation #1 */
4487+ unsigned int sethi1, sethi2, jmpl;
4488+
4489+ err = get_user(sethi1, (unsigned int *)regs->tpc);
4490+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4491+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4492+
4493+ if (err)
4494+ break;
4495+
4496+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4497+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4498+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4499+ {
4500+ unsigned long addr;
4501+
4502+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4503+ addr = regs->u_regs[UREG_G1];
4504+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4505+
4506+ if (test_thread_flag(TIF_32BIT))
4507+ addr &= 0xFFFFFFFFUL;
4508+
4509+ regs->tpc = addr;
4510+ regs->tnpc = addr+4;
4511+ return 2;
4512+ }
4513+ } while (0);
4514+
4515+ { /* PaX: patched PLT emulation #2 */
4516+ unsigned int ba;
4517+
4518+ err = get_user(ba, (unsigned int *)regs->tpc);
4519+
4520+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4521+ unsigned long addr;
4522+
4523+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4524+
4525+ if (test_thread_flag(TIF_32BIT))
4526+ addr &= 0xFFFFFFFFUL;
4527+
4528+ regs->tpc = addr;
4529+ regs->tnpc = addr+4;
4530+ return 2;
4531+ }
4532+ }
4533+
4534+ do { /* PaX: patched PLT emulation #3 */
4535+ unsigned int sethi, jmpl, nop;
4536+
4537+ err = get_user(sethi, (unsigned int *)regs->tpc);
4538+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4539+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4540+
4541+ if (err)
4542+ break;
4543+
4544+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4545+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4546+ nop == 0x01000000U)
4547+ {
4548+ unsigned long addr;
4549+
4550+ addr = (sethi & 0x003FFFFFU) << 10;
4551+ regs->u_regs[UREG_G1] = addr;
4552+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4553+
4554+ if (test_thread_flag(TIF_32BIT))
4555+ addr &= 0xFFFFFFFFUL;
4556+
4557+ regs->tpc = addr;
4558+ regs->tnpc = addr+4;
4559+ return 2;
4560+ }
4561+ } while (0);
4562+
4563+ do { /* PaX: patched PLT emulation #4 */
4564+ unsigned int sethi, mov1, call, mov2;
4565+
4566+ err = get_user(sethi, (unsigned int *)regs->tpc);
4567+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4568+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
4569+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4570+
4571+ if (err)
4572+ break;
4573+
4574+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4575+ mov1 == 0x8210000FU &&
4576+ (call & 0xC0000000U) == 0x40000000U &&
4577+ mov2 == 0x9E100001U)
4578+ {
4579+ unsigned long addr;
4580+
4581+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4582+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4583+
4584+ if (test_thread_flag(TIF_32BIT))
4585+ addr &= 0xFFFFFFFFUL;
4586+
4587+ regs->tpc = addr;
4588+ regs->tnpc = addr+4;
4589+ return 2;
4590+ }
4591+ } while (0);
4592+
4593+ do { /* PaX: patched PLT emulation #5 */
4594+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4595+
4596+ err = get_user(sethi, (unsigned int *)regs->tpc);
4597+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4598+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4599+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4600+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4601+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4602+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4603+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4604+
4605+ if (err)
4606+ break;
4607+
4608+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4609+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4610+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4611+ (or1 & 0xFFFFE000U) == 0x82106000U &&
4612+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4613+ sllx == 0x83287020U &&
4614+ jmpl == 0x81C04005U &&
4615+ nop == 0x01000000U)
4616+ {
4617+ unsigned long addr;
4618+
4619+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4620+ regs->u_regs[UREG_G1] <<= 32;
4621+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4622+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4623+ regs->tpc = addr;
4624+ regs->tnpc = addr+4;
4625+ return 2;
4626+ }
4627+ } while (0);
4628+
4629+ do { /* PaX: patched PLT emulation #6 */
4630+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
4631+
4632+ err = get_user(sethi, (unsigned int *)regs->tpc);
4633+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4634+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4635+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4636+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
4637+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4638+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4639+
4640+ if (err)
4641+ break;
4642+
4643+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4644+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4645+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4646+ sllx == 0x83287020U &&
4647+ (or & 0xFFFFE000U) == 0x8A116000U &&
4648+ jmpl == 0x81C04005U &&
4649+ nop == 0x01000000U)
4650+ {
4651+ unsigned long addr;
4652+
4653+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4654+ regs->u_regs[UREG_G1] <<= 32;
4655+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4656+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4657+ regs->tpc = addr;
4658+ regs->tnpc = addr+4;
4659+ return 2;
4660+ }
4661+ } while (0);
4662+
4663+ do { /* PaX: unpatched PLT emulation step 1 */
4664+ unsigned int sethi, ba, nop;
4665+
4666+ err = get_user(sethi, (unsigned int *)regs->tpc);
4667+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4668+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4669+
4670+ if (err)
4671+ break;
4672+
4673+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4674+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4675+ nop == 0x01000000U)
4676+ {
4677+ unsigned long addr;
4678+ unsigned int save, call;
4679+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
4680+
4681+ if ((ba & 0xFFC00000U) == 0x30800000U)
4682+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4683+ else
4684+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4685+
4686+ if (test_thread_flag(TIF_32BIT))
4687+ addr &= 0xFFFFFFFFUL;
4688+
4689+ err = get_user(save, (unsigned int *)addr);
4690+ err |= get_user(call, (unsigned int *)(addr+4));
4691+ err |= get_user(nop, (unsigned int *)(addr+8));
4692+ if (err)
4693+ break;
4694+
4695+#ifdef CONFIG_PAX_DLRESOLVE
4696+ if (save == 0x9DE3BFA8U &&
4697+ (call & 0xC0000000U) == 0x40000000U &&
4698+ nop == 0x01000000U)
4699+ {
4700+ struct vm_area_struct *vma;
4701+ unsigned long call_dl_resolve;
4702+
4703+ down_read(&current->mm->mmap_sem);
4704+ call_dl_resolve = current->mm->call_dl_resolve;
4705+ up_read(&current->mm->mmap_sem);
4706+ if (likely(call_dl_resolve))
4707+ goto emulate;
4708+
4709+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4710+
4711+ down_write(&current->mm->mmap_sem);
4712+ if (current->mm->call_dl_resolve) {
4713+ call_dl_resolve = current->mm->call_dl_resolve;
4714+ up_write(&current->mm->mmap_sem);
4715+ if (vma)
4716+ kmem_cache_free(vm_area_cachep, vma);
4717+ goto emulate;
4718+ }
4719+
4720+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4721+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4722+ up_write(&current->mm->mmap_sem);
4723+ if (vma)
4724+ kmem_cache_free(vm_area_cachep, vma);
4725+ return 1;
4726+ }
4727+
4728+ if (pax_insert_vma(vma, call_dl_resolve)) {
4729+ up_write(&current->mm->mmap_sem);
4730+ kmem_cache_free(vm_area_cachep, vma);
4731+ return 1;
4732+ }
4733+
4734+ current->mm->call_dl_resolve = call_dl_resolve;
4735+ up_write(&current->mm->mmap_sem);
4736+
4737+emulate:
4738+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4739+ regs->tpc = call_dl_resolve;
4740+ regs->tnpc = addr+4;
4741+ return 3;
4742+ }
4743+#endif
4744+
4745+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4746+ if ((save & 0xFFC00000U) == 0x05000000U &&
4747+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4748+ nop == 0x01000000U)
4749+ {
4750+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4751+ regs->u_regs[UREG_G2] = addr + 4;
4752+ addr = (save & 0x003FFFFFU) << 10;
4753+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4754+
4755+ if (test_thread_flag(TIF_32BIT))
4756+ addr &= 0xFFFFFFFFUL;
4757+
4758+ regs->tpc = addr;
4759+ regs->tnpc = addr+4;
4760+ return 3;
4761+ }
4762+
4763+ /* PaX: 64-bit PLT stub */
4764+ err = get_user(sethi1, (unsigned int *)addr);
4765+ err |= get_user(sethi2, (unsigned int *)(addr+4));
4766+ err |= get_user(or1, (unsigned int *)(addr+8));
4767+ err |= get_user(or2, (unsigned int *)(addr+12));
4768+ err |= get_user(sllx, (unsigned int *)(addr+16));
4769+ err |= get_user(add, (unsigned int *)(addr+20));
4770+ err |= get_user(jmpl, (unsigned int *)(addr+24));
4771+ err |= get_user(nop, (unsigned int *)(addr+28));
4772+ if (err)
4773+ break;
4774+
4775+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
4776+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4777+ (or1 & 0xFFFFE000U) == 0x88112000U &&
4778+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4779+ sllx == 0x89293020U &&
4780+ add == 0x8A010005U &&
4781+ jmpl == 0x89C14000U &&
4782+ nop == 0x01000000U)
4783+ {
4784+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4785+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4786+ regs->u_regs[UREG_G4] <<= 32;
4787+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4788+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
4789+ regs->u_regs[UREG_G4] = addr + 24;
4790+ addr = regs->u_regs[UREG_G5];
4791+ regs->tpc = addr;
4792+ regs->tnpc = addr+4;
4793+ return 3;
4794+ }
4795+ }
4796+ } while (0);
4797+
4798+#ifdef CONFIG_PAX_DLRESOLVE
4799+ do { /* PaX: unpatched PLT emulation step 2 */
4800+ unsigned int save, call, nop;
4801+
4802+ err = get_user(save, (unsigned int *)(regs->tpc-4));
4803+ err |= get_user(call, (unsigned int *)regs->tpc);
4804+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
4805+ if (err)
4806+ break;
4807+
4808+ if (save == 0x9DE3BFA8U &&
4809+ (call & 0xC0000000U) == 0x40000000U &&
4810+ nop == 0x01000000U)
4811+ {
4812+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4813+
4814+ if (test_thread_flag(TIF_32BIT))
4815+ dl_resolve &= 0xFFFFFFFFUL;
4816+
4817+ regs->u_regs[UREG_RETPC] = regs->tpc;
4818+ regs->tpc = dl_resolve;
4819+ regs->tnpc = dl_resolve+4;
4820+ return 3;
4821+ }
4822+ } while (0);
4823+#endif
4824+
4825+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
4826+ unsigned int sethi, ba, nop;
4827+
4828+ err = get_user(sethi, (unsigned int *)regs->tpc);
4829+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4830+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4831+
4832+ if (err)
4833+ break;
4834+
4835+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4836+ (ba & 0xFFF00000U) == 0x30600000U &&
4837+ nop == 0x01000000U)
4838+ {
4839+ unsigned long addr;
4840+
4841+ addr = (sethi & 0x003FFFFFU) << 10;
4842+ regs->u_regs[UREG_G1] = addr;
4843+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4844+
4845+ if (test_thread_flag(TIF_32BIT))
4846+ addr &= 0xFFFFFFFFUL;
4847+
4848+ regs->tpc = addr;
4849+ regs->tnpc = addr+4;
4850+ return 2;
4851+ }
4852+ } while (0);
4853+
4854+#endif
4855+
4856+ return 1;
4857+}
4858+
4859+void pax_report_insns(void *pc, void *sp)
4860+{
4861+ unsigned long i;
4862+
4863+ printk(KERN_ERR "PAX: bytes at PC: ");
4864+ for (i = 0; i < 8; i++) {
4865+ unsigned int c;
4866+ if (get_user(c, (unsigned int *)pc+i))
4867+ printk(KERN_CONT "???????? ");
4868+ else
4869+ printk(KERN_CONT "%08x ", c);
4870+ }
4871+ printk("\n");
4872+}
4873+#endif
4874+
4875 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
4876 {
4877 struct mm_struct *mm = current->mm;
4878@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
4879 if (!vma)
4880 goto bad_area;
4881
4882+#ifdef CONFIG_PAX_PAGEEXEC
4883+ /* PaX: detect ITLB misses on non-exec pages */
4884+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
4885+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
4886+ {
4887+ if (address != regs->tpc)
4888+ goto good_area;
4889+
4890+ up_read(&mm->mmap_sem);
4891+ switch (pax_handle_fetch_fault(regs)) {
4892+
4893+#ifdef CONFIG_PAX_EMUPLT
4894+ case 2:
4895+ case 3:
4896+ return;
4897+#endif
4898+
4899+ }
4900+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
4901+ do_group_exit(SIGKILL);
4902+ }
4903+#endif
4904+
4905 /* Pure DTLB misses do not tell us whether the fault causing
4906 * load/store/atomic was a write or not, it only says that there
4907 * was no match. So in such a case we (carefully) read the
4908diff -urNp linux-3.0.7/arch/sparc/mm/hugetlbpage.c linux-3.0.7/arch/sparc/mm/hugetlbpage.c
4909--- linux-3.0.7/arch/sparc/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
4910+++ linux-3.0.7/arch/sparc/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
4911@@ -68,7 +68,7 @@ full_search:
4912 }
4913 return -ENOMEM;
4914 }
4915- if (likely(!vma || addr + len <= vma->vm_start)) {
4916+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4917 /*
4918 * Remember the place where we stopped the search:
4919 */
4920@@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
4921 /* make sure it can fit in the remaining address space */
4922 if (likely(addr > len)) {
4923 vma = find_vma(mm, addr-len);
4924- if (!vma || addr <= vma->vm_start) {
4925+ if (check_heap_stack_gap(vma, addr - len, len)) {
4926 /* remember the address as a hint for next time */
4927 return (mm->free_area_cache = addr-len);
4928 }
4929@@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
4930 if (unlikely(mm->mmap_base < len))
4931 goto bottomup;
4932
4933- addr = (mm->mmap_base-len) & HPAGE_MASK;
4934+ addr = mm->mmap_base - len;
4935
4936 do {
4937+ addr &= HPAGE_MASK;
4938 /*
4939 * Lookup failure means no vma is above this address,
4940 * else if new region fits below vma->vm_start,
4941 * return with success:
4942 */
4943 vma = find_vma(mm, addr);
4944- if (likely(!vma || addr+len <= vma->vm_start)) {
4945+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4946 /* remember the address as a hint for next time */
4947 return (mm->free_area_cache = addr);
4948 }
4949@@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
4950 mm->cached_hole_size = vma->vm_start - addr;
4951
4952 /* try just below the current vma->vm_start */
4953- addr = (vma->vm_start-len) & HPAGE_MASK;
4954- } while (likely(len < vma->vm_start));
4955+ addr = skip_heap_stack_gap(vma, len);
4956+ } while (!IS_ERR_VALUE(addr));
4957
4958 bottomup:
4959 /*
4960@@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
4961 if (addr) {
4962 addr = ALIGN(addr, HPAGE_SIZE);
4963 vma = find_vma(mm, addr);
4964- if (task_size - len >= addr &&
4965- (!vma || addr + len <= vma->vm_start))
4966+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4967 return addr;
4968 }
4969 if (mm->get_unmapped_area == arch_get_unmapped_area)
4970diff -urNp linux-3.0.7/arch/sparc/mm/init_32.c linux-3.0.7/arch/sparc/mm/init_32.c
4971--- linux-3.0.7/arch/sparc/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
4972+++ linux-3.0.7/arch/sparc/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
4973@@ -316,6 +316,9 @@ extern void device_scan(void);
4974 pgprot_t PAGE_SHARED __read_mostly;
4975 EXPORT_SYMBOL(PAGE_SHARED);
4976
4977+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
4978+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
4979+
4980 void __init paging_init(void)
4981 {
4982 switch(sparc_cpu_model) {
4983@@ -344,17 +347,17 @@ void __init paging_init(void)
4984
4985 /* Initialize the protection map with non-constant, MMU dependent values. */
4986 protection_map[0] = PAGE_NONE;
4987- protection_map[1] = PAGE_READONLY;
4988- protection_map[2] = PAGE_COPY;
4989- protection_map[3] = PAGE_COPY;
4990+ protection_map[1] = PAGE_READONLY_NOEXEC;
4991+ protection_map[2] = PAGE_COPY_NOEXEC;
4992+ protection_map[3] = PAGE_COPY_NOEXEC;
4993 protection_map[4] = PAGE_READONLY;
4994 protection_map[5] = PAGE_READONLY;
4995 protection_map[6] = PAGE_COPY;
4996 protection_map[7] = PAGE_COPY;
4997 protection_map[8] = PAGE_NONE;
4998- protection_map[9] = PAGE_READONLY;
4999- protection_map[10] = PAGE_SHARED;
5000- protection_map[11] = PAGE_SHARED;
5001+ protection_map[9] = PAGE_READONLY_NOEXEC;
5002+ protection_map[10] = PAGE_SHARED_NOEXEC;
5003+ protection_map[11] = PAGE_SHARED_NOEXEC;
5004 protection_map[12] = PAGE_READONLY;
5005 protection_map[13] = PAGE_READONLY;
5006 protection_map[14] = PAGE_SHARED;
5007diff -urNp linux-3.0.7/arch/sparc/mm/Makefile linux-3.0.7/arch/sparc/mm/Makefile
5008--- linux-3.0.7/arch/sparc/mm/Makefile 2011-07-21 22:17:23.000000000 -0400
5009+++ linux-3.0.7/arch/sparc/mm/Makefile 2011-08-23 21:47:55.000000000 -0400
5010@@ -2,7 +2,7 @@
5011 #
5012
5013 asflags-y := -ansi
5014-ccflags-y := -Werror
5015+#ccflags-y := -Werror
5016
5017 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5018 obj-y += fault_$(BITS).o
5019diff -urNp linux-3.0.7/arch/sparc/mm/srmmu.c linux-3.0.7/arch/sparc/mm/srmmu.c
5020--- linux-3.0.7/arch/sparc/mm/srmmu.c 2011-07-21 22:17:23.000000000 -0400
5021+++ linux-3.0.7/arch/sparc/mm/srmmu.c 2011-08-23 21:47:55.000000000 -0400
5022@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5023 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5024 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5025 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5026+
5027+#ifdef CONFIG_PAX_PAGEEXEC
5028+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5029+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5030+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5031+#endif
5032+
5033 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5034 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5035
5036diff -urNp linux-3.0.7/arch/um/include/asm/kmap_types.h linux-3.0.7/arch/um/include/asm/kmap_types.h
5037--- linux-3.0.7/arch/um/include/asm/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
5038+++ linux-3.0.7/arch/um/include/asm/kmap_types.h 2011-08-23 21:47:55.000000000 -0400
5039@@ -23,6 +23,7 @@ enum km_type {
5040 KM_IRQ1,
5041 KM_SOFTIRQ0,
5042 KM_SOFTIRQ1,
5043+ KM_CLEARPAGE,
5044 KM_TYPE_NR
5045 };
5046
5047diff -urNp linux-3.0.7/arch/um/include/asm/page.h linux-3.0.7/arch/um/include/asm/page.h
5048--- linux-3.0.7/arch/um/include/asm/page.h 2011-07-21 22:17:23.000000000 -0400
5049+++ linux-3.0.7/arch/um/include/asm/page.h 2011-08-23 21:47:55.000000000 -0400
5050@@ -14,6 +14,9 @@
5051 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5052 #define PAGE_MASK (~(PAGE_SIZE-1))
5053
5054+#define ktla_ktva(addr) (addr)
5055+#define ktva_ktla(addr) (addr)
5056+
5057 #ifndef __ASSEMBLY__
5058
5059 struct page;
5060diff -urNp linux-3.0.7/arch/um/kernel/process.c linux-3.0.7/arch/um/kernel/process.c
5061--- linux-3.0.7/arch/um/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
5062+++ linux-3.0.7/arch/um/kernel/process.c 2011-08-23 21:47:55.000000000 -0400
5063@@ -404,22 +404,6 @@ int singlestepping(void * t)
5064 return 2;
5065 }
5066
5067-/*
5068- * Only x86 and x86_64 have an arch_align_stack().
5069- * All other arches have "#define arch_align_stack(x) (x)"
5070- * in their asm/system.h
5071- * As this is included in UML from asm-um/system-generic.h,
5072- * we can use it to behave as the subarch does.
5073- */
5074-#ifndef arch_align_stack
5075-unsigned long arch_align_stack(unsigned long sp)
5076-{
5077- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5078- sp -= get_random_int() % 8192;
5079- return sp & ~0xf;
5080-}
5081-#endif
5082-
5083 unsigned long get_wchan(struct task_struct *p)
5084 {
5085 unsigned long stack_page, sp, ip;
5086diff -urNp linux-3.0.7/arch/um/sys-i386/syscalls.c linux-3.0.7/arch/um/sys-i386/syscalls.c
5087--- linux-3.0.7/arch/um/sys-i386/syscalls.c 2011-07-21 22:17:23.000000000 -0400
5088+++ linux-3.0.7/arch/um/sys-i386/syscalls.c 2011-08-23 21:47:55.000000000 -0400
5089@@ -11,6 +11,21 @@
5090 #include "asm/uaccess.h"
5091 #include "asm/unistd.h"
5092
5093+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5094+{
5095+ unsigned long pax_task_size = TASK_SIZE;
5096+
5097+#ifdef CONFIG_PAX_SEGMEXEC
5098+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5099+ pax_task_size = SEGMEXEC_TASK_SIZE;
5100+#endif
5101+
5102+ if (len > pax_task_size || addr > pax_task_size - len)
5103+ return -EINVAL;
5104+
5105+ return 0;
5106+}
5107+
5108 /*
5109 * The prototype on i386 is:
5110 *
5111diff -urNp linux-3.0.7/arch/x86/boot/bitops.h linux-3.0.7/arch/x86/boot/bitops.h
5112--- linux-3.0.7/arch/x86/boot/bitops.h 2011-07-21 22:17:23.000000000 -0400
5113+++ linux-3.0.7/arch/x86/boot/bitops.h 2011-08-23 21:47:55.000000000 -0400
5114@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5115 u8 v;
5116 const u32 *p = (const u32 *)addr;
5117
5118- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5119+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5120 return v;
5121 }
5122
5123@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5124
5125 static inline void set_bit(int nr, void *addr)
5126 {
5127- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5128+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5129 }
5130
5131 #endif /* BOOT_BITOPS_H */
5132diff -urNp linux-3.0.7/arch/x86/boot/boot.h linux-3.0.7/arch/x86/boot/boot.h
5133--- linux-3.0.7/arch/x86/boot/boot.h 2011-07-21 22:17:23.000000000 -0400
5134+++ linux-3.0.7/arch/x86/boot/boot.h 2011-08-23 21:47:55.000000000 -0400
5135@@ -85,7 +85,7 @@ static inline void io_delay(void)
5136 static inline u16 ds(void)
5137 {
5138 u16 seg;
5139- asm("movw %%ds,%0" : "=rm" (seg));
5140+ asm volatile("movw %%ds,%0" : "=rm" (seg));
5141 return seg;
5142 }
5143
5144@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
5145 static inline int memcmp(const void *s1, const void *s2, size_t len)
5146 {
5147 u8 diff;
5148- asm("repe; cmpsb; setnz %0"
5149+ asm volatile("repe; cmpsb; setnz %0"
5150 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5151 return diff;
5152 }
5153diff -urNp linux-3.0.7/arch/x86/boot/compressed/head_32.S linux-3.0.7/arch/x86/boot/compressed/head_32.S
5154--- linux-3.0.7/arch/x86/boot/compressed/head_32.S 2011-07-21 22:17:23.000000000 -0400
5155+++ linux-3.0.7/arch/x86/boot/compressed/head_32.S 2011-08-23 21:47:55.000000000 -0400
5156@@ -76,7 +76,7 @@ ENTRY(startup_32)
5157 notl %eax
5158 andl %eax, %ebx
5159 #else
5160- movl $LOAD_PHYSICAL_ADDR, %ebx
5161+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5162 #endif
5163
5164 /* Target address to relocate to for decompression */
5165@@ -162,7 +162,7 @@ relocated:
5166 * and where it was actually loaded.
5167 */
5168 movl %ebp, %ebx
5169- subl $LOAD_PHYSICAL_ADDR, %ebx
5170+ subl $____LOAD_PHYSICAL_ADDR, %ebx
5171 jz 2f /* Nothing to be done if loaded at compiled addr. */
5172 /*
5173 * Process relocations.
5174@@ -170,8 +170,7 @@ relocated:
5175
5176 1: subl $4, %edi
5177 movl (%edi), %ecx
5178- testl %ecx, %ecx
5179- jz 2f
5180+ jecxz 2f
5181 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5182 jmp 1b
5183 2:
5184diff -urNp linux-3.0.7/arch/x86/boot/compressed/head_64.S linux-3.0.7/arch/x86/boot/compressed/head_64.S
5185--- linux-3.0.7/arch/x86/boot/compressed/head_64.S 2011-07-21 22:17:23.000000000 -0400
5186+++ linux-3.0.7/arch/x86/boot/compressed/head_64.S 2011-08-23 21:47:55.000000000 -0400
5187@@ -91,7 +91,7 @@ ENTRY(startup_32)
5188 notl %eax
5189 andl %eax, %ebx
5190 #else
5191- movl $LOAD_PHYSICAL_ADDR, %ebx
5192+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5193 #endif
5194
5195 /* Target address to relocate to for decompression */
5196@@ -233,7 +233,7 @@ ENTRY(startup_64)
5197 notq %rax
5198 andq %rax, %rbp
5199 #else
5200- movq $LOAD_PHYSICAL_ADDR, %rbp
5201+ movq $____LOAD_PHYSICAL_ADDR, %rbp
5202 #endif
5203
5204 /* Target address to relocate to for decompression */
5205diff -urNp linux-3.0.7/arch/x86/boot/compressed/Makefile linux-3.0.7/arch/x86/boot/compressed/Makefile
5206--- linux-3.0.7/arch/x86/boot/compressed/Makefile 2011-07-21 22:17:23.000000000 -0400
5207+++ linux-3.0.7/arch/x86/boot/compressed/Makefile 2011-08-23 21:47:55.000000000 -0400
5208@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
5209 KBUILD_CFLAGS += $(cflags-y)
5210 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5211 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5212+ifdef CONSTIFY_PLUGIN
5213+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5214+endif
5215
5216 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5217 GCOV_PROFILE := n
5218diff -urNp linux-3.0.7/arch/x86/boot/compressed/misc.c linux-3.0.7/arch/x86/boot/compressed/misc.c
5219--- linux-3.0.7/arch/x86/boot/compressed/misc.c 2011-07-21 22:17:23.000000000 -0400
5220+++ linux-3.0.7/arch/x86/boot/compressed/misc.c 2011-08-23 21:47:55.000000000 -0400
5221@@ -310,7 +310,7 @@ static void parse_elf(void *output)
5222 case PT_LOAD:
5223 #ifdef CONFIG_RELOCATABLE
5224 dest = output;
5225- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5226+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5227 #else
5228 dest = (void *)(phdr->p_paddr);
5229 #endif
5230@@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
5231 error("Destination address too large");
5232 #endif
5233 #ifndef CONFIG_RELOCATABLE
5234- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5235+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5236 error("Wrong destination address");
5237 #endif
5238
5239diff -urNp linux-3.0.7/arch/x86/boot/compressed/relocs.c linux-3.0.7/arch/x86/boot/compressed/relocs.c
5240--- linux-3.0.7/arch/x86/boot/compressed/relocs.c 2011-07-21 22:17:23.000000000 -0400
5241+++ linux-3.0.7/arch/x86/boot/compressed/relocs.c 2011-08-23 21:47:55.000000000 -0400
5242@@ -13,8 +13,11 @@
5243
5244 static void die(char *fmt, ...);
5245
5246+#include "../../../../include/generated/autoconf.h"
5247+
5248 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5249 static Elf32_Ehdr ehdr;
5250+static Elf32_Phdr *phdr;
5251 static unsigned long reloc_count, reloc_idx;
5252 static unsigned long *relocs;
5253
5254@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5255 }
5256 }
5257
5258+static void read_phdrs(FILE *fp)
5259+{
5260+ unsigned int i;
5261+
5262+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5263+ if (!phdr) {
5264+ die("Unable to allocate %d program headers\n",
5265+ ehdr.e_phnum);
5266+ }
5267+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5268+ die("Seek to %d failed: %s\n",
5269+ ehdr.e_phoff, strerror(errno));
5270+ }
5271+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5272+ die("Cannot read ELF program headers: %s\n",
5273+ strerror(errno));
5274+ }
5275+ for(i = 0; i < ehdr.e_phnum; i++) {
5276+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5277+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5278+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5279+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5280+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5281+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5282+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5283+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5284+ }
5285+
5286+}
5287+
5288 static void read_shdrs(FILE *fp)
5289 {
5290- int i;
5291+ unsigned int i;
5292 Elf32_Shdr shdr;
5293
5294 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5295@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5296
5297 static void read_strtabs(FILE *fp)
5298 {
5299- int i;
5300+ unsigned int i;
5301 for (i = 0; i < ehdr.e_shnum; i++) {
5302 struct section *sec = &secs[i];
5303 if (sec->shdr.sh_type != SHT_STRTAB) {
5304@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5305
5306 static void read_symtabs(FILE *fp)
5307 {
5308- int i,j;
5309+ unsigned int i,j;
5310 for (i = 0; i < ehdr.e_shnum; i++) {
5311 struct section *sec = &secs[i];
5312 if (sec->shdr.sh_type != SHT_SYMTAB) {
5313@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5314
5315 static void read_relocs(FILE *fp)
5316 {
5317- int i,j;
5318+ unsigned int i,j;
5319+ uint32_t base;
5320+
5321 for (i = 0; i < ehdr.e_shnum; i++) {
5322 struct section *sec = &secs[i];
5323 if (sec->shdr.sh_type != SHT_REL) {
5324@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5325 die("Cannot read symbol table: %s\n",
5326 strerror(errno));
5327 }
5328+ base = 0;
5329+ for (j = 0; j < ehdr.e_phnum; j++) {
5330+ if (phdr[j].p_type != PT_LOAD )
5331+ continue;
5332+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5333+ continue;
5334+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5335+ break;
5336+ }
5337 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5338 Elf32_Rel *rel = &sec->reltab[j];
5339- rel->r_offset = elf32_to_cpu(rel->r_offset);
5340+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5341 rel->r_info = elf32_to_cpu(rel->r_info);
5342 }
5343 }
5344@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5345
5346 static void print_absolute_symbols(void)
5347 {
5348- int i;
5349+ unsigned int i;
5350 printf("Absolute symbols\n");
5351 printf(" Num: Value Size Type Bind Visibility Name\n");
5352 for (i = 0; i < ehdr.e_shnum; i++) {
5353 struct section *sec = &secs[i];
5354 char *sym_strtab;
5355 Elf32_Sym *sh_symtab;
5356- int j;
5357+ unsigned int j;
5358
5359 if (sec->shdr.sh_type != SHT_SYMTAB) {
5360 continue;
5361@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5362
5363 static void print_absolute_relocs(void)
5364 {
5365- int i, printed = 0;
5366+ unsigned int i, printed = 0;
5367
5368 for (i = 0; i < ehdr.e_shnum; i++) {
5369 struct section *sec = &secs[i];
5370 struct section *sec_applies, *sec_symtab;
5371 char *sym_strtab;
5372 Elf32_Sym *sh_symtab;
5373- int j;
5374+ unsigned int j;
5375 if (sec->shdr.sh_type != SHT_REL) {
5376 continue;
5377 }
5378@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5379
5380 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5381 {
5382- int i;
5383+ unsigned int i;
5384 /* Walk through the relocations */
5385 for (i = 0; i < ehdr.e_shnum; i++) {
5386 char *sym_strtab;
5387 Elf32_Sym *sh_symtab;
5388 struct section *sec_applies, *sec_symtab;
5389- int j;
5390+ unsigned int j;
5391 struct section *sec = &secs[i];
5392
5393 if (sec->shdr.sh_type != SHT_REL) {
5394@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5395 !is_rel_reloc(sym_name(sym_strtab, sym))) {
5396 continue;
5397 }
5398+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5399+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5400+ continue;
5401+
5402+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5403+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5404+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5405+ continue;
5406+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5407+ continue;
5408+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5409+ continue;
5410+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5411+ continue;
5412+#endif
5413+
5414 switch (r_type) {
5415 case R_386_NONE:
5416 case R_386_PC32:
5417@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5418
5419 static void emit_relocs(int as_text)
5420 {
5421- int i;
5422+ unsigned int i;
5423 /* Count how many relocations I have and allocate space for them. */
5424 reloc_count = 0;
5425 walk_relocs(count_reloc);
5426@@ -665,6 +725,7 @@ int main(int argc, char **argv)
5427 fname, strerror(errno));
5428 }
5429 read_ehdr(fp);
5430+ read_phdrs(fp);
5431 read_shdrs(fp);
5432 read_strtabs(fp);
5433 read_symtabs(fp);
5434diff -urNp linux-3.0.7/arch/x86/boot/cpucheck.c linux-3.0.7/arch/x86/boot/cpucheck.c
5435--- linux-3.0.7/arch/x86/boot/cpucheck.c 2011-07-21 22:17:23.000000000 -0400
5436+++ linux-3.0.7/arch/x86/boot/cpucheck.c 2011-08-23 21:47:55.000000000 -0400
5437@@ -74,7 +74,7 @@ static int has_fpu(void)
5438 u16 fcw = -1, fsw = -1;
5439 u32 cr0;
5440
5441- asm("movl %%cr0,%0" : "=r" (cr0));
5442+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
5443 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5444 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5445 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5446@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5447 {
5448 u32 f0, f1;
5449
5450- asm("pushfl ; "
5451+ asm volatile("pushfl ; "
5452 "pushfl ; "
5453 "popl %0 ; "
5454 "movl %0,%1 ; "
5455@@ -115,7 +115,7 @@ static void get_flags(void)
5456 set_bit(X86_FEATURE_FPU, cpu.flags);
5457
5458 if (has_eflag(X86_EFLAGS_ID)) {
5459- asm("cpuid"
5460+ asm volatile("cpuid"
5461 : "=a" (max_intel_level),
5462 "=b" (cpu_vendor[0]),
5463 "=d" (cpu_vendor[1]),
5464@@ -124,7 +124,7 @@ static void get_flags(void)
5465
5466 if (max_intel_level >= 0x00000001 &&
5467 max_intel_level <= 0x0000ffff) {
5468- asm("cpuid"
5469+ asm volatile("cpuid"
5470 : "=a" (tfms),
5471 "=c" (cpu.flags[4]),
5472 "=d" (cpu.flags[0])
5473@@ -136,7 +136,7 @@ static void get_flags(void)
5474 cpu.model += ((tfms >> 16) & 0xf) << 4;
5475 }
5476
5477- asm("cpuid"
5478+ asm volatile("cpuid"
5479 : "=a" (max_amd_level)
5480 : "a" (0x80000000)
5481 : "ebx", "ecx", "edx");
5482@@ -144,7 +144,7 @@ static void get_flags(void)
5483 if (max_amd_level >= 0x80000001 &&
5484 max_amd_level <= 0x8000ffff) {
5485 u32 eax = 0x80000001;
5486- asm("cpuid"
5487+ asm volatile("cpuid"
5488 : "+a" (eax),
5489 "=c" (cpu.flags[6]),
5490 "=d" (cpu.flags[1])
5491@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5492 u32 ecx = MSR_K7_HWCR;
5493 u32 eax, edx;
5494
5495- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5496+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5497 eax &= ~(1 << 15);
5498- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5499+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5500
5501 get_flags(); /* Make sure it really did something */
5502 err = check_flags();
5503@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5504 u32 ecx = MSR_VIA_FCR;
5505 u32 eax, edx;
5506
5507- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5508+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5509 eax |= (1<<1)|(1<<7);
5510- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5511+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5512
5513 set_bit(X86_FEATURE_CX8, cpu.flags);
5514 err = check_flags();
5515@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5516 u32 eax, edx;
5517 u32 level = 1;
5518
5519- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5520- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5521- asm("cpuid"
5522+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5523+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5524+ asm volatile("cpuid"
5525 : "+a" (level), "=d" (cpu.flags[0])
5526 : : "ecx", "ebx");
5527- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5528+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5529
5530 err = check_flags();
5531 }
5532diff -urNp linux-3.0.7/arch/x86/boot/header.S linux-3.0.7/arch/x86/boot/header.S
5533--- linux-3.0.7/arch/x86/boot/header.S 2011-07-21 22:17:23.000000000 -0400
5534+++ linux-3.0.7/arch/x86/boot/header.S 2011-08-23 21:47:55.000000000 -0400
5535@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
5536 # single linked list of
5537 # struct setup_data
5538
5539-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
5540+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
5541
5542 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5543 #define VO_INIT_SIZE (VO__end - VO__text)
5544diff -urNp linux-3.0.7/arch/x86/boot/Makefile linux-3.0.7/arch/x86/boot/Makefile
5545--- linux-3.0.7/arch/x86/boot/Makefile 2011-07-21 22:17:23.000000000 -0400
5546+++ linux-3.0.7/arch/x86/boot/Makefile 2011-08-23 21:47:55.000000000 -0400
5547@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
5548 $(call cc-option, -fno-stack-protector) \
5549 $(call cc-option, -mpreferred-stack-boundary=2)
5550 KBUILD_CFLAGS += $(call cc-option, -m32)
5551+ifdef CONSTIFY_PLUGIN
5552+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5553+endif
5554 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5555 GCOV_PROFILE := n
5556
5557diff -urNp linux-3.0.7/arch/x86/boot/memory.c linux-3.0.7/arch/x86/boot/memory.c
5558--- linux-3.0.7/arch/x86/boot/memory.c 2011-07-21 22:17:23.000000000 -0400
5559+++ linux-3.0.7/arch/x86/boot/memory.c 2011-08-23 21:47:55.000000000 -0400
5560@@ -19,7 +19,7 @@
5561
5562 static int detect_memory_e820(void)
5563 {
5564- int count = 0;
5565+ unsigned int count = 0;
5566 struct biosregs ireg, oreg;
5567 struct e820entry *desc = boot_params.e820_map;
5568 static struct e820entry buf; /* static so it is zeroed */
5569diff -urNp linux-3.0.7/arch/x86/boot/video.c linux-3.0.7/arch/x86/boot/video.c
5570--- linux-3.0.7/arch/x86/boot/video.c 2011-07-21 22:17:23.000000000 -0400
5571+++ linux-3.0.7/arch/x86/boot/video.c 2011-08-23 21:47:55.000000000 -0400
5572@@ -96,7 +96,7 @@ static void store_mode_params(void)
5573 static unsigned int get_entry(void)
5574 {
5575 char entry_buf[4];
5576- int i, len = 0;
5577+ unsigned int i, len = 0;
5578 int key;
5579 unsigned int v;
5580
5581diff -urNp linux-3.0.7/arch/x86/boot/video-vesa.c linux-3.0.7/arch/x86/boot/video-vesa.c
5582--- linux-3.0.7/arch/x86/boot/video-vesa.c 2011-07-21 22:17:23.000000000 -0400
5583+++ linux-3.0.7/arch/x86/boot/video-vesa.c 2011-08-23 21:47:55.000000000 -0400
5584@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
5585
5586 boot_params.screen_info.vesapm_seg = oreg.es;
5587 boot_params.screen_info.vesapm_off = oreg.di;
5588+ boot_params.screen_info.vesapm_size = oreg.cx;
5589 }
5590
5591 /*
5592diff -urNp linux-3.0.7/arch/x86/crypto/aes-x86_64-asm_64.S linux-3.0.7/arch/x86/crypto/aes-x86_64-asm_64.S
5593--- linux-3.0.7/arch/x86/crypto/aes-x86_64-asm_64.S 2011-07-21 22:17:23.000000000 -0400
5594+++ linux-3.0.7/arch/x86/crypto/aes-x86_64-asm_64.S 2011-10-06 04:17:55.000000000 -0400
5595@@ -8,6 +8,8 @@
5596 * including this sentence is retained in full.
5597 */
5598
5599+#include <asm/alternative-asm.h>
5600+
5601 .extern crypto_ft_tab
5602 .extern crypto_it_tab
5603 .extern crypto_fl_tab
5604@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
5605 je B192; \
5606 leaq 32(r9),r9;
5607
5608+#define ret pax_force_retaddr; ret
5609+
5610 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
5611 movq r1,r2; \
5612 movq r3,r4; \
5613diff -urNp linux-3.0.7/arch/x86/crypto/salsa20-x86_64-asm_64.S linux-3.0.7/arch/x86/crypto/salsa20-x86_64-asm_64.S
5614--- linux-3.0.7/arch/x86/crypto/salsa20-x86_64-asm_64.S 2011-07-21 22:17:23.000000000 -0400
5615+++ linux-3.0.7/arch/x86/crypto/salsa20-x86_64-asm_64.S 2011-10-06 04:17:55.000000000 -0400
5616@@ -1,3 +1,5 @@
5617+#include <asm/alternative-asm.h>
5618+
5619 # enter ECRYPT_encrypt_bytes
5620 .text
5621 .p2align 5
5622@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
5623 add %r11,%rsp
5624 mov %rdi,%rax
5625 mov %rsi,%rdx
5626+ pax_force_retaddr
5627 ret
5628 # bytesatleast65:
5629 ._bytesatleast65:
5630@@ -891,6 +894,7 @@ ECRYPT_keysetup:
5631 add %r11,%rsp
5632 mov %rdi,%rax
5633 mov %rsi,%rdx
5634+ pax_force_retaddr
5635 ret
5636 # enter ECRYPT_ivsetup
5637 .text
5638@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
5639 add %r11,%rsp
5640 mov %rdi,%rax
5641 mov %rsi,%rdx
5642+ pax_force_retaddr
5643 ret
5644diff -urNp linux-3.0.7/arch/x86/crypto/twofish-x86_64-asm_64.S linux-3.0.7/arch/x86/crypto/twofish-x86_64-asm_64.S
5645--- linux-3.0.7/arch/x86/crypto/twofish-x86_64-asm_64.S 2011-07-21 22:17:23.000000000 -0400
5646+++ linux-3.0.7/arch/x86/crypto/twofish-x86_64-asm_64.S 2011-10-06 04:17:55.000000000 -0400
5647@@ -21,6 +21,7 @@
5648 .text
5649
5650 #include <asm/asm-offsets.h>
5651+#include <asm/alternative-asm.h>
5652
5653 #define a_offset 0
5654 #define b_offset 4
5655@@ -269,6 +270,7 @@ twofish_enc_blk:
5656
5657 popq R1
5658 movq $1,%rax
5659+ pax_force_retaddr
5660 ret
5661
5662 twofish_dec_blk:
5663@@ -321,4 +323,5 @@ twofish_dec_blk:
5664
5665 popq R1
5666 movq $1,%rax
5667+ pax_force_retaddr
5668 ret
5669diff -urNp linux-3.0.7/arch/x86/ia32/ia32_aout.c linux-3.0.7/arch/x86/ia32/ia32_aout.c
5670--- linux-3.0.7/arch/x86/ia32/ia32_aout.c 2011-07-21 22:17:23.000000000 -0400
5671+++ linux-3.0.7/arch/x86/ia32/ia32_aout.c 2011-08-23 21:48:14.000000000 -0400
5672@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
5673 unsigned long dump_start, dump_size;
5674 struct user32 dump;
5675
5676+ memset(&dump, 0, sizeof(dump));
5677+
5678 fs = get_fs();
5679 set_fs(KERNEL_DS);
5680 has_dumped = 1;
5681diff -urNp linux-3.0.7/arch/x86/ia32/ia32entry.S linux-3.0.7/arch/x86/ia32/ia32entry.S
5682--- linux-3.0.7/arch/x86/ia32/ia32entry.S 2011-07-21 22:17:23.000000000 -0400
5683+++ linux-3.0.7/arch/x86/ia32/ia32entry.S 2011-10-11 10:44:33.000000000 -0400
5684@@ -13,7 +13,9 @@
5685 #include <asm/thread_info.h>
5686 #include <asm/segment.h>
5687 #include <asm/irqflags.h>
5688+#include <asm/pgtable.h>
5689 #include <linux/linkage.h>
5690+#include <asm/alternative-asm.h>
5691
5692 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
5693 #include <linux/elf-em.h>
5694@@ -95,6 +97,29 @@ ENTRY(native_irq_enable_sysexit)
5695 ENDPROC(native_irq_enable_sysexit)
5696 #endif
5697
5698+ .macro pax_enter_kernel_user
5699+#ifdef CONFIG_PAX_MEMORY_UDEREF
5700+ call pax_enter_kernel_user
5701+#endif
5702+ .endm
5703+
5704+ .macro pax_exit_kernel_user
5705+#ifdef CONFIG_PAX_MEMORY_UDEREF
5706+ call pax_exit_kernel_user
5707+#endif
5708+#ifdef CONFIG_PAX_RANDKSTACK
5709+ pushq %rax
5710+ call pax_randomize_kstack
5711+ popq %rax
5712+#endif
5713+ .endm
5714+
5715+ .macro pax_erase_kstack
5716+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5717+ call pax_erase_kstack
5718+#endif
5719+ .endm
5720+
5721 /*
5722 * 32bit SYSENTER instruction entry.
5723 *
5724@@ -121,7 +146,7 @@ ENTRY(ia32_sysenter_target)
5725 CFI_REGISTER rsp,rbp
5726 SWAPGS_UNSAFE_STACK
5727 movq PER_CPU_VAR(kernel_stack), %rsp
5728- addq $(KERNEL_STACK_OFFSET),%rsp
5729+ pax_enter_kernel_user
5730 /*
5731 * No need to follow this irqs on/off section: the syscall
5732 * disabled irqs, here we enable it straight after entry:
5733@@ -134,7 +159,8 @@ ENTRY(ia32_sysenter_target)
5734 CFI_REL_OFFSET rsp,0
5735 pushfq_cfi
5736 /*CFI_REL_OFFSET rflags,0*/
5737- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
5738+ GET_THREAD_INFO(%r10)
5739+ movl TI_sysenter_return(%r10), %r10d
5740 CFI_REGISTER rip,r10
5741 pushq_cfi $__USER32_CS
5742 /*CFI_REL_OFFSET cs,0*/
5743@@ -146,6 +172,12 @@ ENTRY(ia32_sysenter_target)
5744 SAVE_ARGS 0,0,1
5745 /* no need to do an access_ok check here because rbp has been
5746 32bit zero extended */
5747+
5748+#ifdef CONFIG_PAX_MEMORY_UDEREF
5749+ mov $PAX_USER_SHADOW_BASE,%r10
5750+ add %r10,%rbp
5751+#endif
5752+
5753 1: movl (%rbp),%ebp
5754 .section __ex_table,"a"
5755 .quad 1b,ia32_badarg
5756@@ -168,6 +200,8 @@ sysenter_dispatch:
5757 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5758 jnz sysexit_audit
5759 sysexit_from_sys_call:
5760+ pax_exit_kernel_user
5761+ pax_erase_kstack
5762 andl $~TS_COMPAT,TI_status(%r10)
5763 /* clear IF, that popfq doesn't enable interrupts early */
5764 andl $~0x200,EFLAGS-R11(%rsp)
5765@@ -194,6 +228,9 @@ sysexit_from_sys_call:
5766 movl %eax,%esi /* 2nd arg: syscall number */
5767 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
5768 call audit_syscall_entry
5769+
5770+ pax_erase_kstack
5771+
5772 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
5773 cmpq $(IA32_NR_syscalls-1),%rax
5774 ja ia32_badsys
5775@@ -246,6 +283,9 @@ sysenter_tracesys:
5776 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
5777 movq %rsp,%rdi /* &pt_regs -> arg1 */
5778 call syscall_trace_enter
5779+
5780+ pax_erase_kstack
5781+
5782 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5783 RESTORE_REST
5784 cmpq $(IA32_NR_syscalls-1),%rax
5785@@ -277,19 +317,24 @@ ENDPROC(ia32_sysenter_target)
5786 ENTRY(ia32_cstar_target)
5787 CFI_STARTPROC32 simple
5788 CFI_SIGNAL_FRAME
5789- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
5790+ CFI_DEF_CFA rsp,0
5791 CFI_REGISTER rip,rcx
5792 /*CFI_REGISTER rflags,r11*/
5793 SWAPGS_UNSAFE_STACK
5794 movl %esp,%r8d
5795 CFI_REGISTER rsp,r8
5796 movq PER_CPU_VAR(kernel_stack),%rsp
5797+
5798+#ifdef CONFIG_PAX_MEMORY_UDEREF
5799+ pax_enter_kernel_user
5800+#endif
5801+
5802 /*
5803 * No need to follow this irqs on/off section: the syscall
5804 * disabled irqs and here we enable it straight after entry:
5805 */
5806 ENABLE_INTERRUPTS(CLBR_NONE)
5807- SAVE_ARGS 8,1,1
5808+ SAVE_ARGS 8*6,1,1
5809 movl %eax,%eax /* zero extension */
5810 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
5811 movq %rcx,RIP-ARGOFFSET(%rsp)
5812@@ -305,6 +350,12 @@ ENTRY(ia32_cstar_target)
5813 /* no need to do an access_ok check here because r8 has been
5814 32bit zero extended */
5815 /* hardware stack frame is complete now */
5816+
5817+#ifdef CONFIG_PAX_MEMORY_UDEREF
5818+ mov $PAX_USER_SHADOW_BASE,%r10
5819+ add %r10,%r8
5820+#endif
5821+
5822 1: movl (%r8),%r9d
5823 .section __ex_table,"a"
5824 .quad 1b,ia32_badarg
5825@@ -327,6 +378,8 @@ cstar_dispatch:
5826 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5827 jnz sysretl_audit
5828 sysretl_from_sys_call:
5829+ pax_exit_kernel_user
5830+ pax_erase_kstack
5831 andl $~TS_COMPAT,TI_status(%r10)
5832 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
5833 movl RIP-ARGOFFSET(%rsp),%ecx
5834@@ -364,6 +417,9 @@ cstar_tracesys:
5835 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5836 movq %rsp,%rdi /* &pt_regs -> arg1 */
5837 call syscall_trace_enter
5838+
5839+ pax_erase_kstack
5840+
5841 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
5842 RESTORE_REST
5843 xchgl %ebp,%r9d
5844@@ -409,6 +465,7 @@ ENTRY(ia32_syscall)
5845 CFI_REL_OFFSET rip,RIP-RIP
5846 PARAVIRT_ADJUST_EXCEPTION_FRAME
5847 SWAPGS
5848+ pax_enter_kernel_user
5849 /*
5850 * No need to follow this irqs on/off section: the syscall
5851 * disabled irqs and here we enable it straight after entry:
5852@@ -441,6 +498,9 @@ ia32_tracesys:
5853 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5854 movq %rsp,%rdi /* &pt_regs -> arg1 */
5855 call syscall_trace_enter
5856+
5857+ pax_erase_kstack
5858+
5859 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5860 RESTORE_REST
5861 cmpq $(IA32_NR_syscalls-1),%rax
5862@@ -455,6 +515,7 @@ ia32_badsys:
5863
5864 quiet_ni_syscall:
5865 movq $-ENOSYS,%rax
5866+ pax_force_retaddr
5867 ret
5868 CFI_ENDPROC
5869
5870diff -urNp linux-3.0.7/arch/x86/ia32/ia32_signal.c linux-3.0.7/arch/x86/ia32/ia32_signal.c
5871--- linux-3.0.7/arch/x86/ia32/ia32_signal.c 2011-07-21 22:17:23.000000000 -0400
5872+++ linux-3.0.7/arch/x86/ia32/ia32_signal.c 2011-10-06 04:17:55.000000000 -0400
5873@@ -167,7 +167,7 @@ asmlinkage long sys32_sigaltstack(const
5874 }
5875 seg = get_fs();
5876 set_fs(KERNEL_DS);
5877- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
5878+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
5879 set_fs(seg);
5880 if (ret >= 0 && uoss_ptr) {
5881 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
5882@@ -374,7 +374,7 @@ static int ia32_setup_sigcontext(struct
5883 */
5884 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
5885 size_t frame_size,
5886- void **fpstate)
5887+ void __user **fpstate)
5888 {
5889 unsigned long sp;
5890
5891@@ -395,7 +395,7 @@ static void __user *get_sigframe(struct
5892
5893 if (used_math()) {
5894 sp = sp - sig_xstate_ia32_size;
5895- *fpstate = (struct _fpstate_ia32 *) sp;
5896+ *fpstate = (struct _fpstate_ia32 __user *) sp;
5897 if (save_i387_xstate_ia32(*fpstate) < 0)
5898 return (void __user *) -1L;
5899 }
5900@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
5901 sp -= frame_size;
5902 /* Align the stack pointer according to the i386 ABI,
5903 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
5904- sp = ((sp + 4) & -16ul) - 4;
5905+ sp = ((sp - 12) & -16ul) - 4;
5906 return (void __user *) sp;
5907 }
5908
5909@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
5910 * These are actually not used anymore, but left because some
5911 * gdb versions depend on them as a marker.
5912 */
5913- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5914+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
5915 } put_user_catch(err);
5916
5917 if (err)
5918@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
5919 0xb8,
5920 __NR_ia32_rt_sigreturn,
5921 0x80cd,
5922- 0,
5923+ 0
5924 };
5925
5926 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
5927@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
5928
5929 if (ka->sa.sa_flags & SA_RESTORER)
5930 restorer = ka->sa.sa_restorer;
5931+ else if (current->mm->context.vdso)
5932+ /* Return stub is in 32bit vsyscall page */
5933+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
5934 else
5935- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
5936- rt_sigreturn);
5937+ restorer = &frame->retcode;
5938 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
5939
5940 /*
5941 * Not actually used anymore, but left because some gdb
5942 * versions need it.
5943 */
5944- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5945+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
5946 } put_user_catch(err);
5947
5948 if (err)
5949diff -urNp linux-3.0.7/arch/x86/ia32/sys_ia32.c linux-3.0.7/arch/x86/ia32/sys_ia32.c
5950--- linux-3.0.7/arch/x86/ia32/sys_ia32.c 2011-07-21 22:17:23.000000000 -0400
5951+++ linux-3.0.7/arch/x86/ia32/sys_ia32.c 2011-10-06 04:17:55.000000000 -0400
5952@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsign
5953 */
5954 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
5955 {
5956- typeof(ubuf->st_uid) uid = 0;
5957- typeof(ubuf->st_gid) gid = 0;
5958+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
5959+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
5960 SET_UID(uid, stat->uid);
5961 SET_GID(gid, stat->gid);
5962 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
5963@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int
5964 }
5965 set_fs(KERNEL_DS);
5966 ret = sys_rt_sigprocmask(how,
5967- set ? (sigset_t __user *)&s : NULL,
5968- oset ? (sigset_t __user *)&s : NULL,
5969+ set ? (sigset_t __force_user *)&s : NULL,
5970+ oset ? (sigset_t __force_user *)&s : NULL,
5971 sigsetsize);
5972 set_fs(old_fs);
5973 if (ret)
5974@@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int
5975 return alarm_setitimer(seconds);
5976 }
5977
5978-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
5979+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
5980 int options)
5981 {
5982 return compat_sys_wait4(pid, stat_addr, options, NULL);
5983@@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_inter
5984 mm_segment_t old_fs = get_fs();
5985
5986 set_fs(KERNEL_DS);
5987- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
5988+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
5989 set_fs(old_fs);
5990 if (put_compat_timespec(&t, interval))
5991 return -EFAULT;
5992@@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(comp
5993 mm_segment_t old_fs = get_fs();
5994
5995 set_fs(KERNEL_DS);
5996- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
5997+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
5998 set_fs(old_fs);
5999 if (!ret) {
6000 switch (_NSIG_WORDS) {
6001@@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(in
6002 if (copy_siginfo_from_user32(&info, uinfo))
6003 return -EFAULT;
6004 set_fs(KERNEL_DS);
6005- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
6006+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
6007 set_fs(old_fs);
6008 return ret;
6009 }
6010@@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_f
6011 return -EFAULT;
6012
6013 set_fs(KERNEL_DS);
6014- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
6015+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
6016 count);
6017 set_fs(old_fs);
6018
6019diff -urNp linux-3.0.7/arch/x86/include/asm/alternative-asm.h linux-3.0.7/arch/x86/include/asm/alternative-asm.h
6020--- linux-3.0.7/arch/x86/include/asm/alternative-asm.h 2011-07-21 22:17:23.000000000 -0400
6021+++ linux-3.0.7/arch/x86/include/asm/alternative-asm.h 2011-10-07 19:07:23.000000000 -0400
6022@@ -15,6 +15,20 @@
6023 .endm
6024 #endif
6025
6026+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
6027+ .macro pax_force_retaddr rip=0
6028+ btsq $63,\rip(%rsp)
6029+ .endm
6030+ .macro pax_force_fptr ptr
6031+ btsq $63,\ptr
6032+ .endm
6033+#else
6034+ .macro pax_force_retaddr rip=0
6035+ .endm
6036+ .macro pax_force_fptr ptr
6037+ .endm
6038+#endif
6039+
6040 .macro altinstruction_entry orig alt feature orig_len alt_len
6041 .align 8
6042 .quad \orig
6043diff -urNp linux-3.0.7/arch/x86/include/asm/alternative.h linux-3.0.7/arch/x86/include/asm/alternative.h
6044--- linux-3.0.7/arch/x86/include/asm/alternative.h 2011-07-21 22:17:23.000000000 -0400
6045+++ linux-3.0.7/arch/x86/include/asm/alternative.h 2011-08-23 21:47:55.000000000 -0400
6046@@ -93,7 +93,7 @@ static inline int alternatives_text_rese
6047 ".section .discard,\"aw\",@progbits\n" \
6048 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
6049 ".previous\n" \
6050- ".section .altinstr_replacement, \"ax\"\n" \
6051+ ".section .altinstr_replacement, \"a\"\n" \
6052 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6053 ".previous"
6054
6055diff -urNp linux-3.0.7/arch/x86/include/asm/apic.h linux-3.0.7/arch/x86/include/asm/apic.h
6056--- linux-3.0.7/arch/x86/include/asm/apic.h 2011-07-21 22:17:23.000000000 -0400
6057+++ linux-3.0.7/arch/x86/include/asm/apic.h 2011-08-23 21:48:14.000000000 -0400
6058@@ -45,7 +45,7 @@ static inline void generic_apic_probe(vo
6059
6060 #ifdef CONFIG_X86_LOCAL_APIC
6061
6062-extern unsigned int apic_verbosity;
6063+extern int apic_verbosity;
6064 extern int local_apic_timer_c2_ok;
6065
6066 extern int disable_apic;
6067diff -urNp linux-3.0.7/arch/x86/include/asm/apm.h linux-3.0.7/arch/x86/include/asm/apm.h
6068--- linux-3.0.7/arch/x86/include/asm/apm.h 2011-07-21 22:17:23.000000000 -0400
6069+++ linux-3.0.7/arch/x86/include/asm/apm.h 2011-08-23 21:47:55.000000000 -0400
6070@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
6071 __asm__ __volatile__(APM_DO_ZERO_SEGS
6072 "pushl %%edi\n\t"
6073 "pushl %%ebp\n\t"
6074- "lcall *%%cs:apm_bios_entry\n\t"
6075+ "lcall *%%ss:apm_bios_entry\n\t"
6076 "setc %%al\n\t"
6077 "popl %%ebp\n\t"
6078 "popl %%edi\n\t"
6079@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
6080 __asm__ __volatile__(APM_DO_ZERO_SEGS
6081 "pushl %%edi\n\t"
6082 "pushl %%ebp\n\t"
6083- "lcall *%%cs:apm_bios_entry\n\t"
6084+ "lcall *%%ss:apm_bios_entry\n\t"
6085 "setc %%bl\n\t"
6086 "popl %%ebp\n\t"
6087 "popl %%edi\n\t"
6088diff -urNp linux-3.0.7/arch/x86/include/asm/atomic64_32.h linux-3.0.7/arch/x86/include/asm/atomic64_32.h
6089--- linux-3.0.7/arch/x86/include/asm/atomic64_32.h 2011-07-21 22:17:23.000000000 -0400
6090+++ linux-3.0.7/arch/x86/include/asm/atomic64_32.h 2011-08-23 21:47:55.000000000 -0400
6091@@ -12,6 +12,14 @@ typedef struct {
6092 u64 __aligned(8) counter;
6093 } atomic64_t;
6094
6095+#ifdef CONFIG_PAX_REFCOUNT
6096+typedef struct {
6097+ u64 __aligned(8) counter;
6098+} atomic64_unchecked_t;
6099+#else
6100+typedef atomic64_t atomic64_unchecked_t;
6101+#endif
6102+
6103 #define ATOMIC64_INIT(val) { (val) }
6104
6105 #ifdef CONFIG_X86_CMPXCHG64
6106@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
6107 }
6108
6109 /**
6110+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
6111+ * @p: pointer to type atomic64_unchecked_t
6112+ * @o: expected value
6113+ * @n: new value
6114+ *
6115+ * Atomically sets @v to @n if it was equal to @o and returns
6116+ * the old value.
6117+ */
6118+
6119+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
6120+{
6121+ return cmpxchg64(&v->counter, o, n);
6122+}
6123+
6124+/**
6125 * atomic64_xchg - xchg atomic64 variable
6126 * @v: pointer to type atomic64_t
6127 * @n: value to assign
6128@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
6129 }
6130
6131 /**
6132+ * atomic64_set_unchecked - set atomic64 variable
6133+ * @v: pointer to type atomic64_unchecked_t
6134+ * @n: value to assign
6135+ *
6136+ * Atomically sets the value of @v to @n.
6137+ */
6138+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
6139+{
6140+ unsigned high = (unsigned)(i >> 32);
6141+ unsigned low = (unsigned)i;
6142+ asm volatile(ATOMIC64_ALTERNATIVE(set)
6143+ : "+b" (low), "+c" (high)
6144+ : "S" (v)
6145+ : "eax", "edx", "memory"
6146+ );
6147+}
6148+
6149+/**
6150 * atomic64_read - read atomic64 variable
6151 * @v: pointer to type atomic64_t
6152 *
6153@@ -93,6 +134,22 @@ static inline long long atomic64_read(at
6154 }
6155
6156 /**
6157+ * atomic64_read_unchecked - read atomic64 variable
6158+ * @v: pointer to type atomic64_unchecked_t
6159+ *
6160+ * Atomically reads the value of @v and returns it.
6161+ */
6162+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
6163+{
6164+ long long r;
6165+ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
6166+ : "=A" (r), "+c" (v)
6167+ : : "memory"
6168+ );
6169+ return r;
6170+ }
6171+
6172+/**
6173 * atomic64_add_return - add and return
6174 * @i: integer value to add
6175 * @v: pointer to type atomic64_t
6176@@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
6177 return i;
6178 }
6179
6180+/**
6181+ * atomic64_add_return_unchecked - add and return
6182+ * @i: integer value to add
6183+ * @v: pointer to type atomic64_unchecked_t
6184+ *
6185+ * Atomically adds @i to @v and returns @i + *@v
6186+ */
6187+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
6188+{
6189+ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
6190+ : "+A" (i), "+c" (v)
6191+ : : "memory"
6192+ );
6193+ return i;
6194+}
6195+
6196 /*
6197 * Other variants with different arithmetic operators:
6198 */
6199@@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
6200 return a;
6201 }
6202
6203+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6204+{
6205+ long long a;
6206+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
6207+ : "=A" (a)
6208+ : "S" (v)
6209+ : "memory", "ecx"
6210+ );
6211+ return a;
6212+}
6213+
6214 static inline long long atomic64_dec_return(atomic64_t *v)
6215 {
6216 long long a;
6217@@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
6218 }
6219
6220 /**
6221+ * atomic64_add_unchecked - add integer to atomic64 variable
6222+ * @i: integer value to add
6223+ * @v: pointer to type atomic64_unchecked_t
6224+ *
6225+ * Atomically adds @i to @v.
6226+ */
6227+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
6228+{
6229+ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
6230+ : "+A" (i), "+c" (v)
6231+ : : "memory"
6232+ );
6233+ return i;
6234+}
6235+
6236+/**
6237 * atomic64_sub - subtract the atomic64 variable
6238 * @i: integer value to subtract
6239 * @v: pointer to type atomic64_t
6240diff -urNp linux-3.0.7/arch/x86/include/asm/atomic64_64.h linux-3.0.7/arch/x86/include/asm/atomic64_64.h
6241--- linux-3.0.7/arch/x86/include/asm/atomic64_64.h 2011-07-21 22:17:23.000000000 -0400
6242+++ linux-3.0.7/arch/x86/include/asm/atomic64_64.h 2011-08-23 21:47:55.000000000 -0400
6243@@ -18,7 +18,19 @@
6244 */
6245 static inline long atomic64_read(const atomic64_t *v)
6246 {
6247- return (*(volatile long *)&(v)->counter);
6248+ return (*(volatile const long *)&(v)->counter);
6249+}
6250+
6251+/**
6252+ * atomic64_read_unchecked - read atomic64 variable
6253+ * @v: pointer of type atomic64_unchecked_t
6254+ *
6255+ * Atomically reads the value of @v.
6256+ * Doesn't imply a read memory barrier.
6257+ */
6258+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6259+{
6260+ return (*(volatile const long *)&(v)->counter);
6261 }
6262
6263 /**
6264@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6265 }
6266
6267 /**
6268+ * atomic64_set_unchecked - set atomic64 variable
6269+ * @v: pointer to type atomic64_unchecked_t
6270+ * @i: required value
6271+ *
6272+ * Atomically sets the value of @v to @i.
6273+ */
6274+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6275+{
6276+ v->counter = i;
6277+}
6278+
6279+/**
6280 * atomic64_add - add integer to atomic64 variable
6281 * @i: integer value to add
6282 * @v: pointer to type atomic64_t
6283@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6284 */
6285 static inline void atomic64_add(long i, atomic64_t *v)
6286 {
6287+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
6288+
6289+#ifdef CONFIG_PAX_REFCOUNT
6290+ "jno 0f\n"
6291+ LOCK_PREFIX "subq %1,%0\n"
6292+ "int $4\n0:\n"
6293+ _ASM_EXTABLE(0b, 0b)
6294+#endif
6295+
6296+ : "=m" (v->counter)
6297+ : "er" (i), "m" (v->counter));
6298+}
6299+
6300+/**
6301+ * atomic64_add_unchecked - add integer to atomic64 variable
6302+ * @i: integer value to add
6303+ * @v: pointer to type atomic64_unchecked_t
6304+ *
6305+ * Atomically adds @i to @v.
6306+ */
6307+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6308+{
6309 asm volatile(LOCK_PREFIX "addq %1,%0"
6310 : "=m" (v->counter)
6311 : "er" (i), "m" (v->counter));
6312@@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
6313 */
6314 static inline void atomic64_sub(long i, atomic64_t *v)
6315 {
6316- asm volatile(LOCK_PREFIX "subq %1,%0"
6317+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6318+
6319+#ifdef CONFIG_PAX_REFCOUNT
6320+ "jno 0f\n"
6321+ LOCK_PREFIX "addq %1,%0\n"
6322+ "int $4\n0:\n"
6323+ _ASM_EXTABLE(0b, 0b)
6324+#endif
6325+
6326+ : "=m" (v->counter)
6327+ : "er" (i), "m" (v->counter));
6328+}
6329+
6330+/**
6331+ * atomic64_sub_unchecked - subtract the atomic64 variable
6332+ * @i: integer value to subtract
6333+ * @v: pointer to type atomic64_unchecked_t
6334+ *
6335+ * Atomically subtracts @i from @v.
6336+ */
6337+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6338+{
6339+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6340 : "=m" (v->counter)
6341 : "er" (i), "m" (v->counter));
6342 }
6343@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
6344 {
6345 unsigned char c;
6346
6347- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6348+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
6349+
6350+#ifdef CONFIG_PAX_REFCOUNT
6351+ "jno 0f\n"
6352+ LOCK_PREFIX "addq %2,%0\n"
6353+ "int $4\n0:\n"
6354+ _ASM_EXTABLE(0b, 0b)
6355+#endif
6356+
6357+ "sete %1\n"
6358 : "=m" (v->counter), "=qm" (c)
6359 : "er" (i), "m" (v->counter) : "memory");
6360 return c;
6361@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
6362 */
6363 static inline void atomic64_inc(atomic64_t *v)
6364 {
6365+ asm volatile(LOCK_PREFIX "incq %0\n"
6366+
6367+#ifdef CONFIG_PAX_REFCOUNT
6368+ "jno 0f\n"
6369+ LOCK_PREFIX "decq %0\n"
6370+ "int $4\n0:\n"
6371+ _ASM_EXTABLE(0b, 0b)
6372+#endif
6373+
6374+ : "=m" (v->counter)
6375+ : "m" (v->counter));
6376+}
6377+
6378+/**
6379+ * atomic64_inc_unchecked - increment atomic64 variable
6380+ * @v: pointer to type atomic64_unchecked_t
6381+ *
6382+ * Atomically increments @v by 1.
6383+ */
6384+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6385+{
6386 asm volatile(LOCK_PREFIX "incq %0"
6387 : "=m" (v->counter)
6388 : "m" (v->counter));
6389@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
6390 */
6391 static inline void atomic64_dec(atomic64_t *v)
6392 {
6393- asm volatile(LOCK_PREFIX "decq %0"
6394+ asm volatile(LOCK_PREFIX "decq %0\n"
6395+
6396+#ifdef CONFIG_PAX_REFCOUNT
6397+ "jno 0f\n"
6398+ LOCK_PREFIX "incq %0\n"
6399+ "int $4\n0:\n"
6400+ _ASM_EXTABLE(0b, 0b)
6401+#endif
6402+
6403+ : "=m" (v->counter)
6404+ : "m" (v->counter));
6405+}
6406+
6407+/**
6408+ * atomic64_dec_unchecked - decrement atomic64 variable
6409+ * @v: pointer to type atomic64_t
6410+ *
6411+ * Atomically decrements @v by 1.
6412+ */
6413+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6414+{
6415+ asm volatile(LOCK_PREFIX "decq %0\n"
6416 : "=m" (v->counter)
6417 : "m" (v->counter));
6418 }
6419@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
6420 {
6421 unsigned char c;
6422
6423- asm volatile(LOCK_PREFIX "decq %0; sete %1"
6424+ asm volatile(LOCK_PREFIX "decq %0\n"
6425+
6426+#ifdef CONFIG_PAX_REFCOUNT
6427+ "jno 0f\n"
6428+ LOCK_PREFIX "incq %0\n"
6429+ "int $4\n0:\n"
6430+ _ASM_EXTABLE(0b, 0b)
6431+#endif
6432+
6433+ "sete %1\n"
6434 : "=m" (v->counter), "=qm" (c)
6435 : "m" (v->counter) : "memory");
6436 return c != 0;
6437@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
6438 {
6439 unsigned char c;
6440
6441- asm volatile(LOCK_PREFIX "incq %0; sete %1"
6442+ asm volatile(LOCK_PREFIX "incq %0\n"
6443+
6444+#ifdef CONFIG_PAX_REFCOUNT
6445+ "jno 0f\n"
6446+ LOCK_PREFIX "decq %0\n"
6447+ "int $4\n0:\n"
6448+ _ASM_EXTABLE(0b, 0b)
6449+#endif
6450+
6451+ "sete %1\n"
6452 : "=m" (v->counter), "=qm" (c)
6453 : "m" (v->counter) : "memory");
6454 return c != 0;
6455@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
6456 {
6457 unsigned char c;
6458
6459- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6460+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
6461+
6462+#ifdef CONFIG_PAX_REFCOUNT
6463+ "jno 0f\n"
6464+ LOCK_PREFIX "subq %2,%0\n"
6465+ "int $4\n0:\n"
6466+ _ASM_EXTABLE(0b, 0b)
6467+#endif
6468+
6469+ "sets %1\n"
6470 : "=m" (v->counter), "=qm" (c)
6471 : "er" (i), "m" (v->counter) : "memory");
6472 return c;
6473@@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
6474 static inline long atomic64_add_return(long i, atomic64_t *v)
6475 {
6476 long __i = i;
6477- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6478+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6479+
6480+#ifdef CONFIG_PAX_REFCOUNT
6481+ "jno 0f\n"
6482+ "movq %0, %1\n"
6483+ "int $4\n0:\n"
6484+ _ASM_EXTABLE(0b, 0b)
6485+#endif
6486+
6487+ : "+r" (i), "+m" (v->counter)
6488+ : : "memory");
6489+ return i + __i;
6490+}
6491+
6492+/**
6493+ * atomic64_add_return_unchecked - add and return
6494+ * @i: integer value to add
6495+ * @v: pointer to type atomic64_unchecked_t
6496+ *
6497+ * Atomically adds @i to @v and returns @i + @v
6498+ */
6499+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6500+{
6501+ long __i = i;
6502+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
6503 : "+r" (i), "+m" (v->counter)
6504 : : "memory");
6505 return i + __i;
6506@@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
6507 }
6508
6509 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
6510+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6511+{
6512+ return atomic64_add_return_unchecked(1, v);
6513+}
6514 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
6515
6516 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6517@@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
6518 return cmpxchg(&v->counter, old, new);
6519 }
6520
6521+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
6522+{
6523+ return cmpxchg(&v->counter, old, new);
6524+}
6525+
6526 static inline long atomic64_xchg(atomic64_t *v, long new)
6527 {
6528 return xchg(&v->counter, new);
6529@@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
6530 */
6531 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6532 {
6533- long c, old;
6534+ long c, old, new;
6535 c = atomic64_read(v);
6536 for (;;) {
6537- if (unlikely(c == (u)))
6538+ if (unlikely(c == u))
6539 break;
6540- old = atomic64_cmpxchg((v), c, c + (a));
6541+
6542+ asm volatile("add %2,%0\n"
6543+
6544+#ifdef CONFIG_PAX_REFCOUNT
6545+ "jno 0f\n"
6546+ "sub %2,%0\n"
6547+ "int $4\n0:\n"
6548+ _ASM_EXTABLE(0b, 0b)
6549+#endif
6550+
6551+ : "=r" (new)
6552+ : "0" (c), "ir" (a));
6553+
6554+ old = atomic64_cmpxchg(v, c, new);
6555 if (likely(old == c))
6556 break;
6557 c = old;
6558 }
6559- return c != (u);
6560+ return c != u;
6561 }
6562
6563 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6564diff -urNp linux-3.0.7/arch/x86/include/asm/atomic.h linux-3.0.7/arch/x86/include/asm/atomic.h
6565--- linux-3.0.7/arch/x86/include/asm/atomic.h 2011-07-21 22:17:23.000000000 -0400
6566+++ linux-3.0.7/arch/x86/include/asm/atomic.h 2011-08-23 21:47:55.000000000 -0400
6567@@ -22,7 +22,18 @@
6568 */
6569 static inline int atomic_read(const atomic_t *v)
6570 {
6571- return (*(volatile int *)&(v)->counter);
6572+ return (*(volatile const int *)&(v)->counter);
6573+}
6574+
6575+/**
6576+ * atomic_read_unchecked - read atomic variable
6577+ * @v: pointer of type atomic_unchecked_t
6578+ *
6579+ * Atomically reads the value of @v.
6580+ */
6581+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6582+{
6583+ return (*(volatile const int *)&(v)->counter);
6584 }
6585
6586 /**
6587@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6588 }
6589
6590 /**
6591+ * atomic_set_unchecked - set atomic variable
6592+ * @v: pointer of type atomic_unchecked_t
6593+ * @i: required value
6594+ *
6595+ * Atomically sets the value of @v to @i.
6596+ */
6597+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6598+{
6599+ v->counter = i;
6600+}
6601+
6602+/**
6603 * atomic_add - add integer to atomic variable
6604 * @i: integer value to add
6605 * @v: pointer of type atomic_t
6606@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6607 */
6608 static inline void atomic_add(int i, atomic_t *v)
6609 {
6610- asm volatile(LOCK_PREFIX "addl %1,%0"
6611+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6612+
6613+#ifdef CONFIG_PAX_REFCOUNT
6614+ "jno 0f\n"
6615+ LOCK_PREFIX "subl %1,%0\n"
6616+ "int $4\n0:\n"
6617+ _ASM_EXTABLE(0b, 0b)
6618+#endif
6619+
6620+ : "+m" (v->counter)
6621+ : "ir" (i));
6622+}
6623+
6624+/**
6625+ * atomic_add_unchecked - add integer to atomic variable
6626+ * @i: integer value to add
6627+ * @v: pointer of type atomic_unchecked_t
6628+ *
6629+ * Atomically adds @i to @v.
6630+ */
6631+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6632+{
6633+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6634 : "+m" (v->counter)
6635 : "ir" (i));
6636 }
6637@@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6638 */
6639 static inline void atomic_sub(int i, atomic_t *v)
6640 {
6641- asm volatile(LOCK_PREFIX "subl %1,%0"
6642+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6643+
6644+#ifdef CONFIG_PAX_REFCOUNT
6645+ "jno 0f\n"
6646+ LOCK_PREFIX "addl %1,%0\n"
6647+ "int $4\n0:\n"
6648+ _ASM_EXTABLE(0b, 0b)
6649+#endif
6650+
6651+ : "+m" (v->counter)
6652+ : "ir" (i));
6653+}
6654+
6655+/**
6656+ * atomic_sub_unchecked - subtract integer from atomic variable
6657+ * @i: integer value to subtract
6658+ * @v: pointer of type atomic_unchecked_t
6659+ *
6660+ * Atomically subtracts @i from @v.
6661+ */
6662+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6663+{
6664+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6665 : "+m" (v->counter)
6666 : "ir" (i));
6667 }
6668@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6669 {
6670 unsigned char c;
6671
6672- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6673+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
6674+
6675+#ifdef CONFIG_PAX_REFCOUNT
6676+ "jno 0f\n"
6677+ LOCK_PREFIX "addl %2,%0\n"
6678+ "int $4\n0:\n"
6679+ _ASM_EXTABLE(0b, 0b)
6680+#endif
6681+
6682+ "sete %1\n"
6683 : "+m" (v->counter), "=qm" (c)
6684 : "ir" (i) : "memory");
6685 return c;
6686@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6687 */
6688 static inline void atomic_inc(atomic_t *v)
6689 {
6690- asm volatile(LOCK_PREFIX "incl %0"
6691+ asm volatile(LOCK_PREFIX "incl %0\n"
6692+
6693+#ifdef CONFIG_PAX_REFCOUNT
6694+ "jno 0f\n"
6695+ LOCK_PREFIX "decl %0\n"
6696+ "int $4\n0:\n"
6697+ _ASM_EXTABLE(0b, 0b)
6698+#endif
6699+
6700+ : "+m" (v->counter));
6701+}
6702+
6703+/**
6704+ * atomic_inc_unchecked - increment atomic variable
6705+ * @v: pointer of type atomic_unchecked_t
6706+ *
6707+ * Atomically increments @v by 1.
6708+ */
6709+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6710+{
6711+ asm volatile(LOCK_PREFIX "incl %0\n"
6712 : "+m" (v->counter));
6713 }
6714
6715@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6716 */
6717 static inline void atomic_dec(atomic_t *v)
6718 {
6719- asm volatile(LOCK_PREFIX "decl %0"
6720+ asm volatile(LOCK_PREFIX "decl %0\n"
6721+
6722+#ifdef CONFIG_PAX_REFCOUNT
6723+ "jno 0f\n"
6724+ LOCK_PREFIX "incl %0\n"
6725+ "int $4\n0:\n"
6726+ _ASM_EXTABLE(0b, 0b)
6727+#endif
6728+
6729+ : "+m" (v->counter));
6730+}
6731+
6732+/**
6733+ * atomic_dec_unchecked - decrement atomic variable
6734+ * @v: pointer of type atomic_unchecked_t
6735+ *
6736+ * Atomically decrements @v by 1.
6737+ */
6738+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6739+{
6740+ asm volatile(LOCK_PREFIX "decl %0\n"
6741 : "+m" (v->counter));
6742 }
6743
6744@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6745 {
6746 unsigned char c;
6747
6748- asm volatile(LOCK_PREFIX "decl %0; sete %1"
6749+ asm volatile(LOCK_PREFIX "decl %0\n"
6750+
6751+#ifdef CONFIG_PAX_REFCOUNT
6752+ "jno 0f\n"
6753+ LOCK_PREFIX "incl %0\n"
6754+ "int $4\n0:\n"
6755+ _ASM_EXTABLE(0b, 0b)
6756+#endif
6757+
6758+ "sete %1\n"
6759 : "+m" (v->counter), "=qm" (c)
6760 : : "memory");
6761 return c != 0;
6762@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
6763 {
6764 unsigned char c;
6765
6766- asm volatile(LOCK_PREFIX "incl %0; sete %1"
6767+ asm volatile(LOCK_PREFIX "incl %0\n"
6768+
6769+#ifdef CONFIG_PAX_REFCOUNT
6770+ "jno 0f\n"
6771+ LOCK_PREFIX "decl %0\n"
6772+ "int $4\n0:\n"
6773+ _ASM_EXTABLE(0b, 0b)
6774+#endif
6775+
6776+ "sete %1\n"
6777+ : "+m" (v->counter), "=qm" (c)
6778+ : : "memory");
6779+ return c != 0;
6780+}
6781+
6782+/**
6783+ * atomic_inc_and_test_unchecked - increment and test
6784+ * @v: pointer of type atomic_unchecked_t
6785+ *
6786+ * Atomically increments @v by 1
6787+ * and returns true if the result is zero, or false for all
6788+ * other cases.
6789+ */
6790+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6791+{
6792+ unsigned char c;
6793+
6794+ asm volatile(LOCK_PREFIX "incl %0\n"
6795+ "sete %1\n"
6796 : "+m" (v->counter), "=qm" (c)
6797 : : "memory");
6798 return c != 0;
6799@@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
6800 {
6801 unsigned char c;
6802
6803- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6804+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
6805+
6806+#ifdef CONFIG_PAX_REFCOUNT
6807+ "jno 0f\n"
6808+ LOCK_PREFIX "subl %2,%0\n"
6809+ "int $4\n0:\n"
6810+ _ASM_EXTABLE(0b, 0b)
6811+#endif
6812+
6813+ "sets %1\n"
6814 : "+m" (v->counter), "=qm" (c)
6815 : "ir" (i) : "memory");
6816 return c;
6817@@ -180,6 +342,46 @@ static inline int atomic_add_return(int
6818 #endif
6819 /* Modern 486+ processor */
6820 __i = i;
6821+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6822+
6823+#ifdef CONFIG_PAX_REFCOUNT
6824+ "jno 0f\n"
6825+ "movl %0, %1\n"
6826+ "int $4\n0:\n"
6827+ _ASM_EXTABLE(0b, 0b)
6828+#endif
6829+
6830+ : "+r" (i), "+m" (v->counter)
6831+ : : "memory");
6832+ return i + __i;
6833+
6834+#ifdef CONFIG_M386
6835+no_xadd: /* Legacy 386 processor */
6836+ local_irq_save(flags);
6837+ __i = atomic_read(v);
6838+ atomic_set(v, i + __i);
6839+ local_irq_restore(flags);
6840+ return i + __i;
6841+#endif
6842+}
6843+
6844+/**
6845+ * atomic_add_return_unchecked - add integer and return
6846+ * @v: pointer of type atomic_unchecked_t
6847+ * @i: integer value to add
6848+ *
6849+ * Atomically adds @i to @v and returns @i + @v
6850+ */
6851+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6852+{
6853+ int __i;
6854+#ifdef CONFIG_M386
6855+ unsigned long flags;
6856+ if (unlikely(boot_cpu_data.x86 <= 3))
6857+ goto no_xadd;
6858+#endif
6859+ /* Modern 486+ processor */
6860+ __i = i;
6861 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6862 : "+r" (i), "+m" (v->counter)
6863 : : "memory");
6864@@ -208,6 +410,10 @@ static inline int atomic_sub_return(int
6865 }
6866
6867 #define atomic_inc_return(v) (atomic_add_return(1, v))
6868+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6869+{
6870+ return atomic_add_return_unchecked(1, v);
6871+}
6872 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6873
6874 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6875@@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
6876 return cmpxchg(&v->counter, old, new);
6877 }
6878
6879+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6880+{
6881+ return cmpxchg(&v->counter, old, new);
6882+}
6883+
6884 static inline int atomic_xchg(atomic_t *v, int new)
6885 {
6886 return xchg(&v->counter, new);
6887 }
6888
6889+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6890+{
6891+ return xchg(&v->counter, new);
6892+}
6893+
6894 /**
6895 * atomic_add_unless - add unless the number is already a given value
6896 * @v: pointer of type atomic_t
6897@@ -231,21 +447,77 @@ static inline int atomic_xchg(atomic_t *
6898 */
6899 static inline int atomic_add_unless(atomic_t *v, int a, int u)
6900 {
6901- int c, old;
6902+ int c, old, new;
6903 c = atomic_read(v);
6904 for (;;) {
6905- if (unlikely(c == (u)))
6906+ if (unlikely(c == u))
6907 break;
6908- old = atomic_cmpxchg((v), c, c + (a));
6909+
6910+ asm volatile("addl %2,%0\n"
6911+
6912+#ifdef CONFIG_PAX_REFCOUNT
6913+ "jno 0f\n"
6914+ "subl %2,%0\n"
6915+ "int $4\n0:\n"
6916+ _ASM_EXTABLE(0b, 0b)
6917+#endif
6918+
6919+ : "=r" (new)
6920+ : "0" (c), "ir" (a));
6921+
6922+ old = atomic_cmpxchg(v, c, new);
6923 if (likely(old == c))
6924 break;
6925 c = old;
6926 }
6927- return c != (u);
6928+ return c != u;
6929 }
6930
6931 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
6932
6933+/**
6934+ * atomic_inc_not_zero_hint - increment if not null
6935+ * @v: pointer of type atomic_t
6936+ * @hint: probable value of the atomic before the increment
6937+ *
6938+ * This version of atomic_inc_not_zero() gives a hint of probable
6939+ * value of the atomic. This helps processor to not read the memory
6940+ * before doing the atomic read/modify/write cycle, lowering
6941+ * number of bus transactions on some arches.
6942+ *
6943+ * Returns: 0 if increment was not done, 1 otherwise.
6944+ */
6945+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
6946+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
6947+{
6948+ int val, c = hint, new;
6949+
6950+ /* sanity test, should be removed by compiler if hint is a constant */
6951+ if (!hint)
6952+ return atomic_inc_not_zero(v);
6953+
6954+ do {
6955+ asm volatile("incl %0\n"
6956+
6957+#ifdef CONFIG_PAX_REFCOUNT
6958+ "jno 0f\n"
6959+ "decl %0\n"
6960+ "int $4\n0:\n"
6961+ _ASM_EXTABLE(0b, 0b)
6962+#endif
6963+
6964+ : "=r" (new)
6965+ : "0" (c));
6966+
6967+ val = atomic_cmpxchg(v, c, new);
6968+ if (val == c)
6969+ return 1;
6970+ c = val;
6971+ } while (c);
6972+
6973+ return 0;
6974+}
6975+
6976 /*
6977 * atomic_dec_if_positive - decrement by 1 if old value positive
6978 * @v: pointer of type atomic_t
6979diff -urNp linux-3.0.7/arch/x86/include/asm/bitops.h linux-3.0.7/arch/x86/include/asm/bitops.h
6980--- linux-3.0.7/arch/x86/include/asm/bitops.h 2011-07-21 22:17:23.000000000 -0400
6981+++ linux-3.0.7/arch/x86/include/asm/bitops.h 2011-08-23 21:47:55.000000000 -0400
6982@@ -38,7 +38,7 @@
6983 * a mask operation on a byte.
6984 */
6985 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
6986-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
6987+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
6988 #define CONST_MASK(nr) (1 << ((nr) & 7))
6989
6990 /**
6991diff -urNp linux-3.0.7/arch/x86/include/asm/boot.h linux-3.0.7/arch/x86/include/asm/boot.h
6992--- linux-3.0.7/arch/x86/include/asm/boot.h 2011-07-21 22:17:23.000000000 -0400
6993+++ linux-3.0.7/arch/x86/include/asm/boot.h 2011-08-23 21:47:55.000000000 -0400
6994@@ -11,10 +11,15 @@
6995 #include <asm/pgtable_types.h>
6996
6997 /* Physical address where kernel should be loaded. */
6998-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6999+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
7000 + (CONFIG_PHYSICAL_ALIGN - 1)) \
7001 & ~(CONFIG_PHYSICAL_ALIGN - 1))
7002
7003+#ifndef __ASSEMBLY__
7004+extern unsigned char __LOAD_PHYSICAL_ADDR[];
7005+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
7006+#endif
7007+
7008 /* Minimum kernel alignment, as a power of two */
7009 #ifdef CONFIG_X86_64
7010 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
7011diff -urNp linux-3.0.7/arch/x86/include/asm/cacheflush.h linux-3.0.7/arch/x86/include/asm/cacheflush.h
7012--- linux-3.0.7/arch/x86/include/asm/cacheflush.h 2011-07-21 22:17:23.000000000 -0400
7013+++ linux-3.0.7/arch/x86/include/asm/cacheflush.h 2011-08-23 21:47:55.000000000 -0400
7014@@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
7015 unsigned long pg_flags = pg->flags & _PGMT_MASK;
7016
7017 if (pg_flags == _PGMT_DEFAULT)
7018- return -1;
7019+ return ~0UL;
7020 else if (pg_flags == _PGMT_WC)
7021 return _PAGE_CACHE_WC;
7022 else if (pg_flags == _PGMT_UC_MINUS)
7023diff -urNp linux-3.0.7/arch/x86/include/asm/cache.h linux-3.0.7/arch/x86/include/asm/cache.h
7024--- linux-3.0.7/arch/x86/include/asm/cache.h 2011-07-21 22:17:23.000000000 -0400
7025+++ linux-3.0.7/arch/x86/include/asm/cache.h 2011-08-23 21:47:55.000000000 -0400
7026@@ -5,12 +5,13 @@
7027
7028 /* L1 cache line size */
7029 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7030-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7031+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7032
7033 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7034+#define __read_only __attribute__((__section__(".data..read_only")))
7035
7036 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
7037-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
7038+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
7039
7040 #ifdef CONFIG_X86_VSMP
7041 #ifdef CONFIG_SMP
7042diff -urNp linux-3.0.7/arch/x86/include/asm/checksum_32.h linux-3.0.7/arch/x86/include/asm/checksum_32.h
7043--- linux-3.0.7/arch/x86/include/asm/checksum_32.h 2011-07-21 22:17:23.000000000 -0400
7044+++ linux-3.0.7/arch/x86/include/asm/checksum_32.h 2011-08-23 21:47:55.000000000 -0400
7045@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
7046 int len, __wsum sum,
7047 int *src_err_ptr, int *dst_err_ptr);
7048
7049+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
7050+ int len, __wsum sum,
7051+ int *src_err_ptr, int *dst_err_ptr);
7052+
7053+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
7054+ int len, __wsum sum,
7055+ int *src_err_ptr, int *dst_err_ptr);
7056+
7057 /*
7058 * Note: when you get a NULL pointer exception here this means someone
7059 * passed in an incorrect kernel address to one of these functions.
7060@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
7061 int *err_ptr)
7062 {
7063 might_sleep();
7064- return csum_partial_copy_generic((__force void *)src, dst,
7065+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
7066 len, sum, err_ptr, NULL);
7067 }
7068
7069@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
7070 {
7071 might_sleep();
7072 if (access_ok(VERIFY_WRITE, dst, len))
7073- return csum_partial_copy_generic(src, (__force void *)dst,
7074+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
7075 len, sum, NULL, err_ptr);
7076
7077 if (len)
7078diff -urNp linux-3.0.7/arch/x86/include/asm/cpufeature.h linux-3.0.7/arch/x86/include/asm/cpufeature.h
7079--- linux-3.0.7/arch/x86/include/asm/cpufeature.h 2011-07-21 22:17:23.000000000 -0400
7080+++ linux-3.0.7/arch/x86/include/asm/cpufeature.h 2011-08-23 21:47:55.000000000 -0400
7081@@ -358,7 +358,7 @@ static __always_inline __pure bool __sta
7082 ".section .discard,\"aw\",@progbits\n"
7083 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
7084 ".previous\n"
7085- ".section .altinstr_replacement,\"ax\"\n"
7086+ ".section .altinstr_replacement,\"a\"\n"
7087 "3: movb $1,%0\n"
7088 "4:\n"
7089 ".previous\n"
7090diff -urNp linux-3.0.7/arch/x86/include/asm/desc_defs.h linux-3.0.7/arch/x86/include/asm/desc_defs.h
7091--- linux-3.0.7/arch/x86/include/asm/desc_defs.h 2011-07-21 22:17:23.000000000 -0400
7092+++ linux-3.0.7/arch/x86/include/asm/desc_defs.h 2011-08-23 21:47:55.000000000 -0400
7093@@ -31,6 +31,12 @@ struct desc_struct {
7094 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
7095 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
7096 };
7097+ struct {
7098+ u16 offset_low;
7099+ u16 seg;
7100+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
7101+ unsigned offset_high: 16;
7102+ } gate;
7103 };
7104 } __attribute__((packed));
7105
7106diff -urNp linux-3.0.7/arch/x86/include/asm/desc.h linux-3.0.7/arch/x86/include/asm/desc.h
7107--- linux-3.0.7/arch/x86/include/asm/desc.h 2011-07-21 22:17:23.000000000 -0400
7108+++ linux-3.0.7/arch/x86/include/asm/desc.h 2011-08-23 21:47:55.000000000 -0400
7109@@ -4,6 +4,7 @@
7110 #include <asm/desc_defs.h>
7111 #include <asm/ldt.h>
7112 #include <asm/mmu.h>
7113+#include <asm/pgtable.h>
7114
7115 #include <linux/smp.h>
7116
7117@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_
7118
7119 desc->type = (info->read_exec_only ^ 1) << 1;
7120 desc->type |= info->contents << 2;
7121+ desc->type |= info->seg_not_present ^ 1;
7122
7123 desc->s = 1;
7124 desc->dpl = 0x3;
7125@@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_
7126 }
7127
7128 extern struct desc_ptr idt_descr;
7129-extern gate_desc idt_table[];
7130-
7131-struct gdt_page {
7132- struct desc_struct gdt[GDT_ENTRIES];
7133-} __attribute__((aligned(PAGE_SIZE)));
7134-
7135-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
7136+extern gate_desc idt_table[256];
7137
7138+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
7139 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
7140 {
7141- return per_cpu(gdt_page, cpu).gdt;
7142+ return cpu_gdt_table[cpu];
7143 }
7144
7145 #ifdef CONFIG_X86_64
7146@@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *
7147 unsigned long base, unsigned dpl, unsigned flags,
7148 unsigned short seg)
7149 {
7150- gate->a = (seg << 16) | (base & 0xffff);
7151- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
7152+ gate->gate.offset_low = base;
7153+ gate->gate.seg = seg;
7154+ gate->gate.reserved = 0;
7155+ gate->gate.type = type;
7156+ gate->gate.s = 0;
7157+ gate->gate.dpl = dpl;
7158+ gate->gate.p = 1;
7159+ gate->gate.offset_high = base >> 16;
7160 }
7161
7162 #endif
7163@@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(str
7164
7165 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
7166 {
7167+ pax_open_kernel();
7168 memcpy(&idt[entry], gate, sizeof(*gate));
7169+ pax_close_kernel();
7170 }
7171
7172 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
7173 {
7174+ pax_open_kernel();
7175 memcpy(&ldt[entry], desc, 8);
7176+ pax_close_kernel();
7177 }
7178
7179 static inline void
7180@@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struc
7181 default: size = sizeof(*gdt); break;
7182 }
7183
7184+ pax_open_kernel();
7185 memcpy(&gdt[entry], desc, size);
7186+ pax_close_kernel();
7187 }
7188
7189 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
7190@@ -207,7 +216,9 @@ static inline void native_set_ldt(const
7191
7192 static inline void native_load_tr_desc(void)
7193 {
7194+ pax_open_kernel();
7195 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
7196+ pax_close_kernel();
7197 }
7198
7199 static inline void native_load_gdt(const struct desc_ptr *dtr)
7200@@ -244,8 +255,10 @@ static inline void native_load_tls(struc
7201 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
7202 unsigned int i;
7203
7204+ pax_open_kernel();
7205 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
7206 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
7207+ pax_close_kernel();
7208 }
7209
7210 #define _LDT_empty(info) \
7211@@ -307,7 +320,7 @@ static inline void set_desc_limit(struct
7212 desc->limit = (limit >> 16) & 0xf;
7213 }
7214
7215-static inline void _set_gate(int gate, unsigned type, void *addr,
7216+static inline void _set_gate(int gate, unsigned type, const void *addr,
7217 unsigned dpl, unsigned ist, unsigned seg)
7218 {
7219 gate_desc s;
7220@@ -326,7 +339,7 @@ static inline void _set_gate(int gate, u
7221 * Pentium F0 0F bugfix can have resulted in the mapped
7222 * IDT being write-protected.
7223 */
7224-static inline void set_intr_gate(unsigned int n, void *addr)
7225+static inline void set_intr_gate(unsigned int n, const void *addr)
7226 {
7227 BUG_ON((unsigned)n > 0xFF);
7228 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7229@@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsig
7230 /*
7231 * This routine sets up an interrupt gate at directory privilege level 3.
7232 */
7233-static inline void set_system_intr_gate(unsigned int n, void *addr)
7234+static inline void set_system_intr_gate(unsigned int n, const void *addr)
7235 {
7236 BUG_ON((unsigned)n > 0xFF);
7237 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7238 }
7239
7240-static inline void set_system_trap_gate(unsigned int n, void *addr)
7241+static inline void set_system_trap_gate(unsigned int n, const void *addr)
7242 {
7243 BUG_ON((unsigned)n > 0xFF);
7244 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7245 }
7246
7247-static inline void set_trap_gate(unsigned int n, void *addr)
7248+static inline void set_trap_gate(unsigned int n, const void *addr)
7249 {
7250 BUG_ON((unsigned)n > 0xFF);
7251 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7252@@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigne
7253 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7254 {
7255 BUG_ON((unsigned)n > 0xFF);
7256- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7257+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7258 }
7259
7260-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7261+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7262 {
7263 BUG_ON((unsigned)n > 0xFF);
7264 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7265 }
7266
7267-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7268+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7269 {
7270 BUG_ON((unsigned)n > 0xFF);
7271 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7272 }
7273
7274+#ifdef CONFIG_X86_32
7275+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7276+{
7277+ struct desc_struct d;
7278+
7279+ if (likely(limit))
7280+ limit = (limit - 1UL) >> PAGE_SHIFT;
7281+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
7282+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7283+}
7284+#endif
7285+
7286 #endif /* _ASM_X86_DESC_H */
7287diff -urNp linux-3.0.7/arch/x86/include/asm/e820.h linux-3.0.7/arch/x86/include/asm/e820.h
7288--- linux-3.0.7/arch/x86/include/asm/e820.h 2011-07-21 22:17:23.000000000 -0400
7289+++ linux-3.0.7/arch/x86/include/asm/e820.h 2011-08-23 21:47:55.000000000 -0400
7290@@ -69,7 +69,7 @@ struct e820map {
7291 #define ISA_START_ADDRESS 0xa0000
7292 #define ISA_END_ADDRESS 0x100000
7293
7294-#define BIOS_BEGIN 0x000a0000
7295+#define BIOS_BEGIN 0x000c0000
7296 #define BIOS_END 0x00100000
7297
7298 #define BIOS_ROM_BASE 0xffe00000
7299diff -urNp linux-3.0.7/arch/x86/include/asm/elf.h linux-3.0.7/arch/x86/include/asm/elf.h
7300--- linux-3.0.7/arch/x86/include/asm/elf.h 2011-07-21 22:17:23.000000000 -0400
7301+++ linux-3.0.7/arch/x86/include/asm/elf.h 2011-08-23 21:47:55.000000000 -0400
7302@@ -237,7 +237,25 @@ extern int force_personality32;
7303 the loader. We need to make sure that it is out of the way of the program
7304 that it will "exec", and that there is sufficient room for the brk. */
7305
7306+#ifdef CONFIG_PAX_SEGMEXEC
7307+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7308+#else
7309 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7310+#endif
7311+
7312+#ifdef CONFIG_PAX_ASLR
7313+#ifdef CONFIG_X86_32
7314+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7315+
7316+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7317+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7318+#else
7319+#define PAX_ELF_ET_DYN_BASE 0x400000UL
7320+
7321+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7322+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7323+#endif
7324+#endif
7325
7326 /* This yields a mask that user programs can use to figure out what
7327 instruction set this CPU supports. This could be done in user space,
7328@@ -290,9 +308,7 @@ do { \
7329
7330 #define ARCH_DLINFO \
7331 do { \
7332- if (vdso_enabled) \
7333- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7334- (unsigned long)current->mm->context.vdso); \
7335+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
7336 } while (0)
7337
7338 #define AT_SYSINFO 32
7339@@ -303,7 +319,7 @@ do { \
7340
7341 #endif /* !CONFIG_X86_32 */
7342
7343-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7344+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7345
7346 #define VDSO_ENTRY \
7347 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7348@@ -317,7 +333,4 @@ extern int arch_setup_additional_pages(s
7349 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7350 #define compat_arch_setup_additional_pages syscall32_setup_pages
7351
7352-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7353-#define arch_randomize_brk arch_randomize_brk
7354-
7355 #endif /* _ASM_X86_ELF_H */
7356diff -urNp linux-3.0.7/arch/x86/include/asm/emergency-restart.h linux-3.0.7/arch/x86/include/asm/emergency-restart.h
7357--- linux-3.0.7/arch/x86/include/asm/emergency-restart.h 2011-07-21 22:17:23.000000000 -0400
7358+++ linux-3.0.7/arch/x86/include/asm/emergency-restart.h 2011-08-23 21:47:55.000000000 -0400
7359@@ -15,6 +15,6 @@ enum reboot_type {
7360
7361 extern enum reboot_type reboot_type;
7362
7363-extern void machine_emergency_restart(void);
7364+extern void machine_emergency_restart(void) __noreturn;
7365
7366 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
7367diff -urNp linux-3.0.7/arch/x86/include/asm/futex.h linux-3.0.7/arch/x86/include/asm/futex.h
7368--- linux-3.0.7/arch/x86/include/asm/futex.h 2011-07-21 22:17:23.000000000 -0400
7369+++ linux-3.0.7/arch/x86/include/asm/futex.h 2011-10-06 04:17:55.000000000 -0400
7370@@ -12,16 +12,18 @@
7371 #include <asm/system.h>
7372
7373 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7374+ typecheck(u32 __user *, uaddr); \
7375 asm volatile("1:\t" insn "\n" \
7376 "2:\t.section .fixup,\"ax\"\n" \
7377 "3:\tmov\t%3, %1\n" \
7378 "\tjmp\t2b\n" \
7379 "\t.previous\n" \
7380 _ASM_EXTABLE(1b, 3b) \
7381- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7382+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
7383 : "i" (-EFAULT), "0" (oparg), "1" (0))
7384
7385 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7386+ typecheck(u32 __user *, uaddr); \
7387 asm volatile("1:\tmovl %2, %0\n" \
7388 "\tmovl\t%0, %3\n" \
7389 "\t" insn "\n" \
7390@@ -34,7 +36,7 @@
7391 _ASM_EXTABLE(1b, 4b) \
7392 _ASM_EXTABLE(2b, 4b) \
7393 : "=&a" (oldval), "=&r" (ret), \
7394- "+m" (*uaddr), "=&r" (tem) \
7395+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
7396 : "r" (oparg), "i" (-EFAULT), "1" (0))
7397
7398 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7399@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
7400
7401 switch (op) {
7402 case FUTEX_OP_SET:
7403- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7404+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
7405 break;
7406 case FUTEX_OP_ADD:
7407- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7408+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
7409 uaddr, oparg);
7410 break;
7411 case FUTEX_OP_OR:
7412@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
7413 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7414 return -EFAULT;
7415
7416- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
7417+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
7418 "2:\t.section .fixup, \"ax\"\n"
7419 "3:\tmov %3, %0\n"
7420 "\tjmp 2b\n"
7421 "\t.previous\n"
7422 _ASM_EXTABLE(1b, 3b)
7423- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
7424+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
7425 : "i" (-EFAULT), "r" (newval), "1" (oldval)
7426 : "memory"
7427 );
7428diff -urNp linux-3.0.7/arch/x86/include/asm/hw_irq.h linux-3.0.7/arch/x86/include/asm/hw_irq.h
7429--- linux-3.0.7/arch/x86/include/asm/hw_irq.h 2011-07-21 22:17:23.000000000 -0400
7430+++ linux-3.0.7/arch/x86/include/asm/hw_irq.h 2011-08-23 21:47:55.000000000 -0400
7431@@ -137,8 +137,8 @@ extern void setup_ioapic_dest(void);
7432 extern void enable_IO_APIC(void);
7433
7434 /* Statistics */
7435-extern atomic_t irq_err_count;
7436-extern atomic_t irq_mis_count;
7437+extern atomic_unchecked_t irq_err_count;
7438+extern atomic_unchecked_t irq_mis_count;
7439
7440 /* EISA */
7441 extern void eisa_set_level_irq(unsigned int irq);
7442diff -urNp linux-3.0.7/arch/x86/include/asm/i387.h linux-3.0.7/arch/x86/include/asm/i387.h
7443--- linux-3.0.7/arch/x86/include/asm/i387.h 2011-07-21 22:17:23.000000000 -0400
7444+++ linux-3.0.7/arch/x86/include/asm/i387.h 2011-10-06 04:17:55.000000000 -0400
7445@@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
7446 {
7447 int err;
7448
7449+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7450+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7451+ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
7452+#endif
7453+
7454 /* See comment in fxsave() below. */
7455 #ifdef CONFIG_AS_FXSAVEQ
7456 asm volatile("1: fxrstorq %[fx]\n\t"
7457@@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
7458 {
7459 int err;
7460
7461+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7462+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7463+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7464+#endif
7465+
7466 /*
7467 * Clear the bytes not touched by the fxsave and reserved
7468 * for the SW usage.
7469@@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
7470 #endif /* CONFIG_X86_64 */
7471
7472 /* We need a safe address that is cheap to find and that is already
7473- in L1 during context switch. The best choices are unfortunately
7474- different for UP and SMP */
7475-#ifdef CONFIG_SMP
7476-#define safe_address (__per_cpu_offset[0])
7477-#else
7478-#define safe_address (kstat_cpu(0).cpustat.user)
7479-#endif
7480+ in L1 during context switch. */
7481+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7482
7483 /*
7484 * These must be called with preempt disabled
7485@@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
7486 struct thread_info *me = current_thread_info();
7487 preempt_disable();
7488 if (me->status & TS_USEDFPU)
7489- __save_init_fpu(me->task);
7490+ __save_init_fpu(current);
7491 else
7492 clts();
7493 }
7494diff -urNp linux-3.0.7/arch/x86/include/asm/io.h linux-3.0.7/arch/x86/include/asm/io.h
7495--- linux-3.0.7/arch/x86/include/asm/io.h 2011-07-21 22:17:23.000000000 -0400
7496+++ linux-3.0.7/arch/x86/include/asm/io.h 2011-08-23 21:47:55.000000000 -0400
7497@@ -196,6 +196,17 @@ extern void set_iounmap_nonlazy(void);
7498
7499 #include <linux/vmalloc.h>
7500
7501+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7502+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7503+{
7504+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7505+}
7506+
7507+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7508+{
7509+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7510+}
7511+
7512 /*
7513 * Convert a virtual cached pointer to an uncached pointer
7514 */
7515diff -urNp linux-3.0.7/arch/x86/include/asm/irqflags.h linux-3.0.7/arch/x86/include/asm/irqflags.h
7516--- linux-3.0.7/arch/x86/include/asm/irqflags.h 2011-07-21 22:17:23.000000000 -0400
7517+++ linux-3.0.7/arch/x86/include/asm/irqflags.h 2011-08-23 21:47:55.000000000 -0400
7518@@ -140,6 +140,11 @@ static inline unsigned long arch_local_i
7519 sti; \
7520 sysexit
7521
7522+#define GET_CR0_INTO_RDI mov %cr0, %rdi
7523+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7524+#define GET_CR3_INTO_RDI mov %cr3, %rdi
7525+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7526+
7527 #else
7528 #define INTERRUPT_RETURN iret
7529 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
7530diff -urNp linux-3.0.7/arch/x86/include/asm/kprobes.h linux-3.0.7/arch/x86/include/asm/kprobes.h
7531--- linux-3.0.7/arch/x86/include/asm/kprobes.h 2011-07-21 22:17:23.000000000 -0400
7532+++ linux-3.0.7/arch/x86/include/asm/kprobes.h 2011-08-23 21:47:55.000000000 -0400
7533@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
7534 #define RELATIVEJUMP_SIZE 5
7535 #define RELATIVECALL_OPCODE 0xe8
7536 #define RELATIVE_ADDR_SIZE 4
7537-#define MAX_STACK_SIZE 64
7538-#define MIN_STACK_SIZE(ADDR) \
7539- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
7540- THREAD_SIZE - (unsigned long)(ADDR))) \
7541- ? (MAX_STACK_SIZE) \
7542- : (((unsigned long)current_thread_info()) + \
7543- THREAD_SIZE - (unsigned long)(ADDR)))
7544+#define MAX_STACK_SIZE 64UL
7545+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
7546
7547 #define flush_insn_slot(p) do { } while (0)
7548
7549diff -urNp linux-3.0.7/arch/x86/include/asm/kvm_host.h linux-3.0.7/arch/x86/include/asm/kvm_host.h
7550--- linux-3.0.7/arch/x86/include/asm/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
7551+++ linux-3.0.7/arch/x86/include/asm/kvm_host.h 2011-08-26 19:49:56.000000000 -0400
7552@@ -441,7 +441,7 @@ struct kvm_arch {
7553 unsigned int n_used_mmu_pages;
7554 unsigned int n_requested_mmu_pages;
7555 unsigned int n_max_mmu_pages;
7556- atomic_t invlpg_counter;
7557+ atomic_unchecked_t invlpg_counter;
7558 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
7559 /*
7560 * Hash table of struct kvm_mmu_page.
7561@@ -619,7 +619,7 @@ struct kvm_x86_ops {
7562 enum x86_intercept_stage stage);
7563
7564 const struct trace_print_flags *exit_reasons_str;
7565-};
7566+} __do_const;
7567
7568 struct kvm_arch_async_pf {
7569 u32 token;
7570diff -urNp linux-3.0.7/arch/x86/include/asm/local.h linux-3.0.7/arch/x86/include/asm/local.h
7571--- linux-3.0.7/arch/x86/include/asm/local.h 2011-07-21 22:17:23.000000000 -0400
7572+++ linux-3.0.7/arch/x86/include/asm/local.h 2011-08-23 21:47:55.000000000 -0400
7573@@ -18,26 +18,58 @@ typedef struct {
7574
7575 static inline void local_inc(local_t *l)
7576 {
7577- asm volatile(_ASM_INC "%0"
7578+ asm volatile(_ASM_INC "%0\n"
7579+
7580+#ifdef CONFIG_PAX_REFCOUNT
7581+ "jno 0f\n"
7582+ _ASM_DEC "%0\n"
7583+ "int $4\n0:\n"
7584+ _ASM_EXTABLE(0b, 0b)
7585+#endif
7586+
7587 : "+m" (l->a.counter));
7588 }
7589
7590 static inline void local_dec(local_t *l)
7591 {
7592- asm volatile(_ASM_DEC "%0"
7593+ asm volatile(_ASM_DEC "%0\n"
7594+
7595+#ifdef CONFIG_PAX_REFCOUNT
7596+ "jno 0f\n"
7597+ _ASM_INC "%0\n"
7598+ "int $4\n0:\n"
7599+ _ASM_EXTABLE(0b, 0b)
7600+#endif
7601+
7602 : "+m" (l->a.counter));
7603 }
7604
7605 static inline void local_add(long i, local_t *l)
7606 {
7607- asm volatile(_ASM_ADD "%1,%0"
7608+ asm volatile(_ASM_ADD "%1,%0\n"
7609+
7610+#ifdef CONFIG_PAX_REFCOUNT
7611+ "jno 0f\n"
7612+ _ASM_SUB "%1,%0\n"
7613+ "int $4\n0:\n"
7614+ _ASM_EXTABLE(0b, 0b)
7615+#endif
7616+
7617 : "+m" (l->a.counter)
7618 : "ir" (i));
7619 }
7620
7621 static inline void local_sub(long i, local_t *l)
7622 {
7623- asm volatile(_ASM_SUB "%1,%0"
7624+ asm volatile(_ASM_SUB "%1,%0\n"
7625+
7626+#ifdef CONFIG_PAX_REFCOUNT
7627+ "jno 0f\n"
7628+ _ASM_ADD "%1,%0\n"
7629+ "int $4\n0:\n"
7630+ _ASM_EXTABLE(0b, 0b)
7631+#endif
7632+
7633 : "+m" (l->a.counter)
7634 : "ir" (i));
7635 }
7636@@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
7637 {
7638 unsigned char c;
7639
7640- asm volatile(_ASM_SUB "%2,%0; sete %1"
7641+ asm volatile(_ASM_SUB "%2,%0\n"
7642+
7643+#ifdef CONFIG_PAX_REFCOUNT
7644+ "jno 0f\n"
7645+ _ASM_ADD "%2,%0\n"
7646+ "int $4\n0:\n"
7647+ _ASM_EXTABLE(0b, 0b)
7648+#endif
7649+
7650+ "sete %1\n"
7651 : "+m" (l->a.counter), "=qm" (c)
7652 : "ir" (i) : "memory");
7653 return c;
7654@@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
7655 {
7656 unsigned char c;
7657
7658- asm volatile(_ASM_DEC "%0; sete %1"
7659+ asm volatile(_ASM_DEC "%0\n"
7660+
7661+#ifdef CONFIG_PAX_REFCOUNT
7662+ "jno 0f\n"
7663+ _ASM_INC "%0\n"
7664+ "int $4\n0:\n"
7665+ _ASM_EXTABLE(0b, 0b)
7666+#endif
7667+
7668+ "sete %1\n"
7669 : "+m" (l->a.counter), "=qm" (c)
7670 : : "memory");
7671 return c != 0;
7672@@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
7673 {
7674 unsigned char c;
7675
7676- asm volatile(_ASM_INC "%0; sete %1"
7677+ asm volatile(_ASM_INC "%0\n"
7678+
7679+#ifdef CONFIG_PAX_REFCOUNT
7680+ "jno 0f\n"
7681+ _ASM_DEC "%0\n"
7682+ "int $4\n0:\n"
7683+ _ASM_EXTABLE(0b, 0b)
7684+#endif
7685+
7686+ "sete %1\n"
7687 : "+m" (l->a.counter), "=qm" (c)
7688 : : "memory");
7689 return c != 0;
7690@@ -110,7 +169,16 @@ static inline int local_add_negative(lon
7691 {
7692 unsigned char c;
7693
7694- asm volatile(_ASM_ADD "%2,%0; sets %1"
7695+ asm volatile(_ASM_ADD "%2,%0\n"
7696+
7697+#ifdef CONFIG_PAX_REFCOUNT
7698+ "jno 0f\n"
7699+ _ASM_SUB "%2,%0\n"
7700+ "int $4\n0:\n"
7701+ _ASM_EXTABLE(0b, 0b)
7702+#endif
7703+
7704+ "sets %1\n"
7705 : "+m" (l->a.counter), "=qm" (c)
7706 : "ir" (i) : "memory");
7707 return c;
7708@@ -133,7 +201,15 @@ static inline long local_add_return(long
7709 #endif
7710 /* Modern 486+ processor */
7711 __i = i;
7712- asm volatile(_ASM_XADD "%0, %1;"
7713+ asm volatile(_ASM_XADD "%0, %1\n"
7714+
7715+#ifdef CONFIG_PAX_REFCOUNT
7716+ "jno 0f\n"
7717+ _ASM_MOV "%0,%1\n"
7718+ "int $4\n0:\n"
7719+ _ASM_EXTABLE(0b, 0b)
7720+#endif
7721+
7722 : "+r" (i), "+m" (l->a.counter)
7723 : : "memory");
7724 return i + __i;
7725diff -urNp linux-3.0.7/arch/x86/include/asm/mman.h linux-3.0.7/arch/x86/include/asm/mman.h
7726--- linux-3.0.7/arch/x86/include/asm/mman.h 2011-07-21 22:17:23.000000000 -0400
7727+++ linux-3.0.7/arch/x86/include/asm/mman.h 2011-08-23 21:47:55.000000000 -0400
7728@@ -5,4 +5,14 @@
7729
7730 #include <asm-generic/mman.h>
7731
7732+#ifdef __KERNEL__
7733+#ifndef __ASSEMBLY__
7734+#ifdef CONFIG_X86_32
7735+#define arch_mmap_check i386_mmap_check
7736+int i386_mmap_check(unsigned long addr, unsigned long len,
7737+ unsigned long flags);
7738+#endif
7739+#endif
7740+#endif
7741+
7742 #endif /* _ASM_X86_MMAN_H */
7743diff -urNp linux-3.0.7/arch/x86/include/asm/mmu_context.h linux-3.0.7/arch/x86/include/asm/mmu_context.h
7744--- linux-3.0.7/arch/x86/include/asm/mmu_context.h 2011-07-21 22:17:23.000000000 -0400
7745+++ linux-3.0.7/arch/x86/include/asm/mmu_context.h 2011-08-23 21:48:14.000000000 -0400
7746@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *m
7747
7748 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7749 {
7750+
7751+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7752+ unsigned int i;
7753+ pgd_t *pgd;
7754+
7755+ pax_open_kernel();
7756+ pgd = get_cpu_pgd(smp_processor_id());
7757+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
7758+ set_pgd_batched(pgd+i, native_make_pgd(0));
7759+ pax_close_kernel();
7760+#endif
7761+
7762 #ifdef CONFIG_SMP
7763 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
7764 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
7765@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_s
7766 struct task_struct *tsk)
7767 {
7768 unsigned cpu = smp_processor_id();
7769+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7770+ int tlbstate = TLBSTATE_OK;
7771+#endif
7772
7773 if (likely(prev != next)) {
7774 #ifdef CONFIG_SMP
7775+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7776+ tlbstate = percpu_read(cpu_tlbstate.state);
7777+#endif
7778 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7779 percpu_write(cpu_tlbstate.active_mm, next);
7780 #endif
7781 cpumask_set_cpu(cpu, mm_cpumask(next));
7782
7783 /* Re-load page tables */
7784+#ifdef CONFIG_PAX_PER_CPU_PGD
7785+ pax_open_kernel();
7786+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7787+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7788+ pax_close_kernel();
7789+ load_cr3(get_cpu_pgd(cpu));
7790+#else
7791 load_cr3(next->pgd);
7792+#endif
7793
7794 /* stop flush ipis for the previous mm */
7795 cpumask_clear_cpu(cpu, mm_cpumask(prev));
7796@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_s
7797 */
7798 if (unlikely(prev->context.ldt != next->context.ldt))
7799 load_LDT_nolock(&next->context);
7800- }
7801+
7802+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7803+ if (!(__supported_pte_mask & _PAGE_NX)) {
7804+ smp_mb__before_clear_bit();
7805+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
7806+ smp_mb__after_clear_bit();
7807+ cpu_set(cpu, next->context.cpu_user_cs_mask);
7808+ }
7809+#endif
7810+
7811+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7812+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
7813+ prev->context.user_cs_limit != next->context.user_cs_limit))
7814+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7815 #ifdef CONFIG_SMP
7816+ else if (unlikely(tlbstate != TLBSTATE_OK))
7817+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7818+#endif
7819+#endif
7820+
7821+ }
7822 else {
7823+
7824+#ifdef CONFIG_PAX_PER_CPU_PGD
7825+ pax_open_kernel();
7826+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7827+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7828+ pax_close_kernel();
7829+ load_cr3(get_cpu_pgd(cpu));
7830+#endif
7831+
7832+#ifdef CONFIG_SMP
7833 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7834 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
7835
7836@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_s
7837 * tlb flush IPI delivery. We must reload CR3
7838 * to make sure to use no freed page tables.
7839 */
7840+
7841+#ifndef CONFIG_PAX_PER_CPU_PGD
7842 load_cr3(next->pgd);
7843+#endif
7844+
7845 load_LDT_nolock(&next->context);
7846+
7847+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
7848+ if (!(__supported_pte_mask & _PAGE_NX))
7849+ cpu_set(cpu, next->context.cpu_user_cs_mask);
7850+#endif
7851+
7852+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7853+#ifdef CONFIG_PAX_PAGEEXEC
7854+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
7855+#endif
7856+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7857+#endif
7858+
7859 }
7860- }
7861 #endif
7862+ }
7863 }
7864
7865 #define activate_mm(prev, next) \
7866diff -urNp linux-3.0.7/arch/x86/include/asm/mmu.h linux-3.0.7/arch/x86/include/asm/mmu.h
7867--- linux-3.0.7/arch/x86/include/asm/mmu.h 2011-07-21 22:17:23.000000000 -0400
7868+++ linux-3.0.7/arch/x86/include/asm/mmu.h 2011-08-23 21:47:55.000000000 -0400
7869@@ -9,7 +9,7 @@
7870 * we put the segment information here.
7871 */
7872 typedef struct {
7873- void *ldt;
7874+ struct desc_struct *ldt;
7875 int size;
7876
7877 #ifdef CONFIG_X86_64
7878@@ -18,7 +18,19 @@ typedef struct {
7879 #endif
7880
7881 struct mutex lock;
7882- void *vdso;
7883+ unsigned long vdso;
7884+
7885+#ifdef CONFIG_X86_32
7886+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
7887+ unsigned long user_cs_base;
7888+ unsigned long user_cs_limit;
7889+
7890+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7891+ cpumask_t cpu_user_cs_mask;
7892+#endif
7893+
7894+#endif
7895+#endif
7896 } mm_context_t;
7897
7898 #ifdef CONFIG_SMP
7899diff -urNp linux-3.0.7/arch/x86/include/asm/module.h linux-3.0.7/arch/x86/include/asm/module.h
7900--- linux-3.0.7/arch/x86/include/asm/module.h 2011-07-21 22:17:23.000000000 -0400
7901+++ linux-3.0.7/arch/x86/include/asm/module.h 2011-10-07 19:24:31.000000000 -0400
7902@@ -5,6 +5,7 @@
7903
7904 #ifdef CONFIG_X86_64
7905 /* X86_64 does not define MODULE_PROC_FAMILY */
7906+#define MODULE_PROC_FAMILY ""
7907 #elif defined CONFIG_M386
7908 #define MODULE_PROC_FAMILY "386 "
7909 #elif defined CONFIG_M486
7910@@ -59,8 +60,18 @@
7911 #error unknown processor family
7912 #endif
7913
7914-#ifdef CONFIG_X86_32
7915-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
7916+#ifdef CONFIG_PAX_KERNEXEC
7917+#define MODULE_PAX_KERNEXEC "KERNEXEC "
7918+#else
7919+#define MODULE_PAX_KERNEXEC ""
7920 #endif
7921
7922+#ifdef CONFIG_PAX_MEMORY_UDEREF
7923+#define MODULE_PAX_UDEREF "UDEREF "
7924+#else
7925+#define MODULE_PAX_UDEREF ""
7926+#endif
7927+
7928+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
7929+
7930 #endif /* _ASM_X86_MODULE_H */
7931diff -urNp linux-3.0.7/arch/x86/include/asm/page_64_types.h linux-3.0.7/arch/x86/include/asm/page_64_types.h
7932--- linux-3.0.7/arch/x86/include/asm/page_64_types.h 2011-07-21 22:17:23.000000000 -0400
7933+++ linux-3.0.7/arch/x86/include/asm/page_64_types.h 2011-08-23 21:47:55.000000000 -0400
7934@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
7935
7936 /* duplicated to the one in bootmem.h */
7937 extern unsigned long max_pfn;
7938-extern unsigned long phys_base;
7939+extern const unsigned long phys_base;
7940
7941 extern unsigned long __phys_addr(unsigned long);
7942 #define __phys_reloc_hide(x) (x)
7943diff -urNp linux-3.0.7/arch/x86/include/asm/paravirt.h linux-3.0.7/arch/x86/include/asm/paravirt.h
7944--- linux-3.0.7/arch/x86/include/asm/paravirt.h 2011-07-21 22:17:23.000000000 -0400
7945+++ linux-3.0.7/arch/x86/include/asm/paravirt.h 2011-08-23 21:47:55.000000000 -0400
7946@@ -658,6 +658,18 @@ static inline void set_pgd(pgd_t *pgdp,
7947 val);
7948 }
7949
7950+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
7951+{
7952+ pgdval_t val = native_pgd_val(pgd);
7953+
7954+ if (sizeof(pgdval_t) > sizeof(long))
7955+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
7956+ val, (u64)val >> 32);
7957+ else
7958+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
7959+ val);
7960+}
7961+
7962 static inline void pgd_clear(pgd_t *pgdp)
7963 {
7964 set_pgd(pgdp, __pgd(0));
7965@@ -739,6 +751,21 @@ static inline void __set_fixmap(unsigned
7966 pv_mmu_ops.set_fixmap(idx, phys, flags);
7967 }
7968
7969+#ifdef CONFIG_PAX_KERNEXEC
7970+static inline unsigned long pax_open_kernel(void)
7971+{
7972+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
7973+}
7974+
7975+static inline unsigned long pax_close_kernel(void)
7976+{
7977+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
7978+}
7979+#else
7980+static inline unsigned long pax_open_kernel(void) { return 0; }
7981+static inline unsigned long pax_close_kernel(void) { return 0; }
7982+#endif
7983+
7984 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
7985
7986 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
7987@@ -955,7 +982,7 @@ extern void default_banner(void);
7988
7989 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
7990 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
7991-#define PARA_INDIRECT(addr) *%cs:addr
7992+#define PARA_INDIRECT(addr) *%ss:addr
7993 #endif
7994
7995 #define INTERRUPT_RETURN \
7996@@ -1032,6 +1059,21 @@ extern void default_banner(void);
7997 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
7998 CLBR_NONE, \
7999 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
8000+
8001+#define GET_CR0_INTO_RDI \
8002+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
8003+ mov %rax,%rdi
8004+
8005+#define SET_RDI_INTO_CR0 \
8006+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
8007+
8008+#define GET_CR3_INTO_RDI \
8009+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
8010+ mov %rax,%rdi
8011+
8012+#define SET_RDI_INTO_CR3 \
8013+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
8014+
8015 #endif /* CONFIG_X86_32 */
8016
8017 #endif /* __ASSEMBLY__ */
8018diff -urNp linux-3.0.7/arch/x86/include/asm/paravirt_types.h linux-3.0.7/arch/x86/include/asm/paravirt_types.h
8019--- linux-3.0.7/arch/x86/include/asm/paravirt_types.h 2011-07-21 22:17:23.000000000 -0400
8020+++ linux-3.0.7/arch/x86/include/asm/paravirt_types.h 2011-08-23 21:47:55.000000000 -0400
8021@@ -78,19 +78,19 @@ struct pv_init_ops {
8022 */
8023 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
8024 unsigned long addr, unsigned len);
8025-};
8026+} __no_const;
8027
8028
8029 struct pv_lazy_ops {
8030 /* Set deferred update mode, used for batching operations. */
8031 void (*enter)(void);
8032 void (*leave)(void);
8033-};
8034+} __no_const;
8035
8036 struct pv_time_ops {
8037 unsigned long long (*sched_clock)(void);
8038 unsigned long (*get_tsc_khz)(void);
8039-};
8040+} __no_const;
8041
8042 struct pv_cpu_ops {
8043 /* hooks for various privileged instructions */
8044@@ -186,7 +186,7 @@ struct pv_cpu_ops {
8045
8046 void (*start_context_switch)(struct task_struct *prev);
8047 void (*end_context_switch)(struct task_struct *next);
8048-};
8049+} __no_const;
8050
8051 struct pv_irq_ops {
8052 /*
8053@@ -217,7 +217,7 @@ struct pv_apic_ops {
8054 unsigned long start_eip,
8055 unsigned long start_esp);
8056 #endif
8057-};
8058+} __no_const;
8059
8060 struct pv_mmu_ops {
8061 unsigned long (*read_cr2)(void);
8062@@ -306,6 +306,7 @@ struct pv_mmu_ops {
8063 struct paravirt_callee_save make_pud;
8064
8065 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
8066+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
8067 #endif /* PAGETABLE_LEVELS == 4 */
8068 #endif /* PAGETABLE_LEVELS >= 3 */
8069
8070@@ -317,6 +318,12 @@ struct pv_mmu_ops {
8071 an mfn. We can tell which is which from the index. */
8072 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
8073 phys_addr_t phys, pgprot_t flags);
8074+
8075+#ifdef CONFIG_PAX_KERNEXEC
8076+ unsigned long (*pax_open_kernel)(void);
8077+ unsigned long (*pax_close_kernel)(void);
8078+#endif
8079+
8080 };
8081
8082 struct arch_spinlock;
8083@@ -327,7 +334,7 @@ struct pv_lock_ops {
8084 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
8085 int (*spin_trylock)(struct arch_spinlock *lock);
8086 void (*spin_unlock)(struct arch_spinlock *lock);
8087-};
8088+} __no_const;
8089
8090 /* This contains all the paravirt structures: we get a convenient
8091 * number for each function using the offset which we use to indicate
8092diff -urNp linux-3.0.7/arch/x86/include/asm/pgalloc.h linux-3.0.7/arch/x86/include/asm/pgalloc.h
8093--- linux-3.0.7/arch/x86/include/asm/pgalloc.h 2011-07-21 22:17:23.000000000 -0400
8094+++ linux-3.0.7/arch/x86/include/asm/pgalloc.h 2011-08-23 21:47:55.000000000 -0400
8095@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
8096 pmd_t *pmd, pte_t *pte)
8097 {
8098 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8099+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
8100+}
8101+
8102+static inline void pmd_populate_user(struct mm_struct *mm,
8103+ pmd_t *pmd, pte_t *pte)
8104+{
8105+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8106 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
8107 }
8108
8109diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable-2level.h linux-3.0.7/arch/x86/include/asm/pgtable-2level.h
8110--- linux-3.0.7/arch/x86/include/asm/pgtable-2level.h 2011-07-21 22:17:23.000000000 -0400
8111+++ linux-3.0.7/arch/x86/include/asm/pgtable-2level.h 2011-08-23 21:47:55.000000000 -0400
8112@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
8113
8114 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8115 {
8116+ pax_open_kernel();
8117 *pmdp = pmd;
8118+ pax_close_kernel();
8119 }
8120
8121 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
8122diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable_32.h linux-3.0.7/arch/x86/include/asm/pgtable_32.h
8123--- linux-3.0.7/arch/x86/include/asm/pgtable_32.h 2011-07-21 22:17:23.000000000 -0400
8124+++ linux-3.0.7/arch/x86/include/asm/pgtable_32.h 2011-08-23 21:47:55.000000000 -0400
8125@@ -25,9 +25,6 @@
8126 struct mm_struct;
8127 struct vm_area_struct;
8128
8129-extern pgd_t swapper_pg_dir[1024];
8130-extern pgd_t initial_page_table[1024];
8131-
8132 static inline void pgtable_cache_init(void) { }
8133 static inline void check_pgt_cache(void) { }
8134 void paging_init(void);
8135@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
8136 # include <asm/pgtable-2level.h>
8137 #endif
8138
8139+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
8140+extern pgd_t initial_page_table[PTRS_PER_PGD];
8141+#ifdef CONFIG_X86_PAE
8142+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
8143+#endif
8144+
8145 #if defined(CONFIG_HIGHPTE)
8146 #define pte_offset_map(dir, address) \
8147 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
8148@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
8149 /* Clear a kernel PTE and flush it from the TLB */
8150 #define kpte_clear_flush(ptep, vaddr) \
8151 do { \
8152+ pax_open_kernel(); \
8153 pte_clear(&init_mm, (vaddr), (ptep)); \
8154+ pax_close_kernel(); \
8155 __flush_tlb_one((vaddr)); \
8156 } while (0)
8157
8158@@ -74,6 +79,9 @@ do { \
8159
8160 #endif /* !__ASSEMBLY__ */
8161
8162+#define HAVE_ARCH_UNMAPPED_AREA
8163+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
8164+
8165 /*
8166 * kern_addr_valid() is (1) for FLATMEM and (0) for
8167 * SPARSEMEM and DISCONTIGMEM
8168diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable_32_types.h linux-3.0.7/arch/x86/include/asm/pgtable_32_types.h
8169--- linux-3.0.7/arch/x86/include/asm/pgtable_32_types.h 2011-07-21 22:17:23.000000000 -0400
8170+++ linux-3.0.7/arch/x86/include/asm/pgtable_32_types.h 2011-08-23 21:47:55.000000000 -0400
8171@@ -8,7 +8,7 @@
8172 */
8173 #ifdef CONFIG_X86_PAE
8174 # include <asm/pgtable-3level_types.h>
8175-# define PMD_SIZE (1UL << PMD_SHIFT)
8176+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
8177 # define PMD_MASK (~(PMD_SIZE - 1))
8178 #else
8179 # include <asm/pgtable-2level_types.h>
8180@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
8181 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
8182 #endif
8183
8184+#ifdef CONFIG_PAX_KERNEXEC
8185+#ifndef __ASSEMBLY__
8186+extern unsigned char MODULES_EXEC_VADDR[];
8187+extern unsigned char MODULES_EXEC_END[];
8188+#endif
8189+#include <asm/boot.h>
8190+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
8191+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
8192+#else
8193+#define ktla_ktva(addr) (addr)
8194+#define ktva_ktla(addr) (addr)
8195+#endif
8196+
8197 #define MODULES_VADDR VMALLOC_START
8198 #define MODULES_END VMALLOC_END
8199 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
8200diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable-3level.h linux-3.0.7/arch/x86/include/asm/pgtable-3level.h
8201--- linux-3.0.7/arch/x86/include/asm/pgtable-3level.h 2011-07-21 22:17:23.000000000 -0400
8202+++ linux-3.0.7/arch/x86/include/asm/pgtable-3level.h 2011-08-23 21:47:55.000000000 -0400
8203@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
8204
8205 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8206 {
8207+ pax_open_kernel();
8208 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8209+ pax_close_kernel();
8210 }
8211
8212 static inline void native_set_pud(pud_t *pudp, pud_t pud)
8213 {
8214+ pax_open_kernel();
8215 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8216+ pax_close_kernel();
8217 }
8218
8219 /*
8220diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable_64.h linux-3.0.7/arch/x86/include/asm/pgtable_64.h
8221--- linux-3.0.7/arch/x86/include/asm/pgtable_64.h 2011-07-21 22:17:23.000000000 -0400
8222+++ linux-3.0.7/arch/x86/include/asm/pgtable_64.h 2011-08-23 21:47:55.000000000 -0400
8223@@ -16,10 +16,13 @@
8224
8225 extern pud_t level3_kernel_pgt[512];
8226 extern pud_t level3_ident_pgt[512];
8227+extern pud_t level3_vmalloc_pgt[512];
8228+extern pud_t level3_vmemmap_pgt[512];
8229+extern pud_t level2_vmemmap_pgt[512];
8230 extern pmd_t level2_kernel_pgt[512];
8231 extern pmd_t level2_fixmap_pgt[512];
8232-extern pmd_t level2_ident_pgt[512];
8233-extern pgd_t init_level4_pgt[];
8234+extern pmd_t level2_ident_pgt[512*2];
8235+extern pgd_t init_level4_pgt[512];
8236
8237 #define swapper_pg_dir init_level4_pgt
8238
8239@@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
8240
8241 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8242 {
8243+ pax_open_kernel();
8244 *pmdp = pmd;
8245+ pax_close_kernel();
8246 }
8247
8248 static inline void native_pmd_clear(pmd_t *pmd)
8249@@ -107,6 +112,13 @@ static inline void native_pud_clear(pud_
8250
8251 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8252 {
8253+ pax_open_kernel();
8254+ *pgdp = pgd;
8255+ pax_close_kernel();
8256+}
8257+
8258+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
8259+{
8260 *pgdp = pgd;
8261 }
8262
8263diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable_64_types.h linux-3.0.7/arch/x86/include/asm/pgtable_64_types.h
8264--- linux-3.0.7/arch/x86/include/asm/pgtable_64_types.h 2011-07-21 22:17:23.000000000 -0400
8265+++ linux-3.0.7/arch/x86/include/asm/pgtable_64_types.h 2011-08-23 21:47:55.000000000 -0400
8266@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8267 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8268 #define MODULES_END _AC(0xffffffffff000000, UL)
8269 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8270+#define MODULES_EXEC_VADDR MODULES_VADDR
8271+#define MODULES_EXEC_END MODULES_END
8272+
8273+#define ktla_ktva(addr) (addr)
8274+#define ktva_ktla(addr) (addr)
8275
8276 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8277diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable.h linux-3.0.7/arch/x86/include/asm/pgtable.h
8278--- linux-3.0.7/arch/x86/include/asm/pgtable.h 2011-07-21 22:17:23.000000000 -0400
8279+++ linux-3.0.7/arch/x86/include/asm/pgtable.h 2011-08-23 21:47:55.000000000 -0400
8280@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm
8281
8282 #ifndef __PAGETABLE_PUD_FOLDED
8283 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
8284+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
8285 #define pgd_clear(pgd) native_pgd_clear(pgd)
8286 #endif
8287
8288@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm
8289
8290 #define arch_end_context_switch(prev) do {} while(0)
8291
8292+#define pax_open_kernel() native_pax_open_kernel()
8293+#define pax_close_kernel() native_pax_close_kernel()
8294 #endif /* CONFIG_PARAVIRT */
8295
8296+#define __HAVE_ARCH_PAX_OPEN_KERNEL
8297+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8298+
8299+#ifdef CONFIG_PAX_KERNEXEC
8300+static inline unsigned long native_pax_open_kernel(void)
8301+{
8302+ unsigned long cr0;
8303+
8304+ preempt_disable();
8305+ barrier();
8306+ cr0 = read_cr0() ^ X86_CR0_WP;
8307+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
8308+ write_cr0(cr0);
8309+ return cr0 ^ X86_CR0_WP;
8310+}
8311+
8312+static inline unsigned long native_pax_close_kernel(void)
8313+{
8314+ unsigned long cr0;
8315+
8316+ cr0 = read_cr0() ^ X86_CR0_WP;
8317+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8318+ write_cr0(cr0);
8319+ barrier();
8320+ preempt_enable_no_resched();
8321+ return cr0 ^ X86_CR0_WP;
8322+}
8323+#else
8324+static inline unsigned long native_pax_open_kernel(void) { return 0; }
8325+static inline unsigned long native_pax_close_kernel(void) { return 0; }
8326+#endif
8327+
8328 /*
8329 * The following only work if pte_present() is true.
8330 * Undefined behaviour if not..
8331 */
8332+static inline int pte_user(pte_t pte)
8333+{
8334+ return pte_val(pte) & _PAGE_USER;
8335+}
8336+
8337 static inline int pte_dirty(pte_t pte)
8338 {
8339 return pte_flags(pte) & _PAGE_DIRTY;
8340@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t
8341 return pte_clear_flags(pte, _PAGE_RW);
8342 }
8343
8344+static inline pte_t pte_mkread(pte_t pte)
8345+{
8346+ return __pte(pte_val(pte) | _PAGE_USER);
8347+}
8348+
8349 static inline pte_t pte_mkexec(pte_t pte)
8350 {
8351- return pte_clear_flags(pte, _PAGE_NX);
8352+#ifdef CONFIG_X86_PAE
8353+ if (__supported_pte_mask & _PAGE_NX)
8354+ return pte_clear_flags(pte, _PAGE_NX);
8355+ else
8356+#endif
8357+ return pte_set_flags(pte, _PAGE_USER);
8358+}
8359+
8360+static inline pte_t pte_exprotect(pte_t pte)
8361+{
8362+#ifdef CONFIG_X86_PAE
8363+ if (__supported_pte_mask & _PAGE_NX)
8364+ return pte_set_flags(pte, _PAGE_NX);
8365+ else
8366+#endif
8367+ return pte_clear_flags(pte, _PAGE_USER);
8368 }
8369
8370 static inline pte_t pte_mkdirty(pte_t pte)
8371@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long
8372 #endif
8373
8374 #ifndef __ASSEMBLY__
8375+
8376+#ifdef CONFIG_PAX_PER_CPU_PGD
8377+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8378+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8379+{
8380+ return cpu_pgd[cpu];
8381+}
8382+#endif
8383+
8384 #include <linux/mm_types.h>
8385
8386 static inline int pte_none(pte_t pte)
8387@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *p
8388
8389 static inline int pgd_bad(pgd_t pgd)
8390 {
8391- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8392+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8393 }
8394
8395 static inline int pgd_none(pgd_t pgd)
8396@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
8397 * pgd_offset() returns a (pgd_t *)
8398 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8399 */
8400-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8401+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8402+
8403+#ifdef CONFIG_PAX_PER_CPU_PGD
8404+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8405+#endif
8406+
8407 /*
8408 * a shortcut which implies the use of the kernel's pgd, instead
8409 * of a process's
8410@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
8411 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8412 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8413
8414+#ifdef CONFIG_X86_32
8415+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8416+#else
8417+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8418+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8419+
8420+#ifdef CONFIG_PAX_MEMORY_UDEREF
8421+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8422+#else
8423+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8424+#endif
8425+
8426+#endif
8427+
8428 #ifndef __ASSEMBLY__
8429
8430 extern int direct_gbpages;
8431@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(st
8432 * dst and src can be on the same page, but the range must not overlap,
8433 * and must not cross a page boundary.
8434 */
8435-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8436+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8437 {
8438- memcpy(dst, src, count * sizeof(pgd_t));
8439+ pax_open_kernel();
8440+ while (count--)
8441+ *dst++ = *src++;
8442+ pax_close_kernel();
8443 }
8444
8445+#ifdef CONFIG_PAX_PER_CPU_PGD
8446+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8447+#endif
8448+
8449+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8450+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8451+#else
8452+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8453+#endif
8454
8455 #include <asm-generic/pgtable.h>
8456 #endif /* __ASSEMBLY__ */
8457diff -urNp linux-3.0.7/arch/x86/include/asm/pgtable_types.h linux-3.0.7/arch/x86/include/asm/pgtable_types.h
8458--- linux-3.0.7/arch/x86/include/asm/pgtable_types.h 2011-07-21 22:17:23.000000000 -0400
8459+++ linux-3.0.7/arch/x86/include/asm/pgtable_types.h 2011-08-23 21:47:55.000000000 -0400
8460@@ -16,13 +16,12 @@
8461 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8462 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8463 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8464-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8465+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8466 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8467 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8468 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8469-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8470-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8471-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
8472+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8473+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
8474 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8475
8476 /* If _PAGE_BIT_PRESENT is clear, we use these: */
8477@@ -40,7 +39,6 @@
8478 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8479 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8480 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8481-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8482 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8483 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8484 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8485@@ -57,8 +55,10 @@
8486
8487 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8488 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8489-#else
8490+#elif defined(CONFIG_KMEMCHECK)
8491 #define _PAGE_NX (_AT(pteval_t, 0))
8492+#else
8493+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8494 #endif
8495
8496 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8497@@ -96,6 +96,9 @@
8498 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8499 _PAGE_ACCESSED)
8500
8501+#define PAGE_READONLY_NOEXEC PAGE_READONLY
8502+#define PAGE_SHARED_NOEXEC PAGE_SHARED
8503+
8504 #define __PAGE_KERNEL_EXEC \
8505 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8506 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8507@@ -106,8 +109,8 @@
8508 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8509 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8510 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8511-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8512-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
8513+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8514+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
8515 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8516 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
8517 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
8518@@ -166,8 +169,8 @@
8519 * bits are combined, this will alow user to access the high address mapped
8520 * VDSO in the presence of CONFIG_COMPAT_VDSO
8521 */
8522-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8523-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8524+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8525+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8526 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8527 #endif
8528
8529@@ -205,7 +208,17 @@ static inline pgdval_t pgd_flags(pgd_t p
8530 {
8531 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
8532 }
8533+#endif
8534
8535+#if PAGETABLE_LEVELS == 3
8536+#include <asm-generic/pgtable-nopud.h>
8537+#endif
8538+
8539+#if PAGETABLE_LEVELS == 2
8540+#include <asm-generic/pgtable-nopmd.h>
8541+#endif
8542+
8543+#ifndef __ASSEMBLY__
8544 #if PAGETABLE_LEVELS > 3
8545 typedef struct { pudval_t pud; } pud_t;
8546
8547@@ -219,8 +232,6 @@ static inline pudval_t native_pud_val(pu
8548 return pud.pud;
8549 }
8550 #else
8551-#include <asm-generic/pgtable-nopud.h>
8552-
8553 static inline pudval_t native_pud_val(pud_t pud)
8554 {
8555 return native_pgd_val(pud.pgd);
8556@@ -240,8 +251,6 @@ static inline pmdval_t native_pmd_val(pm
8557 return pmd.pmd;
8558 }
8559 #else
8560-#include <asm-generic/pgtable-nopmd.h>
8561-
8562 static inline pmdval_t native_pmd_val(pmd_t pmd)
8563 {
8564 return native_pgd_val(pmd.pud.pgd);
8565@@ -281,7 +290,6 @@ typedef struct page *pgtable_t;
8566
8567 extern pteval_t __supported_pte_mask;
8568 extern void set_nx(void);
8569-extern int nx_enabled;
8570
8571 #define pgprot_writecombine pgprot_writecombine
8572 extern pgprot_t pgprot_writecombine(pgprot_t prot);
8573diff -urNp linux-3.0.7/arch/x86/include/asm/processor.h linux-3.0.7/arch/x86/include/asm/processor.h
8574--- linux-3.0.7/arch/x86/include/asm/processor.h 2011-07-21 22:17:23.000000000 -0400
8575+++ linux-3.0.7/arch/x86/include/asm/processor.h 2011-08-23 21:47:55.000000000 -0400
8576@@ -266,7 +266,7 @@ struct tss_struct {
8577
8578 } ____cacheline_aligned;
8579
8580-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8581+extern struct tss_struct init_tss[NR_CPUS];
8582
8583 /*
8584 * Save the original ist values for checking stack pointers during debugging
8585@@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(co
8586 */
8587 #define TASK_SIZE PAGE_OFFSET
8588 #define TASK_SIZE_MAX TASK_SIZE
8589+
8590+#ifdef CONFIG_PAX_SEGMEXEC
8591+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
8592+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8593+#else
8594 #define STACK_TOP TASK_SIZE
8595-#define STACK_TOP_MAX STACK_TOP
8596+#endif
8597+
8598+#define STACK_TOP_MAX TASK_SIZE
8599
8600 #define INIT_THREAD { \
8601- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8602+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8603 .vm86_info = NULL, \
8604 .sysenter_cs = __KERNEL_CS, \
8605 .io_bitmap_ptr = NULL, \
8606@@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(co
8607 */
8608 #define INIT_TSS { \
8609 .x86_tss = { \
8610- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8611+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8612 .ss0 = __KERNEL_DS, \
8613 .ss1 = __KERNEL_CS, \
8614 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
8615@@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(co
8616 extern unsigned long thread_saved_pc(struct task_struct *tsk);
8617
8618 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
8619-#define KSTK_TOP(info) \
8620-({ \
8621- unsigned long *__ptr = (unsigned long *)(info); \
8622- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
8623-})
8624+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
8625
8626 /*
8627 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8628@@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(str
8629 #define task_pt_regs(task) \
8630 ({ \
8631 struct pt_regs *__regs__; \
8632- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8633+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
8634 __regs__ - 1; \
8635 })
8636
8637@@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(str
8638 /*
8639 * User space process size. 47bits minus one guard page.
8640 */
8641-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
8642+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8643
8644 /* This decides where the kernel will search for a free chunk of vm
8645 * space during mmap's.
8646 */
8647 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
8648- 0xc0000000 : 0xFFFFe000)
8649+ 0xc0000000 : 0xFFFFf000)
8650
8651 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
8652 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8653@@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(str
8654 #define STACK_TOP_MAX TASK_SIZE_MAX
8655
8656 #define INIT_THREAD { \
8657- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8658+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8659 }
8660
8661 #define INIT_TSS { \
8662- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8663+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8664 }
8665
8666 /*
8667@@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs
8668 */
8669 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
8670
8671+#ifdef CONFIG_PAX_SEGMEXEC
8672+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
8673+#endif
8674+
8675 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
8676
8677 /* Get/set a process' ability to use the timestamp counter instruction */
8678diff -urNp linux-3.0.7/arch/x86/include/asm/ptrace.h linux-3.0.7/arch/x86/include/asm/ptrace.h
8679--- linux-3.0.7/arch/x86/include/asm/ptrace.h 2011-07-21 22:17:23.000000000 -0400
8680+++ linux-3.0.7/arch/x86/include/asm/ptrace.h 2011-08-23 21:47:55.000000000 -0400
8681@@ -153,28 +153,29 @@ static inline unsigned long regs_return_
8682 }
8683
8684 /*
8685- * user_mode_vm(regs) determines whether a register set came from user mode.
8686+ * user_mode(regs) determines whether a register set came from user mode.
8687 * This is true if V8086 mode was enabled OR if the register set was from
8688 * protected mode with RPL-3 CS value. This tricky test checks that with
8689 * one comparison. Many places in the kernel can bypass this full check
8690- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
8691+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
8692+ * be used.
8693 */
8694-static inline int user_mode(struct pt_regs *regs)
8695+static inline int user_mode_novm(struct pt_regs *regs)
8696 {
8697 #ifdef CONFIG_X86_32
8698 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
8699 #else
8700- return !!(regs->cs & 3);
8701+ return !!(regs->cs & SEGMENT_RPL_MASK);
8702 #endif
8703 }
8704
8705-static inline int user_mode_vm(struct pt_regs *regs)
8706+static inline int user_mode(struct pt_regs *regs)
8707 {
8708 #ifdef CONFIG_X86_32
8709 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
8710 USER_RPL;
8711 #else
8712- return user_mode(regs);
8713+ return user_mode_novm(regs);
8714 #endif
8715 }
8716
8717diff -urNp linux-3.0.7/arch/x86/include/asm/reboot.h linux-3.0.7/arch/x86/include/asm/reboot.h
8718--- linux-3.0.7/arch/x86/include/asm/reboot.h 2011-07-21 22:17:23.000000000 -0400
8719+++ linux-3.0.7/arch/x86/include/asm/reboot.h 2011-08-23 21:47:55.000000000 -0400
8720@@ -6,19 +6,19 @@
8721 struct pt_regs;
8722
8723 struct machine_ops {
8724- void (*restart)(char *cmd);
8725- void (*halt)(void);
8726- void (*power_off)(void);
8727+ void (* __noreturn restart)(char *cmd);
8728+ void (* __noreturn halt)(void);
8729+ void (* __noreturn power_off)(void);
8730 void (*shutdown)(void);
8731 void (*crash_shutdown)(struct pt_regs *);
8732- void (*emergency_restart)(void);
8733-};
8734+ void (* __noreturn emergency_restart)(void);
8735+} __no_const;
8736
8737 extern struct machine_ops machine_ops;
8738
8739 void native_machine_crash_shutdown(struct pt_regs *regs);
8740 void native_machine_shutdown(void);
8741-void machine_real_restart(unsigned int type);
8742+void machine_real_restart(unsigned int type) __noreturn;
8743 /* These must match dispatch_table in reboot_32.S */
8744 #define MRR_BIOS 0
8745 #define MRR_APM 1
8746diff -urNp linux-3.0.7/arch/x86/include/asm/rwsem.h linux-3.0.7/arch/x86/include/asm/rwsem.h
8747--- linux-3.0.7/arch/x86/include/asm/rwsem.h 2011-07-21 22:17:23.000000000 -0400
8748+++ linux-3.0.7/arch/x86/include/asm/rwsem.h 2011-08-23 21:47:55.000000000 -0400
8749@@ -64,6 +64,14 @@ static inline void __down_read(struct rw
8750 {
8751 asm volatile("# beginning down_read\n\t"
8752 LOCK_PREFIX _ASM_INC "(%1)\n\t"
8753+
8754+#ifdef CONFIG_PAX_REFCOUNT
8755+ "jno 0f\n"
8756+ LOCK_PREFIX _ASM_DEC "(%1)\n"
8757+ "int $4\n0:\n"
8758+ _ASM_EXTABLE(0b, 0b)
8759+#endif
8760+
8761 /* adds 0x00000001 */
8762 " jns 1f\n"
8763 " call call_rwsem_down_read_failed\n"
8764@@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
8765 "1:\n\t"
8766 " mov %1,%2\n\t"
8767 " add %3,%2\n\t"
8768+
8769+#ifdef CONFIG_PAX_REFCOUNT
8770+ "jno 0f\n"
8771+ "sub %3,%2\n"
8772+ "int $4\n0:\n"
8773+ _ASM_EXTABLE(0b, 0b)
8774+#endif
8775+
8776 " jle 2f\n\t"
8777 LOCK_PREFIX " cmpxchg %2,%0\n\t"
8778 " jnz 1b\n\t"
8779@@ -104,6 +120,14 @@ static inline void __down_write_nested(s
8780 long tmp;
8781 asm volatile("# beginning down_write\n\t"
8782 LOCK_PREFIX " xadd %1,(%2)\n\t"
8783+
8784+#ifdef CONFIG_PAX_REFCOUNT
8785+ "jno 0f\n"
8786+ "mov %1,(%2)\n"
8787+ "int $4\n0:\n"
8788+ _ASM_EXTABLE(0b, 0b)
8789+#endif
8790+
8791 /* adds 0xffff0001, returns the old value */
8792 " test %1,%1\n\t"
8793 /* was the count 0 before? */
8794@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
8795 long tmp;
8796 asm volatile("# beginning __up_read\n\t"
8797 LOCK_PREFIX " xadd %1,(%2)\n\t"
8798+
8799+#ifdef CONFIG_PAX_REFCOUNT
8800+ "jno 0f\n"
8801+ "mov %1,(%2)\n"
8802+ "int $4\n0:\n"
8803+ _ASM_EXTABLE(0b, 0b)
8804+#endif
8805+
8806 /* subtracts 1, returns the old value */
8807 " jns 1f\n\t"
8808 " call call_rwsem_wake\n" /* expects old value in %edx */
8809@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
8810 long tmp;
8811 asm volatile("# beginning __up_write\n\t"
8812 LOCK_PREFIX " xadd %1,(%2)\n\t"
8813+
8814+#ifdef CONFIG_PAX_REFCOUNT
8815+ "jno 0f\n"
8816+ "mov %1,(%2)\n"
8817+ "int $4\n0:\n"
8818+ _ASM_EXTABLE(0b, 0b)
8819+#endif
8820+
8821 /* subtracts 0xffff0001, returns the old value */
8822 " jns 1f\n\t"
8823 " call call_rwsem_wake\n" /* expects old value in %edx */
8824@@ -176,6 +216,14 @@ static inline void __downgrade_write(str
8825 {
8826 asm volatile("# beginning __downgrade_write\n\t"
8827 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
8828+
8829+#ifdef CONFIG_PAX_REFCOUNT
8830+ "jno 0f\n"
8831+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
8832+ "int $4\n0:\n"
8833+ _ASM_EXTABLE(0b, 0b)
8834+#endif
8835+
8836 /*
8837 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
8838 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
8839@@ -194,7 +242,15 @@ static inline void __downgrade_write(str
8840 */
8841 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
8842 {
8843- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
8844+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
8845+
8846+#ifdef CONFIG_PAX_REFCOUNT
8847+ "jno 0f\n"
8848+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
8849+ "int $4\n0:\n"
8850+ _ASM_EXTABLE(0b, 0b)
8851+#endif
8852+
8853 : "+m" (sem->count)
8854 : "er" (delta));
8855 }
8856@@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
8857 {
8858 long tmp = delta;
8859
8860- asm volatile(LOCK_PREFIX "xadd %0,%1"
8861+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
8862+
8863+#ifdef CONFIG_PAX_REFCOUNT
8864+ "jno 0f\n"
8865+ "mov %0,%1\n"
8866+ "int $4\n0:\n"
8867+ _ASM_EXTABLE(0b, 0b)
8868+#endif
8869+
8870 : "+r" (tmp), "+m" (sem->count)
8871 : : "memory");
8872
8873diff -urNp linux-3.0.7/arch/x86/include/asm/segment.h linux-3.0.7/arch/x86/include/asm/segment.h
8874--- linux-3.0.7/arch/x86/include/asm/segment.h 2011-07-21 22:17:23.000000000 -0400
8875+++ linux-3.0.7/arch/x86/include/asm/segment.h 2011-09-17 00:53:42.000000000 -0400
8876@@ -64,10 +64,15 @@
8877 * 26 - ESPFIX small SS
8878 * 27 - per-cpu [ offset to per-cpu data area ]
8879 * 28 - stack_canary-20 [ for stack protector ]
8880- * 29 - unused
8881- * 30 - unused
8882+ * 29 - PCI BIOS CS
8883+ * 30 - PCI BIOS DS
8884 * 31 - TSS for double fault handler
8885 */
8886+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
8887+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
8888+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
8889+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
8890+
8891 #define GDT_ENTRY_TLS_MIN 6
8892 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
8893
8894@@ -79,6 +84,8 @@
8895
8896 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
8897
8898+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
8899+
8900 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
8901
8902 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
8903@@ -104,6 +111,12 @@
8904 #define __KERNEL_STACK_CANARY 0
8905 #endif
8906
8907+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
8908+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
8909+
8910+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
8911+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
8912+
8913 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
8914
8915 /*
8916@@ -141,7 +154,7 @@
8917 */
8918
8919 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
8920-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
8921+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
8922
8923
8924 #else
8925@@ -165,6 +178,8 @@
8926 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
8927 #define __USER32_DS __USER_DS
8928
8929+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
8930+
8931 #define GDT_ENTRY_TSS 8 /* needs two entries */
8932 #define GDT_ENTRY_LDT 10 /* needs two entries */
8933 #define GDT_ENTRY_TLS_MIN 12
8934@@ -185,6 +200,7 @@
8935 #endif
8936
8937 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
8938+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
8939 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
8940 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
8941 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
8942diff -urNp linux-3.0.7/arch/x86/include/asm/smp.h linux-3.0.7/arch/x86/include/asm/smp.h
8943--- linux-3.0.7/arch/x86/include/asm/smp.h 2011-07-21 22:17:23.000000000 -0400
8944+++ linux-3.0.7/arch/x86/include/asm/smp.h 2011-08-23 21:47:55.000000000 -0400
8945@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
8946 /* cpus sharing the last level cache: */
8947 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
8948 DECLARE_PER_CPU(u16, cpu_llc_id);
8949-DECLARE_PER_CPU(int, cpu_number);
8950+DECLARE_PER_CPU(unsigned int, cpu_number);
8951
8952 static inline struct cpumask *cpu_sibling_mask(int cpu)
8953 {
8954@@ -77,7 +77,7 @@ struct smp_ops {
8955
8956 void (*send_call_func_ipi)(const struct cpumask *mask);
8957 void (*send_call_func_single_ipi)(int cpu);
8958-};
8959+} __no_const;
8960
8961 /* Globals due to paravirt */
8962 extern void set_cpu_sibling_map(int cpu);
8963@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
8964 extern int safe_smp_processor_id(void);
8965
8966 #elif defined(CONFIG_X86_64_SMP)
8967-#define raw_smp_processor_id() (percpu_read(cpu_number))
8968-
8969-#define stack_smp_processor_id() \
8970-({ \
8971- struct thread_info *ti; \
8972- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
8973- ti->cpu; \
8974-})
8975+#define raw_smp_processor_id() (percpu_read(cpu_number))
8976+#define stack_smp_processor_id() raw_smp_processor_id()
8977 #define safe_smp_processor_id() smp_processor_id()
8978
8979 #endif
8980diff -urNp linux-3.0.7/arch/x86/include/asm/spinlock.h linux-3.0.7/arch/x86/include/asm/spinlock.h
8981--- linux-3.0.7/arch/x86/include/asm/spinlock.h 2011-07-21 22:17:23.000000000 -0400
8982+++ linux-3.0.7/arch/x86/include/asm/spinlock.h 2011-08-23 21:47:55.000000000 -0400
8983@@ -249,6 +249,14 @@ static inline int arch_write_can_lock(ar
8984 static inline void arch_read_lock(arch_rwlock_t *rw)
8985 {
8986 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
8987+
8988+#ifdef CONFIG_PAX_REFCOUNT
8989+ "jno 0f\n"
8990+ LOCK_PREFIX " addl $1,(%0)\n"
8991+ "int $4\n0:\n"
8992+ _ASM_EXTABLE(0b, 0b)
8993+#endif
8994+
8995 "jns 1f\n"
8996 "call __read_lock_failed\n\t"
8997 "1:\n"
8998@@ -258,6 +266,14 @@ static inline void arch_read_lock(arch_r
8999 static inline void arch_write_lock(arch_rwlock_t *rw)
9000 {
9001 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
9002+
9003+#ifdef CONFIG_PAX_REFCOUNT
9004+ "jno 0f\n"
9005+ LOCK_PREFIX " addl %1,(%0)\n"
9006+ "int $4\n0:\n"
9007+ _ASM_EXTABLE(0b, 0b)
9008+#endif
9009+
9010 "jz 1f\n"
9011 "call __write_lock_failed\n\t"
9012 "1:\n"
9013@@ -286,12 +302,29 @@ static inline int arch_write_trylock(arc
9014
9015 static inline void arch_read_unlock(arch_rwlock_t *rw)
9016 {
9017- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
9018+ asm volatile(LOCK_PREFIX "incl %0\n"
9019+
9020+#ifdef CONFIG_PAX_REFCOUNT
9021+ "jno 0f\n"
9022+ LOCK_PREFIX "decl %0\n"
9023+ "int $4\n0:\n"
9024+ _ASM_EXTABLE(0b, 0b)
9025+#endif
9026+
9027+ :"+m" (rw->lock) : : "memory");
9028 }
9029
9030 static inline void arch_write_unlock(arch_rwlock_t *rw)
9031 {
9032- asm volatile(LOCK_PREFIX "addl %1, %0"
9033+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
9034+
9035+#ifdef CONFIG_PAX_REFCOUNT
9036+ "jno 0f\n"
9037+ LOCK_PREFIX "subl %1, %0\n"
9038+ "int $4\n0:\n"
9039+ _ASM_EXTABLE(0b, 0b)
9040+#endif
9041+
9042 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
9043 }
9044
9045diff -urNp linux-3.0.7/arch/x86/include/asm/stackprotector.h linux-3.0.7/arch/x86/include/asm/stackprotector.h
9046--- linux-3.0.7/arch/x86/include/asm/stackprotector.h 2011-07-21 22:17:23.000000000 -0400
9047+++ linux-3.0.7/arch/x86/include/asm/stackprotector.h 2011-08-23 21:47:55.000000000 -0400
9048@@ -48,7 +48,7 @@
9049 * head_32 for boot CPU and setup_per_cpu_areas() for others.
9050 */
9051 #define GDT_STACK_CANARY_INIT \
9052- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
9053+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
9054
9055 /*
9056 * Initialize the stackprotector canary value.
9057@@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
9058
9059 static inline void load_stack_canary_segment(void)
9060 {
9061-#ifdef CONFIG_X86_32
9062+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
9063 asm volatile ("mov %0, %%gs" : : "r" (0));
9064 #endif
9065 }
9066diff -urNp linux-3.0.7/arch/x86/include/asm/stacktrace.h linux-3.0.7/arch/x86/include/asm/stacktrace.h
9067--- linux-3.0.7/arch/x86/include/asm/stacktrace.h 2011-07-21 22:17:23.000000000 -0400
9068+++ linux-3.0.7/arch/x86/include/asm/stacktrace.h 2011-08-23 21:47:55.000000000 -0400
9069@@ -11,28 +11,20 @@
9070
9071 extern int kstack_depth_to_print;
9072
9073-struct thread_info;
9074+struct task_struct;
9075 struct stacktrace_ops;
9076
9077-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
9078- unsigned long *stack,
9079- unsigned long bp,
9080- const struct stacktrace_ops *ops,
9081- void *data,
9082- unsigned long *end,
9083- int *graph);
9084-
9085-extern unsigned long
9086-print_context_stack(struct thread_info *tinfo,
9087- unsigned long *stack, unsigned long bp,
9088- const struct stacktrace_ops *ops, void *data,
9089- unsigned long *end, int *graph);
9090-
9091-extern unsigned long
9092-print_context_stack_bp(struct thread_info *tinfo,
9093- unsigned long *stack, unsigned long bp,
9094- const struct stacktrace_ops *ops, void *data,
9095- unsigned long *end, int *graph);
9096+typedef unsigned long walk_stack_t(struct task_struct *task,
9097+ void *stack_start,
9098+ unsigned long *stack,
9099+ unsigned long bp,
9100+ const struct stacktrace_ops *ops,
9101+ void *data,
9102+ unsigned long *end,
9103+ int *graph);
9104+
9105+extern walk_stack_t print_context_stack;
9106+extern walk_stack_t print_context_stack_bp;
9107
9108 /* Generic stack tracer with callbacks */
9109
9110@@ -40,7 +32,7 @@ struct stacktrace_ops {
9111 void (*address)(void *data, unsigned long address, int reliable);
9112 /* On negative return stop dumping */
9113 int (*stack)(void *data, char *name);
9114- walk_stack_t walk_stack;
9115+ walk_stack_t *walk_stack;
9116 };
9117
9118 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
9119diff -urNp linux-3.0.7/arch/x86/include/asm/sys_ia32.h linux-3.0.7/arch/x86/include/asm/sys_ia32.h
9120--- linux-3.0.7/arch/x86/include/asm/sys_ia32.h 2011-07-21 22:17:23.000000000 -0400
9121+++ linux-3.0.7/arch/x86/include/asm/sys_ia32.h 2011-10-06 04:17:55.000000000 -0400
9122@@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int
9123 compat_sigset_t __user *, unsigned int);
9124 asmlinkage long sys32_alarm(unsigned int);
9125
9126-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
9127+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
9128 asmlinkage long sys32_sysfs(int, u32, u32);
9129
9130 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
9131diff -urNp linux-3.0.7/arch/x86/include/asm/system.h linux-3.0.7/arch/x86/include/asm/system.h
9132--- linux-3.0.7/arch/x86/include/asm/system.h 2011-07-21 22:17:23.000000000 -0400
9133+++ linux-3.0.7/arch/x86/include/asm/system.h 2011-08-23 21:47:55.000000000 -0400
9134@@ -129,7 +129,7 @@ do { \
9135 "call __switch_to\n\t" \
9136 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
9137 __switch_canary \
9138- "movq %P[thread_info](%%rsi),%%r8\n\t" \
9139+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
9140 "movq %%rax,%%rdi\n\t" \
9141 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
9142 "jnz ret_from_fork\n\t" \
9143@@ -140,7 +140,7 @@ do { \
9144 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
9145 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
9146 [_tif_fork] "i" (_TIF_FORK), \
9147- [thread_info] "i" (offsetof(struct task_struct, stack)), \
9148+ [thread_info] "m" (current_tinfo), \
9149 [current_task] "m" (current_task) \
9150 __switch_canary_iparam \
9151 : "memory", "cc" __EXTRA_CLOBBER)
9152@@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
9153 {
9154 unsigned long __limit;
9155 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
9156- return __limit + 1;
9157+ return __limit;
9158 }
9159
9160 static inline void native_clts(void)
9161@@ -397,12 +397,12 @@ void enable_hlt(void);
9162
9163 void cpu_idle_wait(void);
9164
9165-extern unsigned long arch_align_stack(unsigned long sp);
9166+#define arch_align_stack(x) ((x) & ~0xfUL)
9167 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
9168
9169 void default_idle(void);
9170
9171-void stop_this_cpu(void *dummy);
9172+void stop_this_cpu(void *dummy) __noreturn;
9173
9174 /*
9175 * Force strict CPU ordering.
9176diff -urNp linux-3.0.7/arch/x86/include/asm/thread_info.h linux-3.0.7/arch/x86/include/asm/thread_info.h
9177--- linux-3.0.7/arch/x86/include/asm/thread_info.h 2011-07-21 22:17:23.000000000 -0400
9178+++ linux-3.0.7/arch/x86/include/asm/thread_info.h 2011-08-23 21:47:55.000000000 -0400
9179@@ -10,6 +10,7 @@
9180 #include <linux/compiler.h>
9181 #include <asm/page.h>
9182 #include <asm/types.h>
9183+#include <asm/percpu.h>
9184
9185 /*
9186 * low level task data that entry.S needs immediate access to
9187@@ -24,7 +25,6 @@ struct exec_domain;
9188 #include <asm/atomic.h>
9189
9190 struct thread_info {
9191- struct task_struct *task; /* main task structure */
9192 struct exec_domain *exec_domain; /* execution domain */
9193 __u32 flags; /* low level flags */
9194 __u32 status; /* thread synchronous flags */
9195@@ -34,18 +34,12 @@ struct thread_info {
9196 mm_segment_t addr_limit;
9197 struct restart_block restart_block;
9198 void __user *sysenter_return;
9199-#ifdef CONFIG_X86_32
9200- unsigned long previous_esp; /* ESP of the previous stack in
9201- case of nested (IRQ) stacks
9202- */
9203- __u8 supervisor_stack[0];
9204-#endif
9205+ unsigned long lowest_stack;
9206 int uaccess_err;
9207 };
9208
9209-#define INIT_THREAD_INFO(tsk) \
9210+#define INIT_THREAD_INFO \
9211 { \
9212- .task = &tsk, \
9213 .exec_domain = &default_exec_domain, \
9214 .flags = 0, \
9215 .cpu = 0, \
9216@@ -56,7 +50,7 @@ struct thread_info {
9217 }, \
9218 }
9219
9220-#define init_thread_info (init_thread_union.thread_info)
9221+#define init_thread_info (init_thread_union.stack)
9222 #define init_stack (init_thread_union.stack)
9223
9224 #else /* !__ASSEMBLY__ */
9225@@ -170,6 +164,23 @@ struct thread_info {
9226 ret; \
9227 })
9228
9229+#ifdef __ASSEMBLY__
9230+/* how to get the thread information struct from ASM */
9231+#define GET_THREAD_INFO(reg) \
9232+ mov PER_CPU_VAR(current_tinfo), reg
9233+
9234+/* use this one if reg already contains %esp */
9235+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9236+#else
9237+/* how to get the thread information struct from C */
9238+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
9239+
9240+static __always_inline struct thread_info *current_thread_info(void)
9241+{
9242+ return percpu_read_stable(current_tinfo);
9243+}
9244+#endif
9245+
9246 #ifdef CONFIG_X86_32
9247
9248 #define STACK_WARN (THREAD_SIZE/8)
9249@@ -180,35 +191,13 @@ struct thread_info {
9250 */
9251 #ifndef __ASSEMBLY__
9252
9253-
9254 /* how to get the current stack pointer from C */
9255 register unsigned long current_stack_pointer asm("esp") __used;
9256
9257-/* how to get the thread information struct from C */
9258-static inline struct thread_info *current_thread_info(void)
9259-{
9260- return (struct thread_info *)
9261- (current_stack_pointer & ~(THREAD_SIZE - 1));
9262-}
9263-
9264-#else /* !__ASSEMBLY__ */
9265-
9266-/* how to get the thread information struct from ASM */
9267-#define GET_THREAD_INFO(reg) \
9268- movl $-THREAD_SIZE, reg; \
9269- andl %esp, reg
9270-
9271-/* use this one if reg already contains %esp */
9272-#define GET_THREAD_INFO_WITH_ESP(reg) \
9273- andl $-THREAD_SIZE, reg
9274-
9275 #endif
9276
9277 #else /* X86_32 */
9278
9279-#include <asm/percpu.h>
9280-#define KERNEL_STACK_OFFSET (5*8)
9281-
9282 /*
9283 * macros/functions for gaining access to the thread information structure
9284 * preempt_count needs to be 1 initially, until the scheduler is functional.
9285@@ -216,21 +205,8 @@ static inline struct thread_info *curren
9286 #ifndef __ASSEMBLY__
9287 DECLARE_PER_CPU(unsigned long, kernel_stack);
9288
9289-static inline struct thread_info *current_thread_info(void)
9290-{
9291- struct thread_info *ti;
9292- ti = (void *)(percpu_read_stable(kernel_stack) +
9293- KERNEL_STACK_OFFSET - THREAD_SIZE);
9294- return ti;
9295-}
9296-
9297-#else /* !__ASSEMBLY__ */
9298-
9299-/* how to get the thread information struct from ASM */
9300-#define GET_THREAD_INFO(reg) \
9301- movq PER_CPU_VAR(kernel_stack),reg ; \
9302- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
9303-
9304+/* how to get the current stack pointer from C */
9305+register unsigned long current_stack_pointer asm("rsp") __used;
9306 #endif
9307
9308 #endif /* !X86_32 */
9309@@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
9310 extern void free_thread_info(struct thread_info *ti);
9311 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
9312 #define arch_task_cache_init arch_task_cache_init
9313+
9314+#define __HAVE_THREAD_FUNCTIONS
9315+#define task_thread_info(task) (&(task)->tinfo)
9316+#define task_stack_page(task) ((task)->stack)
9317+#define setup_thread_stack(p, org) do {} while (0)
9318+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
9319+
9320+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
9321+extern struct task_struct *alloc_task_struct_node(int node);
9322+extern void free_task_struct(struct task_struct *);
9323+
9324 #endif
9325 #endif /* _ASM_X86_THREAD_INFO_H */
9326diff -urNp linux-3.0.7/arch/x86/include/asm/uaccess_32.h linux-3.0.7/arch/x86/include/asm/uaccess_32.h
9327--- linux-3.0.7/arch/x86/include/asm/uaccess_32.h 2011-07-21 22:17:23.000000000 -0400
9328+++ linux-3.0.7/arch/x86/include/asm/uaccess_32.h 2011-08-23 21:48:14.000000000 -0400
9329@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_u
9330 static __always_inline unsigned long __must_check
9331 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9332 {
9333+ pax_track_stack();
9334+
9335+ if ((long)n < 0)
9336+ return n;
9337+
9338 if (__builtin_constant_p(n)) {
9339 unsigned long ret;
9340
9341@@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to,
9342 return ret;
9343 }
9344 }
9345+ if (!__builtin_constant_p(n))
9346+ check_object_size(from, n, true);
9347 return __copy_to_user_ll(to, from, n);
9348 }
9349
9350@@ -82,12 +89,16 @@ static __always_inline unsigned long __m
9351 __copy_to_user(void __user *to, const void *from, unsigned long n)
9352 {
9353 might_fault();
9354+
9355 return __copy_to_user_inatomic(to, from, n);
9356 }
9357
9358 static __always_inline unsigned long
9359 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9360 {
9361+ if ((long)n < 0)
9362+ return n;
9363+
9364 /* Avoid zeroing the tail if the copy fails..
9365 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9366 * but as the zeroing behaviour is only significant when n is not
9367@@ -137,6 +148,12 @@ static __always_inline unsigned long
9368 __copy_from_user(void *to, const void __user *from, unsigned long n)
9369 {
9370 might_fault();
9371+
9372+ pax_track_stack();
9373+
9374+ if ((long)n < 0)
9375+ return n;
9376+
9377 if (__builtin_constant_p(n)) {
9378 unsigned long ret;
9379
9380@@ -152,6 +169,8 @@ __copy_from_user(void *to, const void __
9381 return ret;
9382 }
9383 }
9384+ if (!__builtin_constant_p(n))
9385+ check_object_size(to, n, false);
9386 return __copy_from_user_ll(to, from, n);
9387 }
9388
9389@@ -159,6 +178,10 @@ static __always_inline unsigned long __c
9390 const void __user *from, unsigned long n)
9391 {
9392 might_fault();
9393+
9394+ if ((long)n < 0)
9395+ return n;
9396+
9397 if (__builtin_constant_p(n)) {
9398 unsigned long ret;
9399
9400@@ -181,15 +204,19 @@ static __always_inline unsigned long
9401 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9402 unsigned long n)
9403 {
9404- return __copy_from_user_ll_nocache_nozero(to, from, n);
9405-}
9406+ if ((long)n < 0)
9407+ return n;
9408
9409-unsigned long __must_check copy_to_user(void __user *to,
9410- const void *from, unsigned long n);
9411-unsigned long __must_check _copy_from_user(void *to,
9412- const void __user *from,
9413- unsigned long n);
9414+ return __copy_from_user_ll_nocache_nozero(to, from, n);
9415+}
9416
9417+extern void copy_to_user_overflow(void)
9418+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9419+ __compiletime_error("copy_to_user() buffer size is not provably correct")
9420+#else
9421+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
9422+#endif
9423+;
9424
9425 extern void copy_from_user_overflow(void)
9426 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9427@@ -199,17 +226,61 @@ extern void copy_from_user_overflow(void
9428 #endif
9429 ;
9430
9431-static inline unsigned long __must_check copy_from_user(void *to,
9432- const void __user *from,
9433- unsigned long n)
9434+/**
9435+ * copy_to_user: - Copy a block of data into user space.
9436+ * @to: Destination address, in user space.
9437+ * @from: Source address, in kernel space.
9438+ * @n: Number of bytes to copy.
9439+ *
9440+ * Context: User context only. This function may sleep.
9441+ *
9442+ * Copy data from kernel space to user space.
9443+ *
9444+ * Returns number of bytes that could not be copied.
9445+ * On success, this will be zero.
9446+ */
9447+static inline unsigned long __must_check
9448+copy_to_user(void __user *to, const void *from, unsigned long n)
9449+{
9450+ int sz = __compiletime_object_size(from);
9451+
9452+ if (unlikely(sz != -1 && sz < n))
9453+ copy_to_user_overflow();
9454+ else if (access_ok(VERIFY_WRITE, to, n))
9455+ n = __copy_to_user(to, from, n);
9456+ return n;
9457+}
9458+
9459+/**
9460+ * copy_from_user: - Copy a block of data from user space.
9461+ * @to: Destination address, in kernel space.
9462+ * @from: Source address, in user space.
9463+ * @n: Number of bytes to copy.
9464+ *
9465+ * Context: User context only. This function may sleep.
9466+ *
9467+ * Copy data from user space to kernel space.
9468+ *
9469+ * Returns number of bytes that could not be copied.
9470+ * On success, this will be zero.
9471+ *
9472+ * If some data could not be copied, this function will pad the copied
9473+ * data to the requested size using zero bytes.
9474+ */
9475+static inline unsigned long __must_check
9476+copy_from_user(void *to, const void __user *from, unsigned long n)
9477 {
9478 int sz = __compiletime_object_size(to);
9479
9480- if (likely(sz == -1 || sz >= n))
9481- n = _copy_from_user(to, from, n);
9482- else
9483+ if (unlikely(sz != -1 && sz < n))
9484 copy_from_user_overflow();
9485-
9486+ else if (access_ok(VERIFY_READ, from, n))
9487+ n = __copy_from_user(to, from, n);
9488+ else if ((long)n > 0) {
9489+ if (!__builtin_constant_p(n))
9490+ check_object_size(to, n, false);
9491+ memset(to, 0, n);
9492+ }
9493 return n;
9494 }
9495
9496diff -urNp linux-3.0.7/arch/x86/include/asm/uaccess_64.h linux-3.0.7/arch/x86/include/asm/uaccess_64.h
9497--- linux-3.0.7/arch/x86/include/asm/uaccess_64.h 2011-07-21 22:17:23.000000000 -0400
9498+++ linux-3.0.7/arch/x86/include/asm/uaccess_64.h 2011-10-06 04:17:55.000000000 -0400
9499@@ -10,6 +10,9 @@
9500 #include <asm/alternative.h>
9501 #include <asm/cpufeature.h>
9502 #include <asm/page.h>
9503+#include <asm/pgtable.h>
9504+
9505+#define set_fs(x) (current_thread_info()->addr_limit = (x))
9506
9507 /*
9508 * Copy To/From Userspace
9509@@ -36,26 +39,26 @@ copy_user_generic(void *to, const void *
9510 return ret;
9511 }
9512
9513-__must_check unsigned long
9514-_copy_to_user(void __user *to, const void *from, unsigned len);
9515-__must_check unsigned long
9516-_copy_from_user(void *to, const void __user *from, unsigned len);
9517+static __always_inline __must_check unsigned long
9518+__copy_to_user(void __user *to, const void *from, unsigned len);
9519+static __always_inline __must_check unsigned long
9520+__copy_from_user(void *to, const void __user *from, unsigned len);
9521 __must_check unsigned long
9522 copy_in_user(void __user *to, const void __user *from, unsigned len);
9523
9524 static inline unsigned long __must_check copy_from_user(void *to,
9525 const void __user *from,
9526- unsigned long n)
9527+ unsigned n)
9528 {
9529- int sz = __compiletime_object_size(to);
9530-
9531 might_fault();
9532- if (likely(sz == -1 || sz >= n))
9533- n = _copy_from_user(to, from, n);
9534-#ifdef CONFIG_DEBUG_VM
9535- else
9536- WARN(1, "Buffer overflow detected!\n");
9537-#endif
9538+
9539+ if (access_ok(VERIFY_READ, from, n))
9540+ n = __copy_from_user(to, from, n);
9541+ else if ((int)n > 0) {
9542+ if (!__builtin_constant_p(n))
9543+ check_object_size(to, n, false);
9544+ memset(to, 0, n);
9545+ }
9546 return n;
9547 }
9548
9549@@ -64,110 +67,198 @@ int copy_to_user(void __user *dst, const
9550 {
9551 might_fault();
9552
9553- return _copy_to_user(dst, src, size);
9554+ if (access_ok(VERIFY_WRITE, dst, size))
9555+ size = __copy_to_user(dst, src, size);
9556+ return size;
9557 }
9558
9559 static __always_inline __must_check
9560-int __copy_from_user(void *dst, const void __user *src, unsigned size)
9561+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9562 {
9563- int ret = 0;
9564+ int sz = __compiletime_object_size(dst);
9565+ unsigned ret = 0;
9566
9567 might_fault();
9568- if (!__builtin_constant_p(size))
9569- return copy_user_generic(dst, (__force void *)src, size);
9570+
9571+ pax_track_stack();
9572+
9573+ if ((int)size < 0)
9574+ return size;
9575+
9576+#ifdef CONFIG_PAX_MEMORY_UDEREF
9577+ if (!__access_ok(VERIFY_READ, src, size))
9578+ return size;
9579+#endif
9580+
9581+ if (unlikely(sz != -1 && sz < size)) {
9582+#ifdef CONFIG_DEBUG_VM
9583+ WARN(1, "Buffer overflow detected!\n");
9584+#endif
9585+ return size;
9586+ }
9587+
9588+ if (!__builtin_constant_p(size)) {
9589+ check_object_size(dst, size, false);
9590+
9591+#ifdef CONFIG_PAX_MEMORY_UDEREF
9592+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9593+ src += PAX_USER_SHADOW_BASE;
9594+#endif
9595+
9596+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
9597+ }
9598 switch (size) {
9599- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9600+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
9601 ret, "b", "b", "=q", 1);
9602 return ret;
9603- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
9604+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
9605 ret, "w", "w", "=r", 2);
9606 return ret;
9607- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
9608+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
9609 ret, "l", "k", "=r", 4);
9610 return ret;
9611- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
9612+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9613 ret, "q", "", "=r", 8);
9614 return ret;
9615 case 10:
9616- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9617+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9618 ret, "q", "", "=r", 10);
9619 if (unlikely(ret))
9620 return ret;
9621 __get_user_asm(*(u16 *)(8 + (char *)dst),
9622- (u16 __user *)(8 + (char __user *)src),
9623+ (const u16 __user *)(8 + (const char __user *)src),
9624 ret, "w", "w", "=r", 2);
9625 return ret;
9626 case 16:
9627- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9628+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9629 ret, "q", "", "=r", 16);
9630 if (unlikely(ret))
9631 return ret;
9632 __get_user_asm(*(u64 *)(8 + (char *)dst),
9633- (u64 __user *)(8 + (char __user *)src),
9634+ (const u64 __user *)(8 + (const char __user *)src),
9635 ret, "q", "", "=r", 8);
9636 return ret;
9637 default:
9638- return copy_user_generic(dst, (__force void *)src, size);
9639+
9640+#ifdef CONFIG_PAX_MEMORY_UDEREF
9641+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9642+ src += PAX_USER_SHADOW_BASE;
9643+#endif
9644+
9645+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
9646 }
9647 }
9648
9649 static __always_inline __must_check
9650-int __copy_to_user(void __user *dst, const void *src, unsigned size)
9651+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
9652 {
9653- int ret = 0;
9654+ int sz = __compiletime_object_size(src);
9655+ unsigned ret = 0;
9656
9657 might_fault();
9658- if (!__builtin_constant_p(size))
9659- return copy_user_generic((__force void *)dst, src, size);
9660+
9661+ pax_track_stack();
9662+
9663+ if ((int)size < 0)
9664+ return size;
9665+
9666+#ifdef CONFIG_PAX_MEMORY_UDEREF
9667+ if (!__access_ok(VERIFY_WRITE, dst, size))
9668+ return size;
9669+#endif
9670+
9671+ if (unlikely(sz != -1 && sz < size)) {
9672+#ifdef CONFIG_DEBUG_VM
9673+ WARN(1, "Buffer overflow detected!\n");
9674+#endif
9675+ return size;
9676+ }
9677+
9678+ if (!__builtin_constant_p(size)) {
9679+ check_object_size(src, size, true);
9680+
9681+#ifdef CONFIG_PAX_MEMORY_UDEREF
9682+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9683+ dst += PAX_USER_SHADOW_BASE;
9684+#endif
9685+
9686+ return copy_user_generic((__force_kernel void *)dst, src, size);
9687+ }
9688 switch (size) {
9689- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
9690+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
9691 ret, "b", "b", "iq", 1);
9692 return ret;
9693- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
9694+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
9695 ret, "w", "w", "ir", 2);
9696 return ret;
9697- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
9698+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
9699 ret, "l", "k", "ir", 4);
9700 return ret;
9701- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
9702+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9703 ret, "q", "", "er", 8);
9704 return ret;
9705 case 10:
9706- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9707+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9708 ret, "q", "", "er", 10);
9709 if (unlikely(ret))
9710 return ret;
9711 asm("":::"memory");
9712- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
9713+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
9714 ret, "w", "w", "ir", 2);
9715 return ret;
9716 case 16:
9717- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9718+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9719 ret, "q", "", "er", 16);
9720 if (unlikely(ret))
9721 return ret;
9722 asm("":::"memory");
9723- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
9724+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
9725 ret, "q", "", "er", 8);
9726 return ret;
9727 default:
9728- return copy_user_generic((__force void *)dst, src, size);
9729+
9730+#ifdef CONFIG_PAX_MEMORY_UDEREF
9731+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9732+ dst += PAX_USER_SHADOW_BASE;
9733+#endif
9734+
9735+ return copy_user_generic((__force_kernel void *)dst, src, size);
9736 }
9737 }
9738
9739 static __always_inline __must_check
9740-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9741+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9742 {
9743- int ret = 0;
9744+ unsigned ret = 0;
9745
9746 might_fault();
9747- if (!__builtin_constant_p(size))
9748- return copy_user_generic((__force void *)dst,
9749- (__force void *)src, size);
9750+
9751+ if ((int)size < 0)
9752+ return size;
9753+
9754+#ifdef CONFIG_PAX_MEMORY_UDEREF
9755+ if (!__access_ok(VERIFY_READ, src, size))
9756+ return size;
9757+ if (!__access_ok(VERIFY_WRITE, dst, size))
9758+ return size;
9759+#endif
9760+
9761+ if (!__builtin_constant_p(size)) {
9762+
9763+#ifdef CONFIG_PAX_MEMORY_UDEREF
9764+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9765+ src += PAX_USER_SHADOW_BASE;
9766+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9767+ dst += PAX_USER_SHADOW_BASE;
9768+#endif
9769+
9770+ return copy_user_generic((__force_kernel void *)dst,
9771+ (__force_kernel const void *)src, size);
9772+ }
9773 switch (size) {
9774 case 1: {
9775 u8 tmp;
9776- __get_user_asm(tmp, (u8 __user *)src,
9777+ __get_user_asm(tmp, (const u8 __user *)src,
9778 ret, "b", "b", "=q", 1);
9779 if (likely(!ret))
9780 __put_user_asm(tmp, (u8 __user *)dst,
9781@@ -176,7 +267,7 @@ int __copy_in_user(void __user *dst, con
9782 }
9783 case 2: {
9784 u16 tmp;
9785- __get_user_asm(tmp, (u16 __user *)src,
9786+ __get_user_asm(tmp, (const u16 __user *)src,
9787 ret, "w", "w", "=r", 2);
9788 if (likely(!ret))
9789 __put_user_asm(tmp, (u16 __user *)dst,
9790@@ -186,7 +277,7 @@ int __copy_in_user(void __user *dst, con
9791
9792 case 4: {
9793 u32 tmp;
9794- __get_user_asm(tmp, (u32 __user *)src,
9795+ __get_user_asm(tmp, (const u32 __user *)src,
9796 ret, "l", "k", "=r", 4);
9797 if (likely(!ret))
9798 __put_user_asm(tmp, (u32 __user *)dst,
9799@@ -195,7 +286,7 @@ int __copy_in_user(void __user *dst, con
9800 }
9801 case 8: {
9802 u64 tmp;
9803- __get_user_asm(tmp, (u64 __user *)src,
9804+ __get_user_asm(tmp, (const u64 __user *)src,
9805 ret, "q", "", "=r", 8);
9806 if (likely(!ret))
9807 __put_user_asm(tmp, (u64 __user *)dst,
9808@@ -203,8 +294,16 @@ int __copy_in_user(void __user *dst, con
9809 return ret;
9810 }
9811 default:
9812- return copy_user_generic((__force void *)dst,
9813- (__force void *)src, size);
9814+
9815+#ifdef CONFIG_PAX_MEMORY_UDEREF
9816+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9817+ src += PAX_USER_SHADOW_BASE;
9818+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9819+ dst += PAX_USER_SHADOW_BASE;
9820+#endif
9821+
9822+ return copy_user_generic((__force_kernel void *)dst,
9823+ (__force_kernel const void *)src, size);
9824 }
9825 }
9826
9827@@ -221,33 +320,72 @@ __must_check unsigned long __clear_user(
9828 static __must_check __always_inline int
9829 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
9830 {
9831- return copy_user_generic(dst, (__force const void *)src, size);
9832+ pax_track_stack();
9833+
9834+ if ((int)size < 0)
9835+ return size;
9836+
9837+#ifdef CONFIG_PAX_MEMORY_UDEREF
9838+ if (!__access_ok(VERIFY_READ, src, size))
9839+ return size;
9840+
9841+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9842+ src += PAX_USER_SHADOW_BASE;
9843+#endif
9844+
9845+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
9846 }
9847
9848-static __must_check __always_inline int
9849+static __must_check __always_inline unsigned long
9850 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
9851 {
9852- return copy_user_generic((__force void *)dst, src, size);
9853+ if ((int)size < 0)
9854+ return size;
9855+
9856+#ifdef CONFIG_PAX_MEMORY_UDEREF
9857+ if (!__access_ok(VERIFY_WRITE, dst, size))
9858+ return size;
9859+
9860+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9861+ dst += PAX_USER_SHADOW_BASE;
9862+#endif
9863+
9864+ return copy_user_generic((__force_kernel void *)dst, src, size);
9865 }
9866
9867-extern long __copy_user_nocache(void *dst, const void __user *src,
9868+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
9869 unsigned size, int zerorest);
9870
9871-static inline int
9872-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9873+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9874 {
9875 might_sleep();
9876+
9877+ if ((int)size < 0)
9878+ return size;
9879+
9880+#ifdef CONFIG_PAX_MEMORY_UDEREF
9881+ if (!__access_ok(VERIFY_READ, src, size))
9882+ return size;
9883+#endif
9884+
9885 return __copy_user_nocache(dst, src, size, 1);
9886 }
9887
9888-static inline int
9889-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9890+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9891 unsigned size)
9892 {
9893+ if ((int)size < 0)
9894+ return size;
9895+
9896+#ifdef CONFIG_PAX_MEMORY_UDEREF
9897+ if (!__access_ok(VERIFY_READ, src, size))
9898+ return size;
9899+#endif
9900+
9901 return __copy_user_nocache(dst, src, size, 0);
9902 }
9903
9904-unsigned long
9905-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
9906+extern unsigned long
9907+copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest);
9908
9909 #endif /* _ASM_X86_UACCESS_64_H */
9910diff -urNp linux-3.0.7/arch/x86/include/asm/uaccess.h linux-3.0.7/arch/x86/include/asm/uaccess.h
9911--- linux-3.0.7/arch/x86/include/asm/uaccess.h 2011-07-21 22:17:23.000000000 -0400
9912+++ linux-3.0.7/arch/x86/include/asm/uaccess.h 2011-10-06 04:17:55.000000000 -0400
9913@@ -7,12 +7,15 @@
9914 #include <linux/compiler.h>
9915 #include <linux/thread_info.h>
9916 #include <linux/string.h>
9917+#include <linux/sched.h>
9918 #include <asm/asm.h>
9919 #include <asm/page.h>
9920
9921 #define VERIFY_READ 0
9922 #define VERIFY_WRITE 1
9923
9924+extern void check_object_size(const void *ptr, unsigned long n, bool to);
9925+
9926 /*
9927 * The fs value determines whether argument validity checking should be
9928 * performed or not. If get_fs() == USER_DS, checking is performed, with
9929@@ -28,7 +31,12 @@
9930
9931 #define get_ds() (KERNEL_DS)
9932 #define get_fs() (current_thread_info()->addr_limit)
9933+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9934+void __set_fs(mm_segment_t x);
9935+void set_fs(mm_segment_t x);
9936+#else
9937 #define set_fs(x) (current_thread_info()->addr_limit = (x))
9938+#endif
9939
9940 #define segment_eq(a, b) ((a).seg == (b).seg)
9941
9942@@ -76,7 +84,33 @@
9943 * checks that the pointer is in the user space range - after calling
9944 * this function, memory access functions may still return -EFAULT.
9945 */
9946-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9947+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9948+#define access_ok(type, addr, size) \
9949+({ \
9950+ long __size = size; \
9951+ unsigned long __addr = (unsigned long)addr; \
9952+ unsigned long __addr_ao = __addr & PAGE_MASK; \
9953+ unsigned long __end_ao = __addr + __size - 1; \
9954+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
9955+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
9956+ while(__addr_ao <= __end_ao) { \
9957+ char __c_ao; \
9958+ __addr_ao += PAGE_SIZE; \
9959+ if (__size > PAGE_SIZE) \
9960+ cond_resched(); \
9961+ if (__get_user(__c_ao, (char __user *)__addr)) \
9962+ break; \
9963+ if (type != VERIFY_WRITE) { \
9964+ __addr = __addr_ao; \
9965+ continue; \
9966+ } \
9967+ if (__put_user(__c_ao, (char __user *)__addr)) \
9968+ break; \
9969+ __addr = __addr_ao; \
9970+ } \
9971+ } \
9972+ __ret_ao; \
9973+})
9974
9975 /*
9976 * The exception table consists of pairs of addresses: the first is the
9977@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
9978 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
9979 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
9980
9981-
9982+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9983+#define __copyuser_seg "gs;"
9984+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
9985+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
9986+#else
9987+#define __copyuser_seg
9988+#define __COPYUSER_SET_ES
9989+#define __COPYUSER_RESTORE_ES
9990+#endif
9991
9992 #ifdef CONFIG_X86_32
9993 #define __put_user_asm_u64(x, addr, err, errret) \
9994- asm volatile("1: movl %%eax,0(%2)\n" \
9995- "2: movl %%edx,4(%2)\n" \
9996+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
9997+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
9998 "3:\n" \
9999 ".section .fixup,\"ax\"\n" \
10000 "4: movl %3,%0\n" \
10001@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
10002 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10003
10004 #define __put_user_asm_ex_u64(x, addr) \
10005- asm volatile("1: movl %%eax,0(%1)\n" \
10006- "2: movl %%edx,4(%1)\n" \
10007+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10008+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10009 "3:\n" \
10010 _ASM_EXTABLE(1b, 2b - 1b) \
10011 _ASM_EXTABLE(2b, 3b - 2b) \
10012@@ -252,7 +294,7 @@ extern void __put_user_8(void);
10013 __typeof__(*(ptr)) __pu_val; \
10014 __chk_user_ptr(ptr); \
10015 might_fault(); \
10016- __pu_val = x; \
10017+ __pu_val = (x); \
10018 switch (sizeof(*(ptr))) { \
10019 case 1: \
10020 __put_user_x(1, __pu_val, ptr, __ret_pu); \
10021@@ -373,7 +415,7 @@ do { \
10022 } while (0)
10023
10024 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10025- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10026+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10027 "2:\n" \
10028 ".section .fixup,\"ax\"\n" \
10029 "3: mov %3,%0\n" \
10030@@ -381,7 +423,7 @@ do { \
10031 " jmp 2b\n" \
10032 ".previous\n" \
10033 _ASM_EXTABLE(1b, 3b) \
10034- : "=r" (err), ltype(x) \
10035+ : "=r" (err), ltype (x) \
10036 : "m" (__m(addr)), "i" (errret), "0" (err))
10037
10038 #define __get_user_size_ex(x, ptr, size) \
10039@@ -406,7 +448,7 @@ do { \
10040 } while (0)
10041
10042 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10043- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10044+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10045 "2:\n" \
10046 _ASM_EXTABLE(1b, 2b - 1b) \
10047 : ltype(x) : "m" (__m(addr)))
10048@@ -423,13 +465,24 @@ do { \
10049 int __gu_err; \
10050 unsigned long __gu_val; \
10051 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10052- (x) = (__force __typeof__(*(ptr)))__gu_val; \
10053+ (x) = (__typeof__(*(ptr)))__gu_val; \
10054 __gu_err; \
10055 })
10056
10057 /* FIXME: this hack is definitely wrong -AK */
10058 struct __large_struct { unsigned long buf[100]; };
10059-#define __m(x) (*(struct __large_struct __user *)(x))
10060+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10061+#define ____m(x) \
10062+({ \
10063+ unsigned long ____x = (unsigned long)(x); \
10064+ if (____x < PAX_USER_SHADOW_BASE) \
10065+ ____x += PAX_USER_SHADOW_BASE; \
10066+ (void __user *)____x; \
10067+})
10068+#else
10069+#define ____m(x) (x)
10070+#endif
10071+#define __m(x) (*(struct __large_struct __user *)____m(x))
10072
10073 /*
10074 * Tell gcc we read from memory instead of writing: this is because
10075@@ -437,7 +490,7 @@ struct __large_struct { unsigned long bu
10076 * aliasing issues.
10077 */
10078 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10079- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10080+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10081 "2:\n" \
10082 ".section .fixup,\"ax\"\n" \
10083 "3: mov %3,%0\n" \
10084@@ -445,10 +498,10 @@ struct __large_struct { unsigned long bu
10085 ".previous\n" \
10086 _ASM_EXTABLE(1b, 3b) \
10087 : "=r"(err) \
10088- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10089+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10090
10091 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10092- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10093+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10094 "2:\n" \
10095 _ASM_EXTABLE(1b, 2b - 1b) \
10096 : : ltype(x), "m" (__m(addr)))
10097@@ -487,8 +540,12 @@ struct __large_struct { unsigned long bu
10098 * On error, the variable @x is set to zero.
10099 */
10100
10101+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10102+#define __get_user(x, ptr) get_user((x), (ptr))
10103+#else
10104 #define __get_user(x, ptr) \
10105 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10106+#endif
10107
10108 /**
10109 * __put_user: - Write a simple value into user space, with less checking.
10110@@ -510,8 +567,12 @@ struct __large_struct { unsigned long bu
10111 * Returns zero on success, or -EFAULT on error.
10112 */
10113
10114+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10115+#define __put_user(x, ptr) put_user((x), (ptr))
10116+#else
10117 #define __put_user(x, ptr) \
10118 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10119+#endif
10120
10121 #define __get_user_unaligned __get_user
10122 #define __put_user_unaligned __put_user
10123@@ -529,7 +590,7 @@ struct __large_struct { unsigned long bu
10124 #define get_user_ex(x, ptr) do { \
10125 unsigned long __gue_val; \
10126 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10127- (x) = (__force __typeof__(*(ptr)))__gue_val; \
10128+ (x) = (__typeof__(*(ptr)))__gue_val; \
10129 } while (0)
10130
10131 #ifdef CONFIG_X86_WP_WORKS_OK
10132diff -urNp linux-3.0.7/arch/x86/include/asm/vdso.h linux-3.0.7/arch/x86/include/asm/vdso.h
10133--- linux-3.0.7/arch/x86/include/asm/vdso.h 2011-07-21 22:17:23.000000000 -0400
10134+++ linux-3.0.7/arch/x86/include/asm/vdso.h 2011-10-06 04:17:55.000000000 -0400
10135@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
10136 #define VDSO32_SYMBOL(base, name) \
10137 ({ \
10138 extern const char VDSO32_##name[]; \
10139- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
10140+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
10141 })
10142 #endif
10143
10144diff -urNp linux-3.0.7/arch/x86/include/asm/x86_init.h linux-3.0.7/arch/x86/include/asm/x86_init.h
10145--- linux-3.0.7/arch/x86/include/asm/x86_init.h 2011-07-21 22:17:23.000000000 -0400
10146+++ linux-3.0.7/arch/x86/include/asm/x86_init.h 2011-08-23 21:47:55.000000000 -0400
10147@@ -28,7 +28,7 @@ struct x86_init_mpparse {
10148 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
10149 void (*find_smp_config)(void);
10150 void (*get_smp_config)(unsigned int early);
10151-};
10152+} __no_const;
10153
10154 /**
10155 * struct x86_init_resources - platform specific resource related ops
10156@@ -42,7 +42,7 @@ struct x86_init_resources {
10157 void (*probe_roms)(void);
10158 void (*reserve_resources)(void);
10159 char *(*memory_setup)(void);
10160-};
10161+} __no_const;
10162
10163 /**
10164 * struct x86_init_irqs - platform specific interrupt setup
10165@@ -55,7 +55,7 @@ struct x86_init_irqs {
10166 void (*pre_vector_init)(void);
10167 void (*intr_init)(void);
10168 void (*trap_init)(void);
10169-};
10170+} __no_const;
10171
10172 /**
10173 * struct x86_init_oem - oem platform specific customizing functions
10174@@ -65,7 +65,7 @@ struct x86_init_irqs {
10175 struct x86_init_oem {
10176 void (*arch_setup)(void);
10177 void (*banner)(void);
10178-};
10179+} __no_const;
10180
10181 /**
10182 * struct x86_init_mapping - platform specific initial kernel pagetable setup
10183@@ -76,7 +76,7 @@ struct x86_init_oem {
10184 */
10185 struct x86_init_mapping {
10186 void (*pagetable_reserve)(u64 start, u64 end);
10187-};
10188+} __no_const;
10189
10190 /**
10191 * struct x86_init_paging - platform specific paging functions
10192@@ -86,7 +86,7 @@ struct x86_init_mapping {
10193 struct x86_init_paging {
10194 void (*pagetable_setup_start)(pgd_t *base);
10195 void (*pagetable_setup_done)(pgd_t *base);
10196-};
10197+} __no_const;
10198
10199 /**
10200 * struct x86_init_timers - platform specific timer setup
10201@@ -101,7 +101,7 @@ struct x86_init_timers {
10202 void (*tsc_pre_init)(void);
10203 void (*timer_init)(void);
10204 void (*wallclock_init)(void);
10205-};
10206+} __no_const;
10207
10208 /**
10209 * struct x86_init_iommu - platform specific iommu setup
10210@@ -109,7 +109,7 @@ struct x86_init_timers {
10211 */
10212 struct x86_init_iommu {
10213 int (*iommu_init)(void);
10214-};
10215+} __no_const;
10216
10217 /**
10218 * struct x86_init_pci - platform specific pci init functions
10219@@ -123,7 +123,7 @@ struct x86_init_pci {
10220 int (*init)(void);
10221 void (*init_irq)(void);
10222 void (*fixup_irqs)(void);
10223-};
10224+} __no_const;
10225
10226 /**
10227 * struct x86_init_ops - functions for platform specific setup
10228@@ -139,7 +139,7 @@ struct x86_init_ops {
10229 struct x86_init_timers timers;
10230 struct x86_init_iommu iommu;
10231 struct x86_init_pci pci;
10232-};
10233+} __no_const;
10234
10235 /**
10236 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
10237@@ -147,7 +147,7 @@ struct x86_init_ops {
10238 */
10239 struct x86_cpuinit_ops {
10240 void (*setup_percpu_clockev)(void);
10241-};
10242+} __no_const;
10243
10244 /**
10245 * struct x86_platform_ops - platform specific runtime functions
10246@@ -166,7 +166,7 @@ struct x86_platform_ops {
10247 bool (*is_untracked_pat_range)(u64 start, u64 end);
10248 void (*nmi_init)(void);
10249 int (*i8042_detect)(void);
10250-};
10251+} __no_const;
10252
10253 struct pci_dev;
10254
10255@@ -174,7 +174,7 @@ struct x86_msi_ops {
10256 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
10257 void (*teardown_msi_irq)(unsigned int irq);
10258 void (*teardown_msi_irqs)(struct pci_dev *dev);
10259-};
10260+} __no_const;
10261
10262 extern struct x86_init_ops x86_init;
10263 extern struct x86_cpuinit_ops x86_cpuinit;
10264diff -urNp linux-3.0.7/arch/x86/include/asm/xsave.h linux-3.0.7/arch/x86/include/asm/xsave.h
10265--- linux-3.0.7/arch/x86/include/asm/xsave.h 2011-07-21 22:17:23.000000000 -0400
10266+++ linux-3.0.7/arch/x86/include/asm/xsave.h 2011-10-06 04:17:55.000000000 -0400
10267@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
10268 {
10269 int err;
10270
10271+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10272+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10273+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10274+#endif
10275+
10276 /*
10277 * Clear the xsave header first, so that reserved fields are
10278 * initialized to zero.
10279@@ -96,10 +101,15 @@ static inline int xsave_user(struct xsav
10280 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
10281 {
10282 int err;
10283- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
10284+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
10285 u32 lmask = mask;
10286 u32 hmask = mask >> 32;
10287
10288+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10289+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10290+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10291+#endif
10292+
10293 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10294 "2:\n"
10295 ".section .fixup,\"ax\"\n"
10296diff -urNp linux-3.0.7/arch/x86/Kconfig linux-3.0.7/arch/x86/Kconfig
10297--- linux-3.0.7/arch/x86/Kconfig 2011-07-21 22:17:23.000000000 -0400
10298+++ linux-3.0.7/arch/x86/Kconfig 2011-09-17 00:58:36.000000000 -0400
10299@@ -229,7 +229,7 @@ config X86_HT
10300
10301 config X86_32_LAZY_GS
10302 def_bool y
10303- depends on X86_32 && !CC_STACKPROTECTOR
10304+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10305
10306 config ARCH_HWEIGHT_CFLAGS
10307 string
10308@@ -1018,7 +1018,7 @@ choice
10309
10310 config NOHIGHMEM
10311 bool "off"
10312- depends on !X86_NUMAQ
10313+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10314 ---help---
10315 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10316 However, the address space of 32-bit x86 processors is only 4
10317@@ -1055,7 +1055,7 @@ config NOHIGHMEM
10318
10319 config HIGHMEM4G
10320 bool "4GB"
10321- depends on !X86_NUMAQ
10322+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10323 ---help---
10324 Select this if you have a 32-bit processor and between 1 and 4
10325 gigabytes of physical RAM.
10326@@ -1109,7 +1109,7 @@ config PAGE_OFFSET
10327 hex
10328 default 0xB0000000 if VMSPLIT_3G_OPT
10329 default 0x80000000 if VMSPLIT_2G
10330- default 0x78000000 if VMSPLIT_2G_OPT
10331+ default 0x70000000 if VMSPLIT_2G_OPT
10332 default 0x40000000 if VMSPLIT_1G
10333 default 0xC0000000
10334 depends on X86_32
10335@@ -1483,6 +1483,7 @@ config SECCOMP
10336
10337 config CC_STACKPROTECTOR
10338 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10339+ depends on X86_64 || !PAX_MEMORY_UDEREF
10340 ---help---
10341 This option turns on the -fstack-protector GCC feature. This
10342 feature puts, at the beginning of functions, a canary value on
10343@@ -1540,6 +1541,7 @@ config KEXEC_JUMP
10344 config PHYSICAL_START
10345 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10346 default "0x1000000"
10347+ range 0x400000 0x40000000
10348 ---help---
10349 This gives the physical address where the kernel is loaded.
10350
10351@@ -1603,6 +1605,7 @@ config X86_NEED_RELOCS
10352 config PHYSICAL_ALIGN
10353 hex "Alignment value to which kernel should be aligned" if X86_32
10354 default "0x1000000"
10355+ range 0x400000 0x1000000 if PAX_KERNEXEC
10356 range 0x2000 0x1000000
10357 ---help---
10358 This value puts the alignment restrictions on physical address
10359@@ -1634,9 +1637,10 @@ config HOTPLUG_CPU
10360 Say N if you want to disable CPU hotplug.
10361
10362 config COMPAT_VDSO
10363- def_bool y
10364+ def_bool n
10365 prompt "Compat VDSO support"
10366 depends on X86_32 || IA32_EMULATION
10367+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
10368 ---help---
10369 Map the 32-bit VDSO to the predictable old-style address too.
10370
10371diff -urNp linux-3.0.7/arch/x86/Kconfig.cpu linux-3.0.7/arch/x86/Kconfig.cpu
10372--- linux-3.0.7/arch/x86/Kconfig.cpu 2011-07-21 22:17:23.000000000 -0400
10373+++ linux-3.0.7/arch/x86/Kconfig.cpu 2011-08-23 21:47:55.000000000 -0400
10374@@ -338,7 +338,7 @@ config X86_PPRO_FENCE
10375
10376 config X86_F00F_BUG
10377 def_bool y
10378- depends on M586MMX || M586TSC || M586 || M486 || M386
10379+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
10380
10381 config X86_INVD_BUG
10382 def_bool y
10383@@ -362,7 +362,7 @@ config X86_POPAD_OK
10384
10385 config X86_ALIGNMENT_16
10386 def_bool y
10387- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10388+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10389
10390 config X86_INTEL_USERCOPY
10391 def_bool y
10392@@ -408,7 +408,7 @@ config X86_CMPXCHG64
10393 # generates cmov.
10394 config X86_CMOV
10395 def_bool y
10396- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10397+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10398
10399 config X86_MINIMUM_CPU_FAMILY
10400 int
10401diff -urNp linux-3.0.7/arch/x86/Kconfig.debug linux-3.0.7/arch/x86/Kconfig.debug
10402--- linux-3.0.7/arch/x86/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
10403+++ linux-3.0.7/arch/x86/Kconfig.debug 2011-08-23 21:47:55.000000000 -0400
10404@@ -81,7 +81,7 @@ config X86_PTDUMP
10405 config DEBUG_RODATA
10406 bool "Write protect kernel read-only data structures"
10407 default y
10408- depends on DEBUG_KERNEL
10409+ depends on DEBUG_KERNEL && BROKEN
10410 ---help---
10411 Mark the kernel read-only data as write-protected in the pagetables,
10412 in order to catch accidental (and incorrect) writes to such const
10413@@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
10414
10415 config DEBUG_SET_MODULE_RONX
10416 bool "Set loadable kernel module data as NX and text as RO"
10417- depends on MODULES
10418+ depends on MODULES && BROKEN
10419 ---help---
10420 This option helps catch unintended modifications to loadable
10421 kernel module's text and read-only data. It also prevents execution
10422diff -urNp linux-3.0.7/arch/x86/kernel/acpi/realmode/Makefile linux-3.0.7/arch/x86/kernel/acpi/realmode/Makefile
10423--- linux-3.0.7/arch/x86/kernel/acpi/realmode/Makefile 2011-07-21 22:17:23.000000000 -0400
10424+++ linux-3.0.7/arch/x86/kernel/acpi/realmode/Makefile 2011-08-23 21:47:55.000000000 -0400
10425@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
10426 $(call cc-option, -fno-stack-protector) \
10427 $(call cc-option, -mpreferred-stack-boundary=2)
10428 KBUILD_CFLAGS += $(call cc-option, -m32)
10429+ifdef CONSTIFY_PLUGIN
10430+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
10431+endif
10432 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10433 GCOV_PROFILE := n
10434
10435diff -urNp linux-3.0.7/arch/x86/kernel/acpi/realmode/wakeup.S linux-3.0.7/arch/x86/kernel/acpi/realmode/wakeup.S
10436--- linux-3.0.7/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-21 22:17:23.000000000 -0400
10437+++ linux-3.0.7/arch/x86/kernel/acpi/realmode/wakeup.S 2011-08-23 21:48:14.000000000 -0400
10438@@ -108,6 +108,9 @@ wakeup_code:
10439 /* Do any other stuff... */
10440
10441 #ifndef CONFIG_64BIT
10442+ /* Recheck NX bit overrides (64bit path does this in trampoline */
10443+ call verify_cpu
10444+
10445 /* This could also be done in C code... */
10446 movl pmode_cr3, %eax
10447 movl %eax, %cr3
10448@@ -131,6 +134,7 @@ wakeup_code:
10449 movl pmode_cr0, %eax
10450 movl %eax, %cr0
10451 jmp pmode_return
10452+# include "../../verify_cpu.S"
10453 #else
10454 pushw $0
10455 pushw trampoline_segment
10456diff -urNp linux-3.0.7/arch/x86/kernel/acpi/sleep.c linux-3.0.7/arch/x86/kernel/acpi/sleep.c
10457--- linux-3.0.7/arch/x86/kernel/acpi/sleep.c 2011-07-21 22:17:23.000000000 -0400
10458+++ linux-3.0.7/arch/x86/kernel/acpi/sleep.c 2011-08-23 21:47:55.000000000 -0400
10459@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
10460 header->trampoline_segment = trampoline_address() >> 4;
10461 #ifdef CONFIG_SMP
10462 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
10463+
10464+ pax_open_kernel();
10465 early_gdt_descr.address =
10466 (unsigned long)get_cpu_gdt_table(smp_processor_id());
10467+ pax_close_kernel();
10468+
10469 initial_gs = per_cpu_offset(smp_processor_id());
10470 #endif
10471 initial_code = (unsigned long)wakeup_long64;
10472diff -urNp linux-3.0.7/arch/x86/kernel/acpi/wakeup_32.S linux-3.0.7/arch/x86/kernel/acpi/wakeup_32.S
10473--- linux-3.0.7/arch/x86/kernel/acpi/wakeup_32.S 2011-07-21 22:17:23.000000000 -0400
10474+++ linux-3.0.7/arch/x86/kernel/acpi/wakeup_32.S 2011-08-23 21:47:55.000000000 -0400
10475@@ -30,13 +30,11 @@ wakeup_pmode_return:
10476 # and restore the stack ... but you need gdt for this to work
10477 movl saved_context_esp, %esp
10478
10479- movl %cs:saved_magic, %eax
10480- cmpl $0x12345678, %eax
10481+ cmpl $0x12345678, saved_magic
10482 jne bogus_magic
10483
10484 # jump to place where we left off
10485- movl saved_eip, %eax
10486- jmp *%eax
10487+ jmp *(saved_eip)
10488
10489 bogus_magic:
10490 jmp bogus_magic
10491diff -urNp linux-3.0.7/arch/x86/kernel/alternative.c linux-3.0.7/arch/x86/kernel/alternative.c
10492--- linux-3.0.7/arch/x86/kernel/alternative.c 2011-07-21 22:17:23.000000000 -0400
10493+++ linux-3.0.7/arch/x86/kernel/alternative.c 2011-08-23 21:47:55.000000000 -0400
10494@@ -313,7 +313,7 @@ static void alternatives_smp_lock(const
10495 if (!*poff || ptr < text || ptr >= text_end)
10496 continue;
10497 /* turn DS segment override prefix into lock prefix */
10498- if (*ptr == 0x3e)
10499+ if (*ktla_ktva(ptr) == 0x3e)
10500 text_poke(ptr, ((unsigned char []){0xf0}), 1);
10501 };
10502 mutex_unlock(&text_mutex);
10503@@ -334,7 +334,7 @@ static void alternatives_smp_unlock(cons
10504 if (!*poff || ptr < text || ptr >= text_end)
10505 continue;
10506 /* turn lock prefix into DS segment override prefix */
10507- if (*ptr == 0xf0)
10508+ if (*ktla_ktva(ptr) == 0xf0)
10509 text_poke(ptr, ((unsigned char []){0x3E}), 1);
10510 };
10511 mutex_unlock(&text_mutex);
10512@@ -503,7 +503,7 @@ void __init_or_module apply_paravirt(str
10513
10514 BUG_ON(p->len > MAX_PATCH_LEN);
10515 /* prep the buffer with the original instructions */
10516- memcpy(insnbuf, p->instr, p->len);
10517+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10518 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10519 (unsigned long)p->instr, p->len);
10520
10521@@ -571,7 +571,7 @@ void __init alternative_instructions(voi
10522 if (smp_alt_once)
10523 free_init_pages("SMP alternatives",
10524 (unsigned long)__smp_locks,
10525- (unsigned long)__smp_locks_end);
10526+ PAGE_ALIGN((unsigned long)__smp_locks_end));
10527
10528 restart_nmi();
10529 }
10530@@ -588,13 +588,17 @@ void __init alternative_instructions(voi
10531 * instructions. And on the local CPU you need to be protected again NMI or MCE
10532 * handlers seeing an inconsistent instruction while you patch.
10533 */
10534-void *__init_or_module text_poke_early(void *addr, const void *opcode,
10535+void *__kprobes text_poke_early(void *addr, const void *opcode,
10536 size_t len)
10537 {
10538 unsigned long flags;
10539 local_irq_save(flags);
10540- memcpy(addr, opcode, len);
10541+
10542+ pax_open_kernel();
10543+ memcpy(ktla_ktva(addr), opcode, len);
10544 sync_core();
10545+ pax_close_kernel();
10546+
10547 local_irq_restore(flags);
10548 /* Could also do a CLFLUSH here to speed up CPU recovery; but
10549 that causes hangs on some VIA CPUs. */
10550@@ -616,36 +620,22 @@ void *__init_or_module text_poke_early(v
10551 */
10552 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10553 {
10554- unsigned long flags;
10555- char *vaddr;
10556+ unsigned char *vaddr = ktla_ktva(addr);
10557 struct page *pages[2];
10558- int i;
10559+ size_t i;
10560
10561 if (!core_kernel_text((unsigned long)addr)) {
10562- pages[0] = vmalloc_to_page(addr);
10563- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10564+ pages[0] = vmalloc_to_page(vaddr);
10565+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10566 } else {
10567- pages[0] = virt_to_page(addr);
10568+ pages[0] = virt_to_page(vaddr);
10569 WARN_ON(!PageReserved(pages[0]));
10570- pages[1] = virt_to_page(addr + PAGE_SIZE);
10571+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10572 }
10573 BUG_ON(!pages[0]);
10574- local_irq_save(flags);
10575- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10576- if (pages[1])
10577- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10578- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10579- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10580- clear_fixmap(FIX_TEXT_POKE0);
10581- if (pages[1])
10582- clear_fixmap(FIX_TEXT_POKE1);
10583- local_flush_tlb();
10584- sync_core();
10585- /* Could also do a CLFLUSH here to speed up CPU recovery; but
10586- that causes hangs on some VIA CPUs. */
10587+ text_poke_early(addr, opcode, len);
10588 for (i = 0; i < len; i++)
10589- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10590- local_irq_restore(flags);
10591+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
10592 return addr;
10593 }
10594
10595diff -urNp linux-3.0.7/arch/x86/kernel/apic/apic.c linux-3.0.7/arch/x86/kernel/apic/apic.c
10596--- linux-3.0.7/arch/x86/kernel/apic/apic.c 2011-07-21 22:17:23.000000000 -0400
10597+++ linux-3.0.7/arch/x86/kernel/apic/apic.c 2011-08-23 21:48:14.000000000 -0400
10598@@ -173,7 +173,7 @@ int first_system_vector = 0xfe;
10599 /*
10600 * Debug level, exported for io_apic.c
10601 */
10602-unsigned int apic_verbosity;
10603+int apic_verbosity;
10604
10605 int pic_mode;
10606
10607@@ -1834,7 +1834,7 @@ void smp_error_interrupt(struct pt_regs
10608 apic_write(APIC_ESR, 0);
10609 v1 = apic_read(APIC_ESR);
10610 ack_APIC_irq();
10611- atomic_inc(&irq_err_count);
10612+ atomic_inc_unchecked(&irq_err_count);
10613
10614 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
10615 smp_processor_id(), v0 , v1);
10616@@ -2190,6 +2190,8 @@ static int __cpuinit apic_cluster_num(vo
10617 u16 *bios_cpu_apicid;
10618 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
10619
10620+ pax_track_stack();
10621+
10622 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
10623 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
10624
10625diff -urNp linux-3.0.7/arch/x86/kernel/apic/io_apic.c linux-3.0.7/arch/x86/kernel/apic/io_apic.c
10626--- linux-3.0.7/arch/x86/kernel/apic/io_apic.c 2011-07-21 22:17:23.000000000 -0400
10627+++ linux-3.0.7/arch/x86/kernel/apic/io_apic.c 2011-08-23 21:47:55.000000000 -0400
10628@@ -1028,7 +1028,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
10629 }
10630 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
10631
10632-void lock_vector_lock(void)
10633+void lock_vector_lock(void) __acquires(vector_lock)
10634 {
10635 /* Used to the online set of cpus does not change
10636 * during assign_irq_vector.
10637@@ -1036,7 +1036,7 @@ void lock_vector_lock(void)
10638 raw_spin_lock(&vector_lock);
10639 }
10640
10641-void unlock_vector_lock(void)
10642+void unlock_vector_lock(void) __releases(vector_lock)
10643 {
10644 raw_spin_unlock(&vector_lock);
10645 }
10646@@ -2364,7 +2364,7 @@ static void ack_apic_edge(struct irq_dat
10647 ack_APIC_irq();
10648 }
10649
10650-atomic_t irq_mis_count;
10651+atomic_unchecked_t irq_mis_count;
10652
10653 /*
10654 * IO-APIC versions below 0x20 don't support EOI register.
10655@@ -2472,7 +2472,7 @@ static void ack_apic_level(struct irq_da
10656 * at the cpu.
10657 */
10658 if (!(v & (1 << (i & 0x1f)))) {
10659- atomic_inc(&irq_mis_count);
10660+ atomic_inc_unchecked(&irq_mis_count);
10661
10662 eoi_ioapic_irq(irq, cfg);
10663 }
10664diff -urNp linux-3.0.7/arch/x86/kernel/apm_32.c linux-3.0.7/arch/x86/kernel/apm_32.c
10665--- linux-3.0.7/arch/x86/kernel/apm_32.c 2011-07-21 22:17:23.000000000 -0400
10666+++ linux-3.0.7/arch/x86/kernel/apm_32.c 2011-08-23 21:47:55.000000000 -0400
10667@@ -413,7 +413,7 @@ static DEFINE_MUTEX(apm_mutex);
10668 * This is for buggy BIOS's that refer to (real mode) segment 0x40
10669 * even though they are called in protected mode.
10670 */
10671-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
10672+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
10673 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
10674
10675 static const char driver_version[] = "1.16ac"; /* no spaces */
10676@@ -591,7 +591,10 @@ static long __apm_bios_call(void *_call)
10677 BUG_ON(cpu != 0);
10678 gdt = get_cpu_gdt_table(cpu);
10679 save_desc_40 = gdt[0x40 / 8];
10680+
10681+ pax_open_kernel();
10682 gdt[0x40 / 8] = bad_bios_desc;
10683+ pax_close_kernel();
10684
10685 apm_irq_save(flags);
10686 APM_DO_SAVE_SEGS;
10687@@ -600,7 +603,11 @@ static long __apm_bios_call(void *_call)
10688 &call->esi);
10689 APM_DO_RESTORE_SEGS;
10690 apm_irq_restore(flags);
10691+
10692+ pax_open_kernel();
10693 gdt[0x40 / 8] = save_desc_40;
10694+ pax_close_kernel();
10695+
10696 put_cpu();
10697
10698 return call->eax & 0xff;
10699@@ -667,7 +674,10 @@ static long __apm_bios_call_simple(void
10700 BUG_ON(cpu != 0);
10701 gdt = get_cpu_gdt_table(cpu);
10702 save_desc_40 = gdt[0x40 / 8];
10703+
10704+ pax_open_kernel();
10705 gdt[0x40 / 8] = bad_bios_desc;
10706+ pax_close_kernel();
10707
10708 apm_irq_save(flags);
10709 APM_DO_SAVE_SEGS;
10710@@ -675,7 +685,11 @@ static long __apm_bios_call_simple(void
10711 &call->eax);
10712 APM_DO_RESTORE_SEGS;
10713 apm_irq_restore(flags);
10714+
10715+ pax_open_kernel();
10716 gdt[0x40 / 8] = save_desc_40;
10717+ pax_close_kernel();
10718+
10719 put_cpu();
10720 return error;
10721 }
10722@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
10723 * code to that CPU.
10724 */
10725 gdt = get_cpu_gdt_table(0);
10726+
10727+ pax_open_kernel();
10728 set_desc_base(&gdt[APM_CS >> 3],
10729 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
10730 set_desc_base(&gdt[APM_CS_16 >> 3],
10731 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
10732 set_desc_base(&gdt[APM_DS >> 3],
10733 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
10734+ pax_close_kernel();
10735
10736 proc_create("apm", 0, NULL, &apm_file_ops);
10737
10738diff -urNp linux-3.0.7/arch/x86/kernel/asm-offsets_64.c linux-3.0.7/arch/x86/kernel/asm-offsets_64.c
10739--- linux-3.0.7/arch/x86/kernel/asm-offsets_64.c 2011-07-21 22:17:23.000000000 -0400
10740+++ linux-3.0.7/arch/x86/kernel/asm-offsets_64.c 2011-08-23 21:47:55.000000000 -0400
10741@@ -69,6 +69,7 @@ int main(void)
10742 BLANK();
10743 #undef ENTRY
10744
10745+ DEFINE(TSS_size, sizeof(struct tss_struct));
10746 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
10747 BLANK();
10748
10749diff -urNp linux-3.0.7/arch/x86/kernel/asm-offsets.c linux-3.0.7/arch/x86/kernel/asm-offsets.c
10750--- linux-3.0.7/arch/x86/kernel/asm-offsets.c 2011-07-21 22:17:23.000000000 -0400
10751+++ linux-3.0.7/arch/x86/kernel/asm-offsets.c 2011-08-23 21:47:55.000000000 -0400
10752@@ -33,6 +33,8 @@ void common(void) {
10753 OFFSET(TI_status, thread_info, status);
10754 OFFSET(TI_addr_limit, thread_info, addr_limit);
10755 OFFSET(TI_preempt_count, thread_info, preempt_count);
10756+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
10757+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
10758
10759 BLANK();
10760 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
10761@@ -53,8 +55,26 @@ void common(void) {
10762 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
10763 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
10764 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
10765+
10766+#ifdef CONFIG_PAX_KERNEXEC
10767+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
10768+#endif
10769+
10770+#ifdef CONFIG_PAX_MEMORY_UDEREF
10771+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
10772+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
10773+#ifdef CONFIG_X86_64
10774+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
10775+#endif
10776 #endif
10777
10778+#endif
10779+
10780+ BLANK();
10781+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
10782+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
10783+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
10784+
10785 #ifdef CONFIG_XEN
10786 BLANK();
10787 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
10788diff -urNp linux-3.0.7/arch/x86/kernel/cpu/amd.c linux-3.0.7/arch/x86/kernel/cpu/amd.c
10789--- linux-3.0.7/arch/x86/kernel/cpu/amd.c 2011-07-21 22:17:23.000000000 -0400
10790+++ linux-3.0.7/arch/x86/kernel/cpu/amd.c 2011-08-23 21:47:55.000000000 -0400
10791@@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
10792 unsigned int size)
10793 {
10794 /* AMD errata T13 (order #21922) */
10795- if ((c->x86 == 6)) {
10796+ if (c->x86 == 6) {
10797 /* Duron Rev A0 */
10798 if (c->x86_model == 3 && c->x86_mask == 0)
10799 size = 64;
10800diff -urNp linux-3.0.7/arch/x86/kernel/cpu/common.c linux-3.0.7/arch/x86/kernel/cpu/common.c
10801--- linux-3.0.7/arch/x86/kernel/cpu/common.c 2011-07-21 22:17:23.000000000 -0400
10802+++ linux-3.0.7/arch/x86/kernel/cpu/common.c 2011-08-23 21:47:55.000000000 -0400
10803@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
10804
10805 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
10806
10807-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
10808-#ifdef CONFIG_X86_64
10809- /*
10810- * We need valid kernel segments for data and code in long mode too
10811- * IRET will check the segment types kkeil 2000/10/28
10812- * Also sysret mandates a special GDT layout
10813- *
10814- * TLS descriptors are currently at a different place compared to i386.
10815- * Hopefully nobody expects them at a fixed place (Wine?)
10816- */
10817- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
10818- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
10819- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
10820- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
10821- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
10822- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
10823-#else
10824- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
10825- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10826- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
10827- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
10828- /*
10829- * Segments used for calling PnP BIOS have byte granularity.
10830- * They code segments and data segments have fixed 64k limits,
10831- * the transfer segment sizes are set at run time.
10832- */
10833- /* 32-bit code */
10834- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10835- /* 16-bit code */
10836- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10837- /* 16-bit data */
10838- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
10839- /* 16-bit data */
10840- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
10841- /* 16-bit data */
10842- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
10843- /*
10844- * The APM segments have byte granularity and their bases
10845- * are set at run time. All have 64k limits.
10846- */
10847- /* 32-bit code */
10848- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10849- /* 16-bit code */
10850- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10851- /* data */
10852- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
10853-
10854- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10855- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10856- GDT_STACK_CANARY_INIT
10857-#endif
10858-} };
10859-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
10860-
10861 static int __init x86_xsave_setup(char *s)
10862 {
10863 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
10864@@ -371,7 +317,7 @@ void switch_to_new_gdt(int cpu)
10865 {
10866 struct desc_ptr gdt_descr;
10867
10868- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
10869+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
10870 gdt_descr.size = GDT_SIZE - 1;
10871 load_gdt(&gdt_descr);
10872 /* Reload the per-cpu base */
10873@@ -840,6 +786,10 @@ static void __cpuinit identify_cpu(struc
10874 /* Filter out anything that depends on CPUID levels we don't have */
10875 filter_cpuid_features(c, true);
10876
10877+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
10878+ setup_clear_cpu_cap(X86_FEATURE_SEP);
10879+#endif
10880+
10881 /* If the model name is still unset, do table lookup. */
10882 if (!c->x86_model_id[0]) {
10883 const char *p;
10884@@ -1019,6 +969,9 @@ static __init int setup_disablecpuid(cha
10885 }
10886 __setup("clearcpuid=", setup_disablecpuid);
10887
10888+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
10889+EXPORT_PER_CPU_SYMBOL(current_tinfo);
10890+
10891 #ifdef CONFIG_X86_64
10892 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
10893
10894@@ -1034,7 +987,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
10895 EXPORT_PER_CPU_SYMBOL(current_task);
10896
10897 DEFINE_PER_CPU(unsigned long, kernel_stack) =
10898- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
10899+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
10900 EXPORT_PER_CPU_SYMBOL(kernel_stack);
10901
10902 DEFINE_PER_CPU(char *, irq_stack_ptr) =
10903@@ -1099,7 +1052,7 @@ struct pt_regs * __cpuinit idle_regs(str
10904 {
10905 memset(regs, 0, sizeof(struct pt_regs));
10906 regs->fs = __KERNEL_PERCPU;
10907- regs->gs = __KERNEL_STACK_CANARY;
10908+ savesegment(gs, regs->gs);
10909
10910 return regs;
10911 }
10912@@ -1154,7 +1107,7 @@ void __cpuinit cpu_init(void)
10913 int i;
10914
10915 cpu = stack_smp_processor_id();
10916- t = &per_cpu(init_tss, cpu);
10917+ t = init_tss + cpu;
10918 oist = &per_cpu(orig_ist, cpu);
10919
10920 #ifdef CONFIG_NUMA
10921@@ -1180,7 +1133,7 @@ void __cpuinit cpu_init(void)
10922 switch_to_new_gdt(cpu);
10923 loadsegment(fs, 0);
10924
10925- load_idt((const struct desc_ptr *)&idt_descr);
10926+ load_idt(&idt_descr);
10927
10928 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
10929 syscall_init();
10930@@ -1189,7 +1142,6 @@ void __cpuinit cpu_init(void)
10931 wrmsrl(MSR_KERNEL_GS_BASE, 0);
10932 barrier();
10933
10934- x86_configure_nx();
10935 if (cpu != 0)
10936 enable_x2apic();
10937
10938@@ -1243,7 +1195,7 @@ void __cpuinit cpu_init(void)
10939 {
10940 int cpu = smp_processor_id();
10941 struct task_struct *curr = current;
10942- struct tss_struct *t = &per_cpu(init_tss, cpu);
10943+ struct tss_struct *t = init_tss + cpu;
10944 struct thread_struct *thread = &curr->thread;
10945
10946 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
10947diff -urNp linux-3.0.7/arch/x86/kernel/cpu/intel.c linux-3.0.7/arch/x86/kernel/cpu/intel.c
10948--- linux-3.0.7/arch/x86/kernel/cpu/intel.c 2011-09-02 18:11:26.000000000 -0400
10949+++ linux-3.0.7/arch/x86/kernel/cpu/intel.c 2011-08-29 23:30:14.000000000 -0400
10950@@ -172,7 +172,7 @@ static void __cpuinit trap_init_f00f_bug
10951 * Update the IDT descriptor and reload the IDT so that
10952 * it uses the read-only mapped virtual address.
10953 */
10954- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
10955+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
10956 load_idt(&idt_descr);
10957 }
10958 #endif
10959diff -urNp linux-3.0.7/arch/x86/kernel/cpu/Makefile linux-3.0.7/arch/x86/kernel/cpu/Makefile
10960--- linux-3.0.7/arch/x86/kernel/cpu/Makefile 2011-07-21 22:17:23.000000000 -0400
10961+++ linux-3.0.7/arch/x86/kernel/cpu/Makefile 2011-08-23 21:47:55.000000000 -0400
10962@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
10963 CFLAGS_REMOVE_perf_event.o = -pg
10964 endif
10965
10966-# Make sure load_percpu_segment has no stackprotector
10967-nostackp := $(call cc-option, -fno-stack-protector)
10968-CFLAGS_common.o := $(nostackp)
10969-
10970 obj-y := intel_cacheinfo.o scattered.o topology.o
10971 obj-y += proc.o capflags.o powerflags.o common.o
10972 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
10973diff -urNp linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce.c linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce.c
10974--- linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce.c 2011-07-21 22:17:23.000000000 -0400
10975+++ linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce.c 2011-08-23 21:47:55.000000000 -0400
10976@@ -46,6 +46,7 @@
10977 #include <asm/ipi.h>
10978 #include <asm/mce.h>
10979 #include <asm/msr.h>
10980+#include <asm/local.h>
10981
10982 #include "mce-internal.h"
10983
10984@@ -208,7 +209,7 @@ static void print_mce(struct mce *m)
10985 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
10986 m->cs, m->ip);
10987
10988- if (m->cs == __KERNEL_CS)
10989+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
10990 print_symbol("{%s}", m->ip);
10991 pr_cont("\n");
10992 }
10993@@ -236,10 +237,10 @@ static void print_mce(struct mce *m)
10994
10995 #define PANIC_TIMEOUT 5 /* 5 seconds */
10996
10997-static atomic_t mce_paniced;
10998+static atomic_unchecked_t mce_paniced;
10999
11000 static int fake_panic;
11001-static atomic_t mce_fake_paniced;
11002+static atomic_unchecked_t mce_fake_paniced;
11003
11004 /* Panic in progress. Enable interrupts and wait for final IPI */
11005 static void wait_for_panic(void)
11006@@ -263,7 +264,7 @@ static void mce_panic(char *msg, struct
11007 /*
11008 * Make sure only one CPU runs in machine check panic
11009 */
11010- if (atomic_inc_return(&mce_paniced) > 1)
11011+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
11012 wait_for_panic();
11013 barrier();
11014
11015@@ -271,7 +272,7 @@ static void mce_panic(char *msg, struct
11016 console_verbose();
11017 } else {
11018 /* Don't log too much for fake panic */
11019- if (atomic_inc_return(&mce_fake_paniced) > 1)
11020+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
11021 return;
11022 }
11023 /* First print corrected ones that are still unlogged */
11024@@ -638,7 +639,7 @@ static int mce_timed_out(u64 *t)
11025 * might have been modified by someone else.
11026 */
11027 rmb();
11028- if (atomic_read(&mce_paniced))
11029+ if (atomic_read_unchecked(&mce_paniced))
11030 wait_for_panic();
11031 if (!monarch_timeout)
11032 goto out;
11033@@ -1452,14 +1453,14 @@ void __cpuinit mcheck_cpu_init(struct cp
11034 */
11035
11036 static DEFINE_SPINLOCK(mce_state_lock);
11037-static int open_count; /* #times opened */
11038+static local_t open_count; /* #times opened */
11039 static int open_exclu; /* already open exclusive? */
11040
11041 static int mce_open(struct inode *inode, struct file *file)
11042 {
11043 spin_lock(&mce_state_lock);
11044
11045- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
11046+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
11047 spin_unlock(&mce_state_lock);
11048
11049 return -EBUSY;
11050@@ -1467,7 +1468,7 @@ static int mce_open(struct inode *inode,
11051
11052 if (file->f_flags & O_EXCL)
11053 open_exclu = 1;
11054- open_count++;
11055+ local_inc(&open_count);
11056
11057 spin_unlock(&mce_state_lock);
11058
11059@@ -1478,7 +1479,7 @@ static int mce_release(struct inode *ino
11060 {
11061 spin_lock(&mce_state_lock);
11062
11063- open_count--;
11064+ local_dec(&open_count);
11065 open_exclu = 0;
11066
11067 spin_unlock(&mce_state_lock);
11068@@ -2163,7 +2164,7 @@ struct dentry *mce_get_debugfs_dir(void)
11069 static void mce_reset(void)
11070 {
11071 cpu_missing = 0;
11072- atomic_set(&mce_fake_paniced, 0);
11073+ atomic_set_unchecked(&mce_fake_paniced, 0);
11074 atomic_set(&mce_executing, 0);
11075 atomic_set(&mce_callin, 0);
11076 atomic_set(&global_nwo, 0);
11077diff -urNp linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce-inject.c
11078--- linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-07-21 22:17:23.000000000 -0400
11079+++ linux-3.0.7/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-23 21:47:55.000000000 -0400
11080@@ -215,7 +215,9 @@ static int inject_init(void)
11081 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
11082 return -ENOMEM;
11083 printk(KERN_INFO "Machine check injector initialized\n");
11084- mce_chrdev_ops.write = mce_write;
11085+ pax_open_kernel();
11086+ *(void **)&mce_chrdev_ops.write = mce_write;
11087+ pax_close_kernel();
11088 register_die_notifier(&mce_raise_nb);
11089 return 0;
11090 }
11091diff -urNp linux-3.0.7/arch/x86/kernel/cpu/mtrr/main.c linux-3.0.7/arch/x86/kernel/cpu/mtrr/main.c
11092--- linux-3.0.7/arch/x86/kernel/cpu/mtrr/main.c 2011-09-02 18:11:26.000000000 -0400
11093+++ linux-3.0.7/arch/x86/kernel/cpu/mtrr/main.c 2011-08-29 23:26:21.000000000 -0400
11094@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
11095 u64 size_or_mask, size_and_mask;
11096 static bool mtrr_aps_delayed_init;
11097
11098-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
11099+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
11100
11101 const struct mtrr_ops *mtrr_if;
11102
11103diff -urNp linux-3.0.7/arch/x86/kernel/cpu/mtrr/mtrr.h linux-3.0.7/arch/x86/kernel/cpu/mtrr/mtrr.h
11104--- linux-3.0.7/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-07-21 22:17:23.000000000 -0400
11105+++ linux-3.0.7/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-08-26 19:49:56.000000000 -0400
11106@@ -25,7 +25,7 @@ struct mtrr_ops {
11107 int (*validate_add_page)(unsigned long base, unsigned long size,
11108 unsigned int type);
11109 int (*have_wrcomb)(void);
11110-};
11111+} __do_const;
11112
11113 extern int generic_get_free_region(unsigned long base, unsigned long size,
11114 int replace_reg);
11115diff -urNp linux-3.0.7/arch/x86/kernel/cpu/perf_event.c linux-3.0.7/arch/x86/kernel/cpu/perf_event.c
11116--- linux-3.0.7/arch/x86/kernel/cpu/perf_event.c 2011-10-16 21:54:53.000000000 -0400
11117+++ linux-3.0.7/arch/x86/kernel/cpu/perf_event.c 2011-10-16 21:55:27.000000000 -0400
11118@@ -781,6 +781,8 @@ static int x86_schedule_events(struct cp
11119 int i, j, w, wmax, num = 0;
11120 struct hw_perf_event *hwc;
11121
11122+ pax_track_stack();
11123+
11124 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
11125
11126 for (i = 0; i < n; i++) {
11127@@ -1875,7 +1877,7 @@ perf_callchain_user(struct perf_callchai
11128 break;
11129
11130 perf_callchain_store(entry, frame.return_address);
11131- fp = frame.next_frame;
11132+ fp = (const void __force_user *)frame.next_frame;
11133 }
11134 }
11135
11136diff -urNp linux-3.0.7/arch/x86/kernel/crash.c linux-3.0.7/arch/x86/kernel/crash.c
11137--- linux-3.0.7/arch/x86/kernel/crash.c 2011-07-21 22:17:23.000000000 -0400
11138+++ linux-3.0.7/arch/x86/kernel/crash.c 2011-08-23 21:47:55.000000000 -0400
11139@@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
11140 regs = args->regs;
11141
11142 #ifdef CONFIG_X86_32
11143- if (!user_mode_vm(regs)) {
11144+ if (!user_mode(regs)) {
11145 crash_fixup_ss_esp(&fixed_regs, regs);
11146 regs = &fixed_regs;
11147 }
11148diff -urNp linux-3.0.7/arch/x86/kernel/doublefault_32.c linux-3.0.7/arch/x86/kernel/doublefault_32.c
11149--- linux-3.0.7/arch/x86/kernel/doublefault_32.c 2011-07-21 22:17:23.000000000 -0400
11150+++ linux-3.0.7/arch/x86/kernel/doublefault_32.c 2011-08-23 21:47:55.000000000 -0400
11151@@ -11,7 +11,7 @@
11152
11153 #define DOUBLEFAULT_STACKSIZE (1024)
11154 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
11155-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
11156+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
11157
11158 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
11159
11160@@ -21,7 +21,7 @@ static void doublefault_fn(void)
11161 unsigned long gdt, tss;
11162
11163 store_gdt(&gdt_desc);
11164- gdt = gdt_desc.address;
11165+ gdt = (unsigned long)gdt_desc.address;
11166
11167 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
11168
11169@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
11170 /* 0x2 bit is always set */
11171 .flags = X86_EFLAGS_SF | 0x2,
11172 .sp = STACK_START,
11173- .es = __USER_DS,
11174+ .es = __KERNEL_DS,
11175 .cs = __KERNEL_CS,
11176 .ss = __KERNEL_DS,
11177- .ds = __USER_DS,
11178+ .ds = __KERNEL_DS,
11179 .fs = __KERNEL_PERCPU,
11180
11181 .__cr3 = __pa_nodebug(swapper_pg_dir),
11182diff -urNp linux-3.0.7/arch/x86/kernel/dumpstack_32.c linux-3.0.7/arch/x86/kernel/dumpstack_32.c
11183--- linux-3.0.7/arch/x86/kernel/dumpstack_32.c 2011-07-21 22:17:23.000000000 -0400
11184+++ linux-3.0.7/arch/x86/kernel/dumpstack_32.c 2011-08-23 21:47:55.000000000 -0400
11185@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
11186 bp = stack_frame(task, regs);
11187
11188 for (;;) {
11189- struct thread_info *context;
11190+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11191
11192- context = (struct thread_info *)
11193- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
11194- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
11195+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11196
11197- stack = (unsigned long *)context->previous_esp;
11198- if (!stack)
11199+ if (stack_start == task_stack_page(task))
11200 break;
11201+ stack = *(unsigned long **)stack_start;
11202 if (ops->stack(data, "IRQ") < 0)
11203 break;
11204 touch_nmi_watchdog();
11205@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
11206 * When in-kernel, we also print out the stack and code at the
11207 * time of the fault..
11208 */
11209- if (!user_mode_vm(regs)) {
11210+ if (!user_mode(regs)) {
11211 unsigned int code_prologue = code_bytes * 43 / 64;
11212 unsigned int code_len = code_bytes;
11213 unsigned char c;
11214 u8 *ip;
11215+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
11216
11217 printk(KERN_EMERG "Stack:\n");
11218 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
11219
11220 printk(KERN_EMERG "Code: ");
11221
11222- ip = (u8 *)regs->ip - code_prologue;
11223+ ip = (u8 *)regs->ip - code_prologue + cs_base;
11224 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
11225 /* try starting at IP */
11226- ip = (u8 *)regs->ip;
11227+ ip = (u8 *)regs->ip + cs_base;
11228 code_len = code_len - code_prologue + 1;
11229 }
11230 for (i = 0; i < code_len; i++, ip++) {
11231@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
11232 printk(" Bad EIP value.");
11233 break;
11234 }
11235- if (ip == (u8 *)regs->ip)
11236+ if (ip == (u8 *)regs->ip + cs_base)
11237 printk("<%02x> ", c);
11238 else
11239 printk("%02x ", c);
11240@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
11241 {
11242 unsigned short ud2;
11243
11244+ ip = ktla_ktva(ip);
11245 if (ip < PAGE_OFFSET)
11246 return 0;
11247 if (probe_kernel_address((unsigned short *)ip, ud2))
11248diff -urNp linux-3.0.7/arch/x86/kernel/dumpstack_64.c linux-3.0.7/arch/x86/kernel/dumpstack_64.c
11249--- linux-3.0.7/arch/x86/kernel/dumpstack_64.c 2011-07-21 22:17:23.000000000 -0400
11250+++ linux-3.0.7/arch/x86/kernel/dumpstack_64.c 2011-08-23 21:47:55.000000000 -0400
11251@@ -147,9 +147,9 @@ void dump_trace(struct task_struct *task
11252 unsigned long *irq_stack_end =
11253 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
11254 unsigned used = 0;
11255- struct thread_info *tinfo;
11256 int graph = 0;
11257 unsigned long dummy;
11258+ void *stack_start;
11259
11260 if (!task)
11261 task = current;
11262@@ -167,10 +167,10 @@ void dump_trace(struct task_struct *task
11263 * current stack address. If the stacks consist of nested
11264 * exceptions
11265 */
11266- tinfo = task_thread_info(task);
11267 for (;;) {
11268 char *id;
11269 unsigned long *estack_end;
11270+
11271 estack_end = in_exception_stack(cpu, (unsigned long)stack,
11272 &used, &id);
11273
11274@@ -178,7 +178,7 @@ void dump_trace(struct task_struct *task
11275 if (ops->stack(data, id) < 0)
11276 break;
11277
11278- bp = ops->walk_stack(tinfo, stack, bp, ops,
11279+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
11280 data, estack_end, &graph);
11281 ops->stack(data, "<EOE>");
11282 /*
11283@@ -197,7 +197,7 @@ void dump_trace(struct task_struct *task
11284 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
11285 if (ops->stack(data, "IRQ") < 0)
11286 break;
11287- bp = ops->walk_stack(tinfo, stack, bp,
11288+ bp = ops->walk_stack(task, irq_stack, stack, bp,
11289 ops, data, irq_stack_end, &graph);
11290 /*
11291 * We link to the next stack (which would be
11292@@ -218,7 +218,8 @@ void dump_trace(struct task_struct *task
11293 /*
11294 * This handles the process stack:
11295 */
11296- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
11297+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11298+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11299 put_cpu();
11300 }
11301 EXPORT_SYMBOL(dump_trace);
11302diff -urNp linux-3.0.7/arch/x86/kernel/dumpstack.c linux-3.0.7/arch/x86/kernel/dumpstack.c
11303--- linux-3.0.7/arch/x86/kernel/dumpstack.c 2011-07-21 22:17:23.000000000 -0400
11304+++ linux-3.0.7/arch/x86/kernel/dumpstack.c 2011-08-23 21:48:14.000000000 -0400
11305@@ -2,6 +2,9 @@
11306 * Copyright (C) 1991, 1992 Linus Torvalds
11307 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
11308 */
11309+#ifdef CONFIG_GRKERNSEC_HIDESYM
11310+#define __INCLUDED_BY_HIDESYM 1
11311+#endif
11312 #include <linux/kallsyms.h>
11313 #include <linux/kprobes.h>
11314 #include <linux/uaccess.h>
11315@@ -35,9 +38,8 @@ void printk_address(unsigned long addres
11316 static void
11317 print_ftrace_graph_addr(unsigned long addr, void *data,
11318 const struct stacktrace_ops *ops,
11319- struct thread_info *tinfo, int *graph)
11320+ struct task_struct *task, int *graph)
11321 {
11322- struct task_struct *task = tinfo->task;
11323 unsigned long ret_addr;
11324 int index = task->curr_ret_stack;
11325
11326@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
11327 static inline void
11328 print_ftrace_graph_addr(unsigned long addr, void *data,
11329 const struct stacktrace_ops *ops,
11330- struct thread_info *tinfo, int *graph)
11331+ struct task_struct *task, int *graph)
11332 { }
11333 #endif
11334
11335@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
11336 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
11337 */
11338
11339-static inline int valid_stack_ptr(struct thread_info *tinfo,
11340- void *p, unsigned int size, void *end)
11341+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
11342 {
11343- void *t = tinfo;
11344 if (end) {
11345 if (p < end && p >= (end-THREAD_SIZE))
11346 return 1;
11347@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
11348 }
11349
11350 unsigned long
11351-print_context_stack(struct thread_info *tinfo,
11352+print_context_stack(struct task_struct *task, void *stack_start,
11353 unsigned long *stack, unsigned long bp,
11354 const struct stacktrace_ops *ops, void *data,
11355 unsigned long *end, int *graph)
11356 {
11357 struct stack_frame *frame = (struct stack_frame *)bp;
11358
11359- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
11360+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
11361 unsigned long addr;
11362
11363 addr = *stack;
11364@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
11365 } else {
11366 ops->address(data, addr, 0);
11367 }
11368- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11369+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11370 }
11371 stack++;
11372 }
11373@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
11374 EXPORT_SYMBOL_GPL(print_context_stack);
11375
11376 unsigned long
11377-print_context_stack_bp(struct thread_info *tinfo,
11378+print_context_stack_bp(struct task_struct *task, void *stack_start,
11379 unsigned long *stack, unsigned long bp,
11380 const struct stacktrace_ops *ops, void *data,
11381 unsigned long *end, int *graph)
11382@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
11383 struct stack_frame *frame = (struct stack_frame *)bp;
11384 unsigned long *ret_addr = &frame->return_address;
11385
11386- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
11387+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
11388 unsigned long addr = *ret_addr;
11389
11390 if (!__kernel_text_address(addr))
11391@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
11392 ops->address(data, addr, 1);
11393 frame = frame->next_frame;
11394 ret_addr = &frame->return_address;
11395- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11396+ print_ftrace_graph_addr(addr, data, ops, task, graph);
11397 }
11398
11399 return (unsigned long)frame;
11400@@ -186,7 +186,7 @@ void dump_stack(void)
11401
11402 bp = stack_frame(current, NULL);
11403 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
11404- current->pid, current->comm, print_tainted(),
11405+ task_pid_nr(current), current->comm, print_tainted(),
11406 init_utsname()->release,
11407 (int)strcspn(init_utsname()->version, " "),
11408 init_utsname()->version);
11409@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
11410 }
11411 EXPORT_SYMBOL_GPL(oops_begin);
11412
11413+extern void gr_handle_kernel_exploit(void);
11414+
11415 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
11416 {
11417 if (regs && kexec_should_crash(current))
11418@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long fl
11419 panic("Fatal exception in interrupt");
11420 if (panic_on_oops)
11421 panic("Fatal exception");
11422- do_exit(signr);
11423+
11424+ gr_handle_kernel_exploit();
11425+
11426+ do_group_exit(signr);
11427 }
11428
11429 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
11430@@ -269,7 +274,7 @@ int __kprobes __die(const char *str, str
11431
11432 show_registers(regs);
11433 #ifdef CONFIG_X86_32
11434- if (user_mode_vm(regs)) {
11435+ if (user_mode(regs)) {
11436 sp = regs->sp;
11437 ss = regs->ss & 0xffff;
11438 } else {
11439@@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs
11440 unsigned long flags = oops_begin();
11441 int sig = SIGSEGV;
11442
11443- if (!user_mode_vm(regs))
11444+ if (!user_mode(regs))
11445 report_bug(regs->ip, regs);
11446
11447 if (__die(str, regs, err))
11448diff -urNp linux-3.0.7/arch/x86/kernel/early_printk.c linux-3.0.7/arch/x86/kernel/early_printk.c
11449--- linux-3.0.7/arch/x86/kernel/early_printk.c 2011-07-21 22:17:23.000000000 -0400
11450+++ linux-3.0.7/arch/x86/kernel/early_printk.c 2011-08-23 21:48:14.000000000 -0400
11451@@ -7,6 +7,7 @@
11452 #include <linux/pci_regs.h>
11453 #include <linux/pci_ids.h>
11454 #include <linux/errno.h>
11455+#include <linux/sched.h>
11456 #include <asm/io.h>
11457 #include <asm/processor.h>
11458 #include <asm/fcntl.h>
11459@@ -179,6 +180,8 @@ asmlinkage void early_printk(const char
11460 int n;
11461 va_list ap;
11462
11463+ pax_track_stack();
11464+
11465 va_start(ap, fmt);
11466 n = vscnprintf(buf, sizeof(buf), fmt, ap);
11467 early_console->write(early_console, buf, n);
11468diff -urNp linux-3.0.7/arch/x86/kernel/entry_32.S linux-3.0.7/arch/x86/kernel/entry_32.S
11469--- linux-3.0.7/arch/x86/kernel/entry_32.S 2011-07-21 22:17:23.000000000 -0400
11470+++ linux-3.0.7/arch/x86/kernel/entry_32.S 2011-08-30 18:23:52.000000000 -0400
11471@@ -185,13 +185,146 @@
11472 /*CFI_REL_OFFSET gs, PT_GS*/
11473 .endm
11474 .macro SET_KERNEL_GS reg
11475+
11476+#ifdef CONFIG_CC_STACKPROTECTOR
11477 movl $(__KERNEL_STACK_CANARY), \reg
11478+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
11479+ movl $(__USER_DS), \reg
11480+#else
11481+ xorl \reg, \reg
11482+#endif
11483+
11484 movl \reg, %gs
11485 .endm
11486
11487 #endif /* CONFIG_X86_32_LAZY_GS */
11488
11489-.macro SAVE_ALL
11490+.macro pax_enter_kernel
11491+#ifdef CONFIG_PAX_KERNEXEC
11492+ call pax_enter_kernel
11493+#endif
11494+.endm
11495+
11496+.macro pax_exit_kernel
11497+#ifdef CONFIG_PAX_KERNEXEC
11498+ call pax_exit_kernel
11499+#endif
11500+.endm
11501+
11502+#ifdef CONFIG_PAX_KERNEXEC
11503+ENTRY(pax_enter_kernel)
11504+#ifdef CONFIG_PARAVIRT
11505+ pushl %eax
11506+ pushl %ecx
11507+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
11508+ mov %eax, %esi
11509+#else
11510+ mov %cr0, %esi
11511+#endif
11512+ bts $16, %esi
11513+ jnc 1f
11514+ mov %cs, %esi
11515+ cmp $__KERNEL_CS, %esi
11516+ jz 3f
11517+ ljmp $__KERNEL_CS, $3f
11518+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
11519+2:
11520+#ifdef CONFIG_PARAVIRT
11521+ mov %esi, %eax
11522+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11523+#else
11524+ mov %esi, %cr0
11525+#endif
11526+3:
11527+#ifdef CONFIG_PARAVIRT
11528+ popl %ecx
11529+ popl %eax
11530+#endif
11531+ ret
11532+ENDPROC(pax_enter_kernel)
11533+
11534+ENTRY(pax_exit_kernel)
11535+#ifdef CONFIG_PARAVIRT
11536+ pushl %eax
11537+ pushl %ecx
11538+#endif
11539+ mov %cs, %esi
11540+ cmp $__KERNEXEC_KERNEL_CS, %esi
11541+ jnz 2f
11542+#ifdef CONFIG_PARAVIRT
11543+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
11544+ mov %eax, %esi
11545+#else
11546+ mov %cr0, %esi
11547+#endif
11548+ btr $16, %esi
11549+ ljmp $__KERNEL_CS, $1f
11550+1:
11551+#ifdef CONFIG_PARAVIRT
11552+ mov %esi, %eax
11553+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
11554+#else
11555+ mov %esi, %cr0
11556+#endif
11557+2:
11558+#ifdef CONFIG_PARAVIRT
11559+ popl %ecx
11560+ popl %eax
11561+#endif
11562+ ret
11563+ENDPROC(pax_exit_kernel)
11564+#endif
11565+
11566+.macro pax_erase_kstack
11567+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11568+ call pax_erase_kstack
11569+#endif
11570+.endm
11571+
11572+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11573+/*
11574+ * ebp: thread_info
11575+ * ecx, edx: can be clobbered
11576+ */
11577+ENTRY(pax_erase_kstack)
11578+ pushl %edi
11579+ pushl %eax
11580+
11581+ mov TI_lowest_stack(%ebp), %edi
11582+ mov $-0xBEEF, %eax
11583+ std
11584+
11585+1: mov %edi, %ecx
11586+ and $THREAD_SIZE_asm - 1, %ecx
11587+ shr $2, %ecx
11588+ repne scasl
11589+ jecxz 2f
11590+
11591+ cmp $2*16, %ecx
11592+ jc 2f
11593+
11594+ mov $2*16, %ecx
11595+ repe scasl
11596+ jecxz 2f
11597+ jne 1b
11598+
11599+2: cld
11600+ mov %esp, %ecx
11601+ sub %edi, %ecx
11602+ shr $2, %ecx
11603+ rep stosl
11604+
11605+ mov TI_task_thread_sp0(%ebp), %edi
11606+ sub $128, %edi
11607+ mov %edi, TI_lowest_stack(%ebp)
11608+
11609+ popl %eax
11610+ popl %edi
11611+ ret
11612+ENDPROC(pax_erase_kstack)
11613+#endif
11614+
11615+.macro __SAVE_ALL _DS
11616 cld
11617 PUSH_GS
11618 pushl_cfi %fs
11619@@ -214,7 +347,7 @@
11620 CFI_REL_OFFSET ecx, 0
11621 pushl_cfi %ebx
11622 CFI_REL_OFFSET ebx, 0
11623- movl $(__USER_DS), %edx
11624+ movl $\_DS, %edx
11625 movl %edx, %ds
11626 movl %edx, %es
11627 movl $(__KERNEL_PERCPU), %edx
11628@@ -222,6 +355,15 @@
11629 SET_KERNEL_GS %edx
11630 .endm
11631
11632+.macro SAVE_ALL
11633+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
11634+ __SAVE_ALL __KERNEL_DS
11635+ pax_enter_kernel
11636+#else
11637+ __SAVE_ALL __USER_DS
11638+#endif
11639+.endm
11640+
11641 .macro RESTORE_INT_REGS
11642 popl_cfi %ebx
11643 CFI_RESTORE ebx
11644@@ -332,7 +474,15 @@ check_userspace:
11645 movb PT_CS(%esp), %al
11646 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
11647 cmpl $USER_RPL, %eax
11648+
11649+#ifdef CONFIG_PAX_KERNEXEC
11650+ jae resume_userspace
11651+
11652+ PAX_EXIT_KERNEL
11653+ jmp resume_kernel
11654+#else
11655 jb resume_kernel # not returning to v8086 or userspace
11656+#endif
11657
11658 ENTRY(resume_userspace)
11659 LOCKDEP_SYS_EXIT
11660@@ -344,7 +494,7 @@ ENTRY(resume_userspace)
11661 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
11662 # int/exception return?
11663 jne work_pending
11664- jmp restore_all
11665+ jmp restore_all_pax
11666 END(ret_from_exception)
11667
11668 #ifdef CONFIG_PREEMPT
11669@@ -394,23 +544,34 @@ sysenter_past_esp:
11670 /*CFI_REL_OFFSET cs, 0*/
11671 /*
11672 * Push current_thread_info()->sysenter_return to the stack.
11673- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
11674- * pushed above; +8 corresponds to copy_thread's esp0 setting.
11675 */
11676- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
11677+ pushl_cfi $0
11678 CFI_REL_OFFSET eip, 0
11679
11680 pushl_cfi %eax
11681 SAVE_ALL
11682+ GET_THREAD_INFO(%ebp)
11683+ movl TI_sysenter_return(%ebp),%ebp
11684+ movl %ebp,PT_EIP(%esp)
11685 ENABLE_INTERRUPTS(CLBR_NONE)
11686
11687 /*
11688 * Load the potential sixth argument from user stack.
11689 * Careful about security.
11690 */
11691+ movl PT_OLDESP(%esp),%ebp
11692+
11693+#ifdef CONFIG_PAX_MEMORY_UDEREF
11694+ mov PT_OLDSS(%esp),%ds
11695+1: movl %ds:(%ebp),%ebp
11696+ push %ss
11697+ pop %ds
11698+#else
11699 cmpl $__PAGE_OFFSET-3,%ebp
11700 jae syscall_fault
11701 1: movl (%ebp),%ebp
11702+#endif
11703+
11704 movl %ebp,PT_EBP(%esp)
11705 .section __ex_table,"a"
11706 .align 4
11707@@ -433,12 +594,24 @@ sysenter_do_call:
11708 testl $_TIF_ALLWORK_MASK, %ecx
11709 jne sysexit_audit
11710 sysenter_exit:
11711+
11712+#ifdef CONFIG_PAX_RANDKSTACK
11713+ pushl_cfi %eax
11714+ movl %esp, %eax
11715+ call pax_randomize_kstack
11716+ popl_cfi %eax
11717+#endif
11718+
11719+ pax_erase_kstack
11720+
11721 /* if something modifies registers it must also disable sysexit */
11722 movl PT_EIP(%esp), %edx
11723 movl PT_OLDESP(%esp), %ecx
11724 xorl %ebp,%ebp
11725 TRACE_IRQS_ON
11726 1: mov PT_FS(%esp), %fs
11727+2: mov PT_DS(%esp), %ds
11728+3: mov PT_ES(%esp), %es
11729 PTGS_TO_GS
11730 ENABLE_INTERRUPTS_SYSEXIT
11731
11732@@ -455,6 +628,9 @@ sysenter_audit:
11733 movl %eax,%edx /* 2nd arg: syscall number */
11734 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
11735 call audit_syscall_entry
11736+
11737+ pax_erase_kstack
11738+
11739 pushl_cfi %ebx
11740 movl PT_EAX(%esp),%eax /* reload syscall number */
11741 jmp sysenter_do_call
11742@@ -481,11 +657,17 @@ sysexit_audit:
11743
11744 CFI_ENDPROC
11745 .pushsection .fixup,"ax"
11746-2: movl $0,PT_FS(%esp)
11747+4: movl $0,PT_FS(%esp)
11748+ jmp 1b
11749+5: movl $0,PT_DS(%esp)
11750+ jmp 1b
11751+6: movl $0,PT_ES(%esp)
11752 jmp 1b
11753 .section __ex_table,"a"
11754 .align 4
11755- .long 1b,2b
11756+ .long 1b,4b
11757+ .long 2b,5b
11758+ .long 3b,6b
11759 .popsection
11760 PTGS_TO_GS_EX
11761 ENDPROC(ia32_sysenter_target)
11762@@ -518,6 +700,15 @@ syscall_exit:
11763 testl $_TIF_ALLWORK_MASK, %ecx # current->work
11764 jne syscall_exit_work
11765
11766+restore_all_pax:
11767+
11768+#ifdef CONFIG_PAX_RANDKSTACK
11769+ movl %esp, %eax
11770+ call pax_randomize_kstack
11771+#endif
11772+
11773+ pax_erase_kstack
11774+
11775 restore_all:
11776 TRACE_IRQS_IRET
11777 restore_all_notrace:
11778@@ -577,14 +768,34 @@ ldt_ss:
11779 * compensating for the offset by changing to the ESPFIX segment with
11780 * a base address that matches for the difference.
11781 */
11782-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
11783+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
11784 mov %esp, %edx /* load kernel esp */
11785 mov PT_OLDESP(%esp), %eax /* load userspace esp */
11786 mov %dx, %ax /* eax: new kernel esp */
11787 sub %eax, %edx /* offset (low word is 0) */
11788+#ifdef CONFIG_SMP
11789+ movl PER_CPU_VAR(cpu_number), %ebx
11790+ shll $PAGE_SHIFT_asm, %ebx
11791+ addl $cpu_gdt_table, %ebx
11792+#else
11793+ movl $cpu_gdt_table, %ebx
11794+#endif
11795 shr $16, %edx
11796- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
11797- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
11798+
11799+#ifdef CONFIG_PAX_KERNEXEC
11800+ mov %cr0, %esi
11801+ btr $16, %esi
11802+ mov %esi, %cr0
11803+#endif
11804+
11805+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
11806+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
11807+
11808+#ifdef CONFIG_PAX_KERNEXEC
11809+ bts $16, %esi
11810+ mov %esi, %cr0
11811+#endif
11812+
11813 pushl_cfi $__ESPFIX_SS
11814 pushl_cfi %eax /* new kernel esp */
11815 /* Disable interrupts, but do not irqtrace this section: we
11816@@ -613,29 +824,23 @@ work_resched:
11817 movl TI_flags(%ebp), %ecx
11818 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
11819 # than syscall tracing?
11820- jz restore_all
11821+ jz restore_all_pax
11822 testb $_TIF_NEED_RESCHED, %cl
11823 jnz work_resched
11824
11825 work_notifysig: # deal with pending signals and
11826 # notify-resume requests
11827+ movl %esp, %eax
11828 #ifdef CONFIG_VM86
11829 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
11830- movl %esp, %eax
11831- jne work_notifysig_v86 # returning to kernel-space or
11832+ jz 1f # returning to kernel-space or
11833 # vm86-space
11834- xorl %edx, %edx
11835- call do_notify_resume
11836- jmp resume_userspace_sig
11837
11838- ALIGN
11839-work_notifysig_v86:
11840 pushl_cfi %ecx # save ti_flags for do_notify_resume
11841 call save_v86_state # %eax contains pt_regs pointer
11842 popl_cfi %ecx
11843 movl %eax, %esp
11844-#else
11845- movl %esp, %eax
11846+1:
11847 #endif
11848 xorl %edx, %edx
11849 call do_notify_resume
11850@@ -648,6 +853,9 @@ syscall_trace_entry:
11851 movl $-ENOSYS,PT_EAX(%esp)
11852 movl %esp, %eax
11853 call syscall_trace_enter
11854+
11855+ pax_erase_kstack
11856+
11857 /* What it returned is what we'll actually use. */
11858 cmpl $(nr_syscalls), %eax
11859 jnae syscall_call
11860@@ -670,6 +878,10 @@ END(syscall_exit_work)
11861
11862 RING0_INT_FRAME # can't unwind into user space anyway
11863 syscall_fault:
11864+#ifdef CONFIG_PAX_MEMORY_UDEREF
11865+ push %ss
11866+ pop %ds
11867+#endif
11868 GET_THREAD_INFO(%ebp)
11869 movl $-EFAULT,PT_EAX(%esp)
11870 jmp resume_userspace
11871@@ -752,6 +964,36 @@ ptregs_clone:
11872 CFI_ENDPROC
11873 ENDPROC(ptregs_clone)
11874
11875+ ALIGN;
11876+ENTRY(kernel_execve)
11877+ CFI_STARTPROC
11878+ pushl_cfi %ebp
11879+ sub $PT_OLDSS+4,%esp
11880+ pushl_cfi %edi
11881+ pushl_cfi %ecx
11882+ pushl_cfi %eax
11883+ lea 3*4(%esp),%edi
11884+ mov $PT_OLDSS/4+1,%ecx
11885+ xorl %eax,%eax
11886+ rep stosl
11887+ popl_cfi %eax
11888+ popl_cfi %ecx
11889+ popl_cfi %edi
11890+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
11891+ pushl_cfi %esp
11892+ call sys_execve
11893+ add $4,%esp
11894+ CFI_ADJUST_CFA_OFFSET -4
11895+ GET_THREAD_INFO(%ebp)
11896+ test %eax,%eax
11897+ jz syscall_exit
11898+ add $PT_OLDSS+4,%esp
11899+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
11900+ popl_cfi %ebp
11901+ ret
11902+ CFI_ENDPROC
11903+ENDPROC(kernel_execve)
11904+
11905 .macro FIXUP_ESPFIX_STACK
11906 /*
11907 * Switch back for ESPFIX stack to the normal zerobased stack
11908@@ -761,8 +1003,15 @@ ENDPROC(ptregs_clone)
11909 * normal stack and adjusts ESP with the matching offset.
11910 */
11911 /* fixup the stack */
11912- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
11913- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
11914+#ifdef CONFIG_SMP
11915+ movl PER_CPU_VAR(cpu_number), %ebx
11916+ shll $PAGE_SHIFT_asm, %ebx
11917+ addl $cpu_gdt_table, %ebx
11918+#else
11919+ movl $cpu_gdt_table, %ebx
11920+#endif
11921+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
11922+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
11923 shl $16, %eax
11924 addl %esp, %eax /* the adjusted stack pointer */
11925 pushl_cfi $__KERNEL_DS
11926@@ -1213,7 +1462,6 @@ return_to_handler:
11927 jmp *%ecx
11928 #endif
11929
11930-.section .rodata,"a"
11931 #include "syscall_table_32.S"
11932
11933 syscall_table_size=(.-sys_call_table)
11934@@ -1259,9 +1507,12 @@ error_code:
11935 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
11936 REG_TO_PTGS %ecx
11937 SET_KERNEL_GS %ecx
11938- movl $(__USER_DS), %ecx
11939+ movl $(__KERNEL_DS), %ecx
11940 movl %ecx, %ds
11941 movl %ecx, %es
11942+
11943+ pax_enter_kernel
11944+
11945 TRACE_IRQS_OFF
11946 movl %esp,%eax # pt_regs pointer
11947 call *%edi
11948@@ -1346,6 +1597,9 @@ nmi_stack_correct:
11949 xorl %edx,%edx # zero error code
11950 movl %esp,%eax # pt_regs pointer
11951 call do_nmi
11952+
11953+ pax_exit_kernel
11954+
11955 jmp restore_all_notrace
11956 CFI_ENDPROC
11957
11958@@ -1382,6 +1636,9 @@ nmi_espfix_stack:
11959 FIXUP_ESPFIX_STACK # %eax == %esp
11960 xorl %edx,%edx # zero error code
11961 call do_nmi
11962+
11963+ pax_exit_kernel
11964+
11965 RESTORE_REGS
11966 lss 12+4(%esp), %esp # back to espfix stack
11967 CFI_ADJUST_CFA_OFFSET -24
11968diff -urNp linux-3.0.7/arch/x86/kernel/entry_64.S linux-3.0.7/arch/x86/kernel/entry_64.S
11969--- linux-3.0.7/arch/x86/kernel/entry_64.S 2011-07-21 22:17:23.000000000 -0400
11970+++ linux-3.0.7/arch/x86/kernel/entry_64.S 2011-10-11 10:44:33.000000000 -0400
11971@@ -53,6 +53,8 @@
11972 #include <asm/paravirt.h>
11973 #include <asm/ftrace.h>
11974 #include <asm/percpu.h>
11975+#include <asm/pgtable.h>
11976+#include <asm/alternative-asm.h>
11977
11978 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11979 #include <linux/elf-em.h>
11980@@ -66,6 +68,7 @@
11981 #ifdef CONFIG_FUNCTION_TRACER
11982 #ifdef CONFIG_DYNAMIC_FTRACE
11983 ENTRY(mcount)
11984+ pax_force_retaddr
11985 retq
11986 END(mcount)
11987
11988@@ -90,6 +93,7 @@ GLOBAL(ftrace_graph_call)
11989 #endif
11990
11991 GLOBAL(ftrace_stub)
11992+ pax_force_retaddr
11993 retq
11994 END(ftrace_caller)
11995
11996@@ -110,6 +114,7 @@ ENTRY(mcount)
11997 #endif
11998
11999 GLOBAL(ftrace_stub)
12000+ pax_force_retaddr
12001 retq
12002
12003 trace:
12004@@ -119,6 +124,7 @@ trace:
12005 movq 8(%rbp), %rsi
12006 subq $MCOUNT_INSN_SIZE, %rdi
12007
12008+ pax_force_fptr ftrace_trace_function
12009 call *ftrace_trace_function
12010
12011 MCOUNT_RESTORE_FRAME
12012@@ -144,6 +150,7 @@ ENTRY(ftrace_graph_caller)
12013
12014 MCOUNT_RESTORE_FRAME
12015
12016+ pax_force_retaddr
12017 retq
12018 END(ftrace_graph_caller)
12019
12020@@ -161,6 +168,7 @@ GLOBAL(return_to_handler)
12021 movq 8(%rsp), %rdx
12022 movq (%rsp), %rax
12023 addq $24, %rsp
12024+ pax_force_fptr %rdi
12025 jmp *%rdi
12026 #endif
12027
12028@@ -176,6 +184,269 @@ ENTRY(native_usergs_sysret64)
12029 ENDPROC(native_usergs_sysret64)
12030 #endif /* CONFIG_PARAVIRT */
12031
12032+ .macro ljmpq sel, off
12033+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
12034+ .byte 0x48; ljmp *1234f(%rip)
12035+ .pushsection .rodata
12036+ .align 16
12037+ 1234: .quad \off; .word \sel
12038+ .popsection
12039+#else
12040+ pushq $\sel
12041+ pushq $\off
12042+ lretq
12043+#endif
12044+ .endm
12045+
12046+ .macro pax_enter_kernel
12047+#ifdef CONFIG_PAX_KERNEXEC
12048+ call pax_enter_kernel
12049+#endif
12050+ .endm
12051+
12052+ .macro pax_exit_kernel
12053+#ifdef CONFIG_PAX_KERNEXEC
12054+ call pax_exit_kernel
12055+#endif
12056+ .endm
12057+
12058+#ifdef CONFIG_PAX_KERNEXEC
12059+ENTRY(pax_enter_kernel)
12060+ pushq %rdi
12061+
12062+#ifdef CONFIG_PARAVIRT
12063+ PV_SAVE_REGS(CLBR_RDI)
12064+#endif
12065+
12066+ GET_CR0_INTO_RDI
12067+ bts $16,%rdi
12068+ jnc 1f
12069+ mov %cs,%edi
12070+ cmp $__KERNEL_CS,%edi
12071+ jz 3f
12072+ ljmpq __KERNEL_CS,3f
12073+1: ljmpq __KERNEXEC_KERNEL_CS,2f
12074+2: SET_RDI_INTO_CR0
12075+3:
12076+
12077+#ifdef CONFIG_PARAVIRT
12078+ PV_RESTORE_REGS(CLBR_RDI)
12079+#endif
12080+
12081+ popq %rdi
12082+ pax_force_retaddr
12083+ retq
12084+ENDPROC(pax_enter_kernel)
12085+
12086+ENTRY(pax_exit_kernel)
12087+ pushq %rdi
12088+
12089+#ifdef CONFIG_PARAVIRT
12090+ PV_SAVE_REGS(CLBR_RDI)
12091+#endif
12092+
12093+ mov %cs,%rdi
12094+ cmp $__KERNEXEC_KERNEL_CS,%edi
12095+ jnz 2f
12096+ GET_CR0_INTO_RDI
12097+ btr $16,%rdi
12098+ ljmpq __KERNEL_CS,1f
12099+1: SET_RDI_INTO_CR0
12100+2:
12101+
12102+#ifdef CONFIG_PARAVIRT
12103+ PV_RESTORE_REGS(CLBR_RDI);
12104+#endif
12105+
12106+ popq %rdi
12107+ pax_force_retaddr
12108+ retq
12109+ENDPROC(pax_exit_kernel)
12110+#endif
12111+
12112+ .macro pax_enter_kernel_user
12113+#ifdef CONFIG_PAX_MEMORY_UDEREF
12114+ call pax_enter_kernel_user
12115+#endif
12116+ .endm
12117+
12118+ .macro pax_exit_kernel_user
12119+#ifdef CONFIG_PAX_MEMORY_UDEREF
12120+ call pax_exit_kernel_user
12121+#endif
12122+#ifdef CONFIG_PAX_RANDKSTACK
12123+ push %rax
12124+ call pax_randomize_kstack
12125+ pop %rax
12126+#endif
12127+ .endm
12128+
12129+#ifdef CONFIG_PAX_MEMORY_UDEREF
12130+ENTRY(pax_enter_kernel_user)
12131+ pushq %rdi
12132+ pushq %rbx
12133+
12134+#ifdef CONFIG_PARAVIRT
12135+ PV_SAVE_REGS(CLBR_RDI)
12136+#endif
12137+
12138+ GET_CR3_INTO_RDI
12139+ mov %rdi,%rbx
12140+ add $__START_KERNEL_map,%rbx
12141+ sub phys_base(%rip),%rbx
12142+
12143+#ifdef CONFIG_PARAVIRT
12144+ pushq %rdi
12145+ cmpl $0, pv_info+PARAVIRT_enabled
12146+ jz 1f
12147+ i = 0
12148+ .rept USER_PGD_PTRS
12149+ mov i*8(%rbx),%rsi
12150+ mov $0,%sil
12151+ lea i*8(%rbx),%rdi
12152+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
12153+ i = i + 1
12154+ .endr
12155+ jmp 2f
12156+1:
12157+#endif
12158+
12159+ i = 0
12160+ .rept USER_PGD_PTRS
12161+ movb $0,i*8(%rbx)
12162+ i = i + 1
12163+ .endr
12164+
12165+#ifdef CONFIG_PARAVIRT
12166+2: popq %rdi
12167+#endif
12168+ SET_RDI_INTO_CR3
12169+
12170+#ifdef CONFIG_PAX_KERNEXEC
12171+ GET_CR0_INTO_RDI
12172+ bts $16,%rdi
12173+ SET_RDI_INTO_CR0
12174+#endif
12175+
12176+#ifdef CONFIG_PARAVIRT
12177+ PV_RESTORE_REGS(CLBR_RDI)
12178+#endif
12179+
12180+ popq %rbx
12181+ popq %rdi
12182+ pax_force_retaddr
12183+ retq
12184+ENDPROC(pax_enter_kernel_user)
12185+
12186+ENTRY(pax_exit_kernel_user)
12187+ push %rdi
12188+
12189+#ifdef CONFIG_PARAVIRT
12190+ pushq %rbx
12191+ PV_SAVE_REGS(CLBR_RDI)
12192+#endif
12193+
12194+#ifdef CONFIG_PAX_KERNEXEC
12195+ GET_CR0_INTO_RDI
12196+ btr $16,%rdi
12197+ SET_RDI_INTO_CR0
12198+#endif
12199+
12200+ GET_CR3_INTO_RDI
12201+ add $__START_KERNEL_map,%rdi
12202+ sub phys_base(%rip),%rdi
12203+
12204+#ifdef CONFIG_PARAVIRT
12205+ cmpl $0, pv_info+PARAVIRT_enabled
12206+ jz 1f
12207+ mov %rdi,%rbx
12208+ i = 0
12209+ .rept USER_PGD_PTRS
12210+ mov i*8(%rbx),%rsi
12211+ mov $0x67,%sil
12212+ lea i*8(%rbx),%rdi
12213+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
12214+ i = i + 1
12215+ .endr
12216+ jmp 2f
12217+1:
12218+#endif
12219+
12220+ i = 0
12221+ .rept USER_PGD_PTRS
12222+ movb $0x67,i*8(%rdi)
12223+ i = i + 1
12224+ .endr
12225+
12226+#ifdef CONFIG_PARAVIRT
12227+2: PV_RESTORE_REGS(CLBR_RDI)
12228+ popq %rbx
12229+#endif
12230+
12231+ popq %rdi
12232+ pax_force_retaddr
12233+ retq
12234+ENDPROC(pax_exit_kernel_user)
12235+#endif
12236+
12237+ .macro pax_erase_kstack
12238+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12239+ call pax_erase_kstack
12240+#endif
12241+ .endm
12242+
12243+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12244+/*
12245+ * r10: thread_info
12246+ * rcx, rdx: can be clobbered
12247+ */
12248+ENTRY(pax_erase_kstack)
12249+ pushq %rdi
12250+ pushq %rax
12251+ pushq %r10
12252+
12253+ GET_THREAD_INFO(%r10)
12254+ mov TI_lowest_stack(%r10), %rdi
12255+ mov $-0xBEEF, %rax
12256+ std
12257+
12258+1: mov %edi, %ecx
12259+ and $THREAD_SIZE_asm - 1, %ecx
12260+ shr $3, %ecx
12261+ repne scasq
12262+ jecxz 2f
12263+
12264+ cmp $2*8, %ecx
12265+ jc 2f
12266+
12267+ mov $2*8, %ecx
12268+ repe scasq
12269+ jecxz 2f
12270+ jne 1b
12271+
12272+2: cld
12273+ mov %esp, %ecx
12274+ sub %edi, %ecx
12275+
12276+ cmp $THREAD_SIZE_asm, %rcx
12277+ jb 3f
12278+ ud2
12279+3:
12280+
12281+ shr $3, %ecx
12282+ rep stosq
12283+
12284+ mov TI_task_thread_sp0(%r10), %rdi
12285+ sub $256, %rdi
12286+ mov %rdi, TI_lowest_stack(%r10)
12287+
12288+ popq %r10
12289+ popq %rax
12290+ popq %rdi
12291+ pax_force_retaddr
12292+ ret
12293+ENDPROC(pax_erase_kstack)
12294+#endif
12295
12296 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
12297 #ifdef CONFIG_TRACE_IRQFLAGS
12298@@ -318,7 +589,7 @@ ENTRY(save_args)
12299 leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
12300 movq_cfi rbp, 8 /* push %rbp */
12301 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
12302- testl $3, CS(%rdi)
12303+ testb $3, CS(%rdi)
12304 je 1f
12305 SWAPGS
12306 /*
12307@@ -338,6 +609,7 @@ ENTRY(save_args)
12308 * We entered an interrupt context - irqs are off:
12309 */
12310 2: TRACE_IRQS_OFF
12311+ pax_force_retaddr
12312 ret
12313 CFI_ENDPROC
12314 END(save_args)
12315@@ -354,6 +626,7 @@ ENTRY(save_rest)
12316 movq_cfi r15, R15+16
12317 movq %r11, 8(%rsp) /* return address */
12318 FIXUP_TOP_OF_STACK %r11, 16
12319+ pax_force_retaddr
12320 ret
12321 CFI_ENDPROC
12322 END(save_rest)
12323@@ -385,7 +658,8 @@ ENTRY(save_paranoid)
12324 js 1f /* negative -> in kernel */
12325 SWAPGS
12326 xorl %ebx,%ebx
12327-1: ret
12328+1: pax_force_retaddr
12329+ ret
12330 CFI_ENDPROC
12331 END(save_paranoid)
12332 .popsection
12333@@ -409,7 +683,7 @@ ENTRY(ret_from_fork)
12334
12335 RESTORE_REST
12336
12337- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12338+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12339 je int_ret_from_sys_call
12340
12341 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
12342@@ -455,7 +729,7 @@ END(ret_from_fork)
12343 ENTRY(system_call)
12344 CFI_STARTPROC simple
12345 CFI_SIGNAL_FRAME
12346- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12347+ CFI_DEF_CFA rsp,0
12348 CFI_REGISTER rip,rcx
12349 /*CFI_REGISTER rflags,r11*/
12350 SWAPGS_UNSAFE_STACK
12351@@ -468,12 +742,13 @@ ENTRY(system_call_after_swapgs)
12352
12353 movq %rsp,PER_CPU_VAR(old_rsp)
12354 movq PER_CPU_VAR(kernel_stack),%rsp
12355+ pax_enter_kernel_user
12356 /*
12357 * No need to follow this irqs off/on section - it's straight
12358 * and short:
12359 */
12360 ENABLE_INTERRUPTS(CLBR_NONE)
12361- SAVE_ARGS 8,1
12362+ SAVE_ARGS 8*6,1
12363 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12364 movq %rcx,RIP-ARGOFFSET(%rsp)
12365 CFI_REL_OFFSET rip,RIP-ARGOFFSET
12366@@ -502,6 +777,8 @@ sysret_check:
12367 andl %edi,%edx
12368 jnz sysret_careful
12369 CFI_REMEMBER_STATE
12370+ pax_exit_kernel_user
12371+ pax_erase_kstack
12372 /*
12373 * sysretq will re-enable interrupts:
12374 */
12375@@ -560,6 +837,9 @@ auditsys:
12376 movq %rax,%rsi /* 2nd arg: syscall number */
12377 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
12378 call audit_syscall_entry
12379+
12380+ pax_erase_kstack
12381+
12382 LOAD_ARGS 0 /* reload call-clobbered registers */
12383 jmp system_call_fastpath
12384
12385@@ -590,6 +870,9 @@ tracesys:
12386 FIXUP_TOP_OF_STACK %rdi
12387 movq %rsp,%rdi
12388 call syscall_trace_enter
12389+
12390+ pax_erase_kstack
12391+
12392 /*
12393 * Reload arg registers from stack in case ptrace changed them.
12394 * We don't reload %rax because syscall_trace_enter() returned
12395@@ -611,7 +894,7 @@ tracesys:
12396 GLOBAL(int_ret_from_sys_call)
12397 DISABLE_INTERRUPTS(CLBR_NONE)
12398 TRACE_IRQS_OFF
12399- testl $3,CS-ARGOFFSET(%rsp)
12400+ testb $3,CS-ARGOFFSET(%rsp)
12401 je retint_restore_args
12402 movl $_TIF_ALLWORK_MASK,%edi
12403 /* edi: mask to check */
12404@@ -702,6 +985,7 @@ ENTRY(ptregscall_common)
12405 movq_cfi_restore R12+8, r12
12406 movq_cfi_restore RBP+8, rbp
12407 movq_cfi_restore RBX+8, rbx
12408+ pax_force_retaddr
12409 ret $REST_SKIP /* pop extended registers */
12410 CFI_ENDPROC
12411 END(ptregscall_common)
12412@@ -793,6 +1077,16 @@ END(interrupt)
12413 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
12414 call save_args
12415 PARTIAL_FRAME 0
12416+#ifdef CONFIG_PAX_MEMORY_UDEREF
12417+ testb $3, CS(%rdi)
12418+ jnz 1f
12419+ pax_enter_kernel
12420+ jmp 2f
12421+1: pax_enter_kernel_user
12422+2:
12423+#else
12424+ pax_enter_kernel
12425+#endif
12426 call \func
12427 .endm
12428
12429@@ -825,7 +1119,7 @@ ret_from_intr:
12430 CFI_ADJUST_CFA_OFFSET -8
12431 exit_intr:
12432 GET_THREAD_INFO(%rcx)
12433- testl $3,CS-ARGOFFSET(%rsp)
12434+ testb $3,CS-ARGOFFSET(%rsp)
12435 je retint_kernel
12436
12437 /* Interrupt came from user space */
12438@@ -847,12 +1141,16 @@ retint_swapgs: /* return to user-space
12439 * The iretq could re-enable interrupts:
12440 */
12441 DISABLE_INTERRUPTS(CLBR_ANY)
12442+ pax_exit_kernel_user
12443+ pax_erase_kstack
12444 TRACE_IRQS_IRETQ
12445 SWAPGS
12446 jmp restore_args
12447
12448 retint_restore_args: /* return to kernel space */
12449 DISABLE_INTERRUPTS(CLBR_ANY)
12450+ pax_exit_kernel
12451+ pax_force_retaddr RIP-ARGOFFSET
12452 /*
12453 * The iretq could re-enable interrupts:
12454 */
12455@@ -1027,6 +1325,16 @@ ENTRY(\sym)
12456 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12457 call error_entry
12458 DEFAULT_FRAME 0
12459+#ifdef CONFIG_PAX_MEMORY_UDEREF
12460+ testb $3, CS(%rsp)
12461+ jnz 1f
12462+ pax_enter_kernel
12463+ jmp 2f
12464+1: pax_enter_kernel_user
12465+2:
12466+#else
12467+ pax_enter_kernel
12468+#endif
12469 movq %rsp,%rdi /* pt_regs pointer */
12470 xorl %esi,%esi /* no error code */
12471 call \do_sym
12472@@ -1044,6 +1352,16 @@ ENTRY(\sym)
12473 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12474 call save_paranoid
12475 TRACE_IRQS_OFF
12476+#ifdef CONFIG_PAX_MEMORY_UDEREF
12477+ testb $3, CS(%rsp)
12478+ jnz 1f
12479+ pax_enter_kernel
12480+ jmp 2f
12481+1: pax_enter_kernel_user
12482+2:
12483+#else
12484+ pax_enter_kernel
12485+#endif
12486 movq %rsp,%rdi /* pt_regs pointer */
12487 xorl %esi,%esi /* no error code */
12488 call \do_sym
12489@@ -1052,7 +1370,7 @@ ENTRY(\sym)
12490 END(\sym)
12491 .endm
12492
12493-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
12494+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
12495 .macro paranoidzeroentry_ist sym do_sym ist
12496 ENTRY(\sym)
12497 INTR_FRAME
12498@@ -1062,8 +1380,24 @@ ENTRY(\sym)
12499 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12500 call save_paranoid
12501 TRACE_IRQS_OFF
12502+#ifdef CONFIG_PAX_MEMORY_UDEREF
12503+ testb $3, CS(%rsp)
12504+ jnz 1f
12505+ pax_enter_kernel
12506+ jmp 2f
12507+1: pax_enter_kernel_user
12508+2:
12509+#else
12510+ pax_enter_kernel
12511+#endif
12512 movq %rsp,%rdi /* pt_regs pointer */
12513 xorl %esi,%esi /* no error code */
12514+#ifdef CONFIG_SMP
12515+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
12516+ lea init_tss(%r12), %r12
12517+#else
12518+ lea init_tss(%rip), %r12
12519+#endif
12520 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12521 call \do_sym
12522 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12523@@ -1080,6 +1414,16 @@ ENTRY(\sym)
12524 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12525 call error_entry
12526 DEFAULT_FRAME 0
12527+#ifdef CONFIG_PAX_MEMORY_UDEREF
12528+ testb $3, CS(%rsp)
12529+ jnz 1f
12530+ pax_enter_kernel
12531+ jmp 2f
12532+1: pax_enter_kernel_user
12533+2:
12534+#else
12535+ pax_enter_kernel
12536+#endif
12537 movq %rsp,%rdi /* pt_regs pointer */
12538 movq ORIG_RAX(%rsp),%rsi /* get error code */
12539 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12540@@ -1099,6 +1443,16 @@ ENTRY(\sym)
12541 call save_paranoid
12542 DEFAULT_FRAME 0
12543 TRACE_IRQS_OFF
12544+#ifdef CONFIG_PAX_MEMORY_UDEREF
12545+ testb $3, CS(%rsp)
12546+ jnz 1f
12547+ pax_enter_kernel
12548+ jmp 2f
12549+1: pax_enter_kernel_user
12550+2:
12551+#else
12552+ pax_enter_kernel
12553+#endif
12554 movq %rsp,%rdi /* pt_regs pointer */
12555 movq ORIG_RAX(%rsp),%rsi /* get error code */
12556 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12557@@ -1134,6 +1488,7 @@ gs_change:
12558 2: mfence /* workaround */
12559 SWAPGS
12560 popfq_cfi
12561+ pax_force_retaddr
12562 ret
12563 CFI_ENDPROC
12564 END(native_load_gs_index)
12565@@ -1158,6 +1513,7 @@ ENTRY(kernel_thread_helper)
12566 * Here we are in the child and the registers are set as they were
12567 * at kernel_thread() invocation in the parent.
12568 */
12569+ pax_force_fptr %rsi
12570 call *%rsi
12571 # exit
12572 mov %eax, %edi
12573@@ -1193,9 +1549,10 @@ ENTRY(kernel_execve)
12574 je int_ret_from_sys_call
12575 RESTORE_ARGS
12576 UNFAKE_STACK_FRAME
12577+ pax_force_retaddr
12578 ret
12579 CFI_ENDPROC
12580-END(kernel_execve)
12581+ENDPROC(kernel_execve)
12582
12583 /* Call softirq on interrupt stack. Interrupts are off. */
12584 ENTRY(call_softirq)
12585@@ -1213,9 +1570,10 @@ ENTRY(call_softirq)
12586 CFI_DEF_CFA_REGISTER rsp
12587 CFI_ADJUST_CFA_OFFSET -8
12588 decl PER_CPU_VAR(irq_count)
12589+ pax_force_retaddr
12590 ret
12591 CFI_ENDPROC
12592-END(call_softirq)
12593+ENDPROC(call_softirq)
12594
12595 #ifdef CONFIG_XEN
12596 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
12597@@ -1361,16 +1719,31 @@ ENTRY(paranoid_exit)
12598 TRACE_IRQS_OFF
12599 testl %ebx,%ebx /* swapgs needed? */
12600 jnz paranoid_restore
12601- testl $3,CS(%rsp)
12602+ testb $3,CS(%rsp)
12603 jnz paranoid_userspace
12604+#ifdef CONFIG_PAX_MEMORY_UDEREF
12605+ pax_exit_kernel
12606+ TRACE_IRQS_IRETQ 0
12607+ SWAPGS_UNSAFE_STACK
12608+ RESTORE_ALL 8
12609+ pax_force_retaddr
12610+ jmp irq_return
12611+#endif
12612 paranoid_swapgs:
12613+#ifdef CONFIG_PAX_MEMORY_UDEREF
12614+ pax_exit_kernel_user
12615+#else
12616+ pax_exit_kernel
12617+#endif
12618 TRACE_IRQS_IRETQ 0
12619 SWAPGS_UNSAFE_STACK
12620 RESTORE_ALL 8
12621 jmp irq_return
12622 paranoid_restore:
12623+ pax_exit_kernel
12624 TRACE_IRQS_IRETQ 0
12625 RESTORE_ALL 8
12626+ pax_force_retaddr
12627 jmp irq_return
12628 paranoid_userspace:
12629 GET_THREAD_INFO(%rcx)
12630@@ -1426,12 +1799,13 @@ ENTRY(error_entry)
12631 movq_cfi r14, R14+8
12632 movq_cfi r15, R15+8
12633 xorl %ebx,%ebx
12634- testl $3,CS+8(%rsp)
12635+ testb $3,CS+8(%rsp)
12636 je error_kernelspace
12637 error_swapgs:
12638 SWAPGS
12639 error_sti:
12640 TRACE_IRQS_OFF
12641+ pax_force_retaddr
12642 ret
12643
12644 /*
12645@@ -1490,6 +1864,16 @@ ENTRY(nmi)
12646 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12647 call save_paranoid
12648 DEFAULT_FRAME 0
12649+#ifdef CONFIG_PAX_MEMORY_UDEREF
12650+ testb $3, CS(%rsp)
12651+ jnz 1f
12652+ pax_enter_kernel
12653+ jmp 2f
12654+1: pax_enter_kernel_user
12655+2:
12656+#else
12657+ pax_enter_kernel
12658+#endif
12659 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
12660 movq %rsp,%rdi
12661 movq $-1,%rsi
12662@@ -1500,12 +1884,28 @@ ENTRY(nmi)
12663 DISABLE_INTERRUPTS(CLBR_NONE)
12664 testl %ebx,%ebx /* swapgs needed? */
12665 jnz nmi_restore
12666- testl $3,CS(%rsp)
12667+ testb $3,CS(%rsp)
12668 jnz nmi_userspace
12669+#ifdef CONFIG_PAX_MEMORY_UDEREF
12670+ pax_exit_kernel
12671+ SWAPGS_UNSAFE_STACK
12672+ RESTORE_ALL 8
12673+ pax_force_retaddr
12674+ jmp irq_return
12675+#endif
12676 nmi_swapgs:
12677+#ifdef CONFIG_PAX_MEMORY_UDEREF
12678+ pax_exit_kernel_user
12679+#else
12680+ pax_exit_kernel
12681+#endif
12682 SWAPGS_UNSAFE_STACK
12683+ RESTORE_ALL 8
12684+ jmp irq_return
12685 nmi_restore:
12686+ pax_exit_kernel
12687 RESTORE_ALL 8
12688+ pax_force_retaddr
12689 jmp irq_return
12690 nmi_userspace:
12691 GET_THREAD_INFO(%rcx)
12692diff -urNp linux-3.0.7/arch/x86/kernel/ftrace.c linux-3.0.7/arch/x86/kernel/ftrace.c
12693--- linux-3.0.7/arch/x86/kernel/ftrace.c 2011-07-21 22:17:23.000000000 -0400
12694+++ linux-3.0.7/arch/x86/kernel/ftrace.c 2011-08-23 21:47:55.000000000 -0400
12695@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
12696 static const void *mod_code_newcode; /* holds the text to write to the IP */
12697
12698 static unsigned nmi_wait_count;
12699-static atomic_t nmi_update_count = ATOMIC_INIT(0);
12700+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
12701
12702 int ftrace_arch_read_dyn_info(char *buf, int size)
12703 {
12704@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
12705
12706 r = snprintf(buf, size, "%u %u",
12707 nmi_wait_count,
12708- atomic_read(&nmi_update_count));
12709+ atomic_read_unchecked(&nmi_update_count));
12710 return r;
12711 }
12712
12713@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
12714
12715 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
12716 smp_rmb();
12717+ pax_open_kernel();
12718 ftrace_mod_code();
12719- atomic_inc(&nmi_update_count);
12720+ pax_close_kernel();
12721+ atomic_inc_unchecked(&nmi_update_count);
12722 }
12723 /* Must have previous changes seen before executions */
12724 smp_mb();
12725@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
12726 {
12727 unsigned char replaced[MCOUNT_INSN_SIZE];
12728
12729+ ip = ktla_ktva(ip);
12730+
12731 /*
12732 * Note: Due to modules and __init, code can
12733 * disappear and change, we need to protect against faulting
12734@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
12735 unsigned char old[MCOUNT_INSN_SIZE], *new;
12736 int ret;
12737
12738- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
12739+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
12740 new = ftrace_call_replace(ip, (unsigned long)func);
12741 ret = ftrace_modify_code(ip, old, new);
12742
12743@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
12744 {
12745 unsigned char code[MCOUNT_INSN_SIZE];
12746
12747+ ip = ktla_ktva(ip);
12748+
12749 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
12750 return -EFAULT;
12751
12752diff -urNp linux-3.0.7/arch/x86/kernel/head32.c linux-3.0.7/arch/x86/kernel/head32.c
12753--- linux-3.0.7/arch/x86/kernel/head32.c 2011-07-21 22:17:23.000000000 -0400
12754+++ linux-3.0.7/arch/x86/kernel/head32.c 2011-08-23 21:47:55.000000000 -0400
12755@@ -19,6 +19,7 @@
12756 #include <asm/io_apic.h>
12757 #include <asm/bios_ebda.h>
12758 #include <asm/tlbflush.h>
12759+#include <asm/boot.h>
12760
12761 static void __init i386_default_early_setup(void)
12762 {
12763@@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
12764 {
12765 memblock_init();
12766
12767- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12768+ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12769
12770 #ifdef CONFIG_BLK_DEV_INITRD
12771 /* Reserve INITRD */
12772diff -urNp linux-3.0.7/arch/x86/kernel/head_32.S linux-3.0.7/arch/x86/kernel/head_32.S
12773--- linux-3.0.7/arch/x86/kernel/head_32.S 2011-07-21 22:17:23.000000000 -0400
12774+++ linux-3.0.7/arch/x86/kernel/head_32.S 2011-08-23 21:47:55.000000000 -0400
12775@@ -25,6 +25,12 @@
12776 /* Physical address */
12777 #define pa(X) ((X) - __PAGE_OFFSET)
12778
12779+#ifdef CONFIG_PAX_KERNEXEC
12780+#define ta(X) (X)
12781+#else
12782+#define ta(X) ((X) - __PAGE_OFFSET)
12783+#endif
12784+
12785 /*
12786 * References to members of the new_cpu_data structure.
12787 */
12788@@ -54,11 +60,7 @@
12789 * and small than max_low_pfn, otherwise will waste some page table entries
12790 */
12791
12792-#if PTRS_PER_PMD > 1
12793-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
12794-#else
12795-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
12796-#endif
12797+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
12798
12799 /* Number of possible pages in the lowmem region */
12800 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
12801@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
12802 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12803
12804 /*
12805+ * Real beginning of normal "text" segment
12806+ */
12807+ENTRY(stext)
12808+ENTRY(_stext)
12809+
12810+/*
12811 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
12812 * %esi points to the real-mode code as a 32-bit pointer.
12813 * CS and DS must be 4 GB flat segments, but we don't depend on
12814@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12815 * can.
12816 */
12817 __HEAD
12818+
12819+#ifdef CONFIG_PAX_KERNEXEC
12820+ jmp startup_32
12821+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
12822+.fill PAGE_SIZE-5,1,0xcc
12823+#endif
12824+
12825 ENTRY(startup_32)
12826 movl pa(stack_start),%ecx
12827
12828@@ -105,6 +120,57 @@ ENTRY(startup_32)
12829 2:
12830 leal -__PAGE_OFFSET(%ecx),%esp
12831
12832+#ifdef CONFIG_SMP
12833+ movl $pa(cpu_gdt_table),%edi
12834+ movl $__per_cpu_load,%eax
12835+ movw %ax,__KERNEL_PERCPU + 2(%edi)
12836+ rorl $16,%eax
12837+ movb %al,__KERNEL_PERCPU + 4(%edi)
12838+ movb %ah,__KERNEL_PERCPU + 7(%edi)
12839+ movl $__per_cpu_end - 1,%eax
12840+ subl $__per_cpu_start,%eax
12841+ movw %ax,__KERNEL_PERCPU + 0(%edi)
12842+#endif
12843+
12844+#ifdef CONFIG_PAX_MEMORY_UDEREF
12845+ movl $NR_CPUS,%ecx
12846+ movl $pa(cpu_gdt_table),%edi
12847+1:
12848+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
12849+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
12850+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
12851+ addl $PAGE_SIZE_asm,%edi
12852+ loop 1b
12853+#endif
12854+
12855+#ifdef CONFIG_PAX_KERNEXEC
12856+ movl $pa(boot_gdt),%edi
12857+ movl $__LOAD_PHYSICAL_ADDR,%eax
12858+ movw %ax,__BOOT_CS + 2(%edi)
12859+ rorl $16,%eax
12860+ movb %al,__BOOT_CS + 4(%edi)
12861+ movb %ah,__BOOT_CS + 7(%edi)
12862+ rorl $16,%eax
12863+
12864+ ljmp $(__BOOT_CS),$1f
12865+1:
12866+
12867+ movl $NR_CPUS,%ecx
12868+ movl $pa(cpu_gdt_table),%edi
12869+ addl $__PAGE_OFFSET,%eax
12870+1:
12871+ movw %ax,__KERNEL_CS + 2(%edi)
12872+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
12873+ rorl $16,%eax
12874+ movb %al,__KERNEL_CS + 4(%edi)
12875+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
12876+ movb %ah,__KERNEL_CS + 7(%edi)
12877+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
12878+ rorl $16,%eax
12879+ addl $PAGE_SIZE_asm,%edi
12880+ loop 1b
12881+#endif
12882+
12883 /*
12884 * Clear BSS first so that there are no surprises...
12885 */
12886@@ -195,8 +261,11 @@ ENTRY(startup_32)
12887 movl %eax, pa(max_pfn_mapped)
12888
12889 /* Do early initialization of the fixmap area */
12890- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12891- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
12892+#ifdef CONFIG_COMPAT_VDSO
12893+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
12894+#else
12895+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
12896+#endif
12897 #else /* Not PAE */
12898
12899 page_pde_offset = (__PAGE_OFFSET >> 20);
12900@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12901 movl %eax, pa(max_pfn_mapped)
12902
12903 /* Do early initialization of the fixmap area */
12904- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12905- movl %eax,pa(initial_page_table+0xffc)
12906+#ifdef CONFIG_COMPAT_VDSO
12907+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
12908+#else
12909+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
12910+#endif
12911 #endif
12912
12913 #ifdef CONFIG_PARAVIRT
12914@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12915 cmpl $num_subarch_entries, %eax
12916 jae bad_subarch
12917
12918- movl pa(subarch_entries)(,%eax,4), %eax
12919- subl $__PAGE_OFFSET, %eax
12920- jmp *%eax
12921+ jmp *pa(subarch_entries)(,%eax,4)
12922
12923 bad_subarch:
12924 WEAK(lguest_entry)
12925@@ -255,10 +325,10 @@ WEAK(xen_entry)
12926 __INITDATA
12927
12928 subarch_entries:
12929- .long default_entry /* normal x86/PC */
12930- .long lguest_entry /* lguest hypervisor */
12931- .long xen_entry /* Xen hypervisor */
12932- .long default_entry /* Moorestown MID */
12933+ .long ta(default_entry) /* normal x86/PC */
12934+ .long ta(lguest_entry) /* lguest hypervisor */
12935+ .long ta(xen_entry) /* Xen hypervisor */
12936+ .long ta(default_entry) /* Moorestown MID */
12937 num_subarch_entries = (. - subarch_entries) / 4
12938 .previous
12939 #else
12940@@ -312,6 +382,7 @@ default_entry:
12941 orl %edx,%eax
12942 movl %eax,%cr4
12943
12944+#ifdef CONFIG_X86_PAE
12945 testb $X86_CR4_PAE, %al # check if PAE is enabled
12946 jz 6f
12947
12948@@ -340,6 +411,9 @@ default_entry:
12949 /* Make changes effective */
12950 wrmsr
12951
12952+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
12953+#endif
12954+
12955 6:
12956
12957 /*
12958@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
12959 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
12960 movl %eax,%ss # after changing gdt.
12961
12962- movl $(__USER_DS),%eax # DS/ES contains default USER segment
12963+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
12964 movl %eax,%ds
12965 movl %eax,%es
12966
12967@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
12968 */
12969 cmpb $0,ready
12970 jne 1f
12971- movl $gdt_page,%eax
12972+ movl $cpu_gdt_table,%eax
12973 movl $stack_canary,%ecx
12974+#ifdef CONFIG_SMP
12975+ addl $__per_cpu_load,%ecx
12976+#endif
12977 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
12978 shrl $16, %ecx
12979 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
12980 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
12981 1:
12982-#endif
12983 movl $(__KERNEL_STACK_CANARY),%eax
12984+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12985+ movl $(__USER_DS),%eax
12986+#else
12987+ xorl %eax,%eax
12988+#endif
12989 movl %eax,%gs
12990
12991 xorl %eax,%eax # Clear LDT
12992@@ -558,22 +639,22 @@ early_page_fault:
12993 jmp early_fault
12994
12995 early_fault:
12996- cld
12997 #ifdef CONFIG_PRINTK
12998+ cmpl $1,%ss:early_recursion_flag
12999+ je hlt_loop
13000+ incl %ss:early_recursion_flag
13001+ cld
13002 pusha
13003 movl $(__KERNEL_DS),%eax
13004 movl %eax,%ds
13005 movl %eax,%es
13006- cmpl $2,early_recursion_flag
13007- je hlt_loop
13008- incl early_recursion_flag
13009 movl %cr2,%eax
13010 pushl %eax
13011 pushl %edx /* trapno */
13012 pushl $fault_msg
13013 call printk
13014+; call dump_stack
13015 #endif
13016- call dump_stack
13017 hlt_loop:
13018 hlt
13019 jmp hlt_loop
13020@@ -581,8 +662,11 @@ hlt_loop:
13021 /* This is the default interrupt "handler" :-) */
13022 ALIGN
13023 ignore_int:
13024- cld
13025 #ifdef CONFIG_PRINTK
13026+ cmpl $2,%ss:early_recursion_flag
13027+ je hlt_loop
13028+ incl %ss:early_recursion_flag
13029+ cld
13030 pushl %eax
13031 pushl %ecx
13032 pushl %edx
13033@@ -591,9 +675,6 @@ ignore_int:
13034 movl $(__KERNEL_DS),%eax
13035 movl %eax,%ds
13036 movl %eax,%es
13037- cmpl $2,early_recursion_flag
13038- je hlt_loop
13039- incl early_recursion_flag
13040 pushl 16(%esp)
13041 pushl 24(%esp)
13042 pushl 32(%esp)
13043@@ -622,29 +703,43 @@ ENTRY(initial_code)
13044 /*
13045 * BSS section
13046 */
13047-__PAGE_ALIGNED_BSS
13048- .align PAGE_SIZE
13049 #ifdef CONFIG_X86_PAE
13050+.section .initial_pg_pmd,"a",@progbits
13051 initial_pg_pmd:
13052 .fill 1024*KPMDS,4,0
13053 #else
13054+.section .initial_page_table,"a",@progbits
13055 ENTRY(initial_page_table)
13056 .fill 1024,4,0
13057 #endif
13058+.section .initial_pg_fixmap,"a",@progbits
13059 initial_pg_fixmap:
13060 .fill 1024,4,0
13061+.section .empty_zero_page,"a",@progbits
13062 ENTRY(empty_zero_page)
13063 .fill 4096,1,0
13064+.section .swapper_pg_dir,"a",@progbits
13065 ENTRY(swapper_pg_dir)
13066+#ifdef CONFIG_X86_PAE
13067+ .fill 4,8,0
13068+#else
13069 .fill 1024,4,0
13070+#endif
13071+
13072+/*
13073+ * The IDT has to be page-aligned to simplify the Pentium
13074+ * F0 0F bug workaround.. We have a special link segment
13075+ * for this.
13076+ */
13077+.section .idt,"a",@progbits
13078+ENTRY(idt_table)
13079+ .fill 256,8,0
13080
13081 /*
13082 * This starts the data section.
13083 */
13084 #ifdef CONFIG_X86_PAE
13085-__PAGE_ALIGNED_DATA
13086- /* Page-aligned for the benefit of paravirt? */
13087- .align PAGE_SIZE
13088+.section .initial_page_table,"a",@progbits
13089 ENTRY(initial_page_table)
13090 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
13091 # if KPMDS == 3
13092@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
13093 # error "Kernel PMDs should be 1, 2 or 3"
13094 # endif
13095 .align PAGE_SIZE /* needs to be page-sized too */
13096+
13097+#ifdef CONFIG_PAX_PER_CPU_PGD
13098+ENTRY(cpu_pgd)
13099+ .rept NR_CPUS
13100+ .fill 4,8,0
13101+ .endr
13102+#endif
13103+
13104 #endif
13105
13106 .data
13107 .balign 4
13108 ENTRY(stack_start)
13109- .long init_thread_union+THREAD_SIZE
13110+ .long init_thread_union+THREAD_SIZE-8
13111+
13112+ready: .byte 0
13113
13114+.section .rodata,"a",@progbits
13115 early_recursion_flag:
13116 .long 0
13117
13118-ready: .byte 0
13119-
13120 int_msg:
13121 .asciz "Unknown interrupt or fault at: %p %p %p\n"
13122
13123@@ -707,7 +811,7 @@ fault_msg:
13124 .word 0 # 32 bit align gdt_desc.address
13125 boot_gdt_descr:
13126 .word __BOOT_DS+7
13127- .long boot_gdt - __PAGE_OFFSET
13128+ .long pa(boot_gdt)
13129
13130 .word 0 # 32-bit align idt_desc.address
13131 idt_descr:
13132@@ -718,7 +822,7 @@ idt_descr:
13133 .word 0 # 32 bit align gdt_desc.address
13134 ENTRY(early_gdt_descr)
13135 .word GDT_ENTRIES*8-1
13136- .long gdt_page /* Overwritten for secondary CPUs */
13137+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
13138
13139 /*
13140 * The boot_gdt must mirror the equivalent in setup.S and is
13141@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
13142 .align L1_CACHE_BYTES
13143 ENTRY(boot_gdt)
13144 .fill GDT_ENTRY_BOOT_CS,8,0
13145- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
13146- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
13147+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
13148+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
13149+
13150+ .align PAGE_SIZE_asm
13151+ENTRY(cpu_gdt_table)
13152+ .rept NR_CPUS
13153+ .quad 0x0000000000000000 /* NULL descriptor */
13154+ .quad 0x0000000000000000 /* 0x0b reserved */
13155+ .quad 0x0000000000000000 /* 0x13 reserved */
13156+ .quad 0x0000000000000000 /* 0x1b reserved */
13157+
13158+#ifdef CONFIG_PAX_KERNEXEC
13159+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
13160+#else
13161+ .quad 0x0000000000000000 /* 0x20 unused */
13162+#endif
13163+
13164+ .quad 0x0000000000000000 /* 0x28 unused */
13165+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
13166+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
13167+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
13168+ .quad 0x0000000000000000 /* 0x4b reserved */
13169+ .quad 0x0000000000000000 /* 0x53 reserved */
13170+ .quad 0x0000000000000000 /* 0x5b reserved */
13171+
13172+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
13173+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
13174+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
13175+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
13176+
13177+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
13178+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
13179+
13180+ /*
13181+ * Segments used for calling PnP BIOS have byte granularity.
13182+ * The code segments and data segments have fixed 64k limits,
13183+ * the transfer segment sizes are set at run time.
13184+ */
13185+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
13186+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
13187+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
13188+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
13189+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
13190+
13191+ /*
13192+ * The APM segments have byte granularity and their bases
13193+ * are set at run time. All have 64k limits.
13194+ */
13195+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
13196+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
13197+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
13198+
13199+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
13200+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
13201+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
13202+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
13203+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
13204+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
13205+
13206+ /* Be sure this is zeroed to avoid false validations in Xen */
13207+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
13208+ .endr
13209diff -urNp linux-3.0.7/arch/x86/kernel/head_64.S linux-3.0.7/arch/x86/kernel/head_64.S
13210--- linux-3.0.7/arch/x86/kernel/head_64.S 2011-07-21 22:17:23.000000000 -0400
13211+++ linux-3.0.7/arch/x86/kernel/head_64.S 2011-08-23 21:47:55.000000000 -0400
13212@@ -19,6 +19,7 @@
13213 #include <asm/cache.h>
13214 #include <asm/processor-flags.h>
13215 #include <asm/percpu.h>
13216+#include <asm/cpufeature.h>
13217
13218 #ifdef CONFIG_PARAVIRT
13219 #include <asm/asm-offsets.h>
13220@@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
13221 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
13222 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
13223 L3_START_KERNEL = pud_index(__START_KERNEL_map)
13224+L4_VMALLOC_START = pgd_index(VMALLOC_START)
13225+L3_VMALLOC_START = pud_index(VMALLOC_START)
13226+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
13227+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
13228
13229 .text
13230 __HEAD
13231@@ -85,35 +90,22 @@ startup_64:
13232 */
13233 addq %rbp, init_level4_pgt + 0(%rip)
13234 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
13235+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
13236+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
13237 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
13238
13239 addq %rbp, level3_ident_pgt + 0(%rip)
13240+#ifndef CONFIG_XEN
13241+ addq %rbp, level3_ident_pgt + 8(%rip)
13242+#endif
13243
13244- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
13245- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
13246+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
13247
13248- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
13249+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
13250+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
13251
13252- /* Add an Identity mapping if I am above 1G */
13253- leaq _text(%rip), %rdi
13254- andq $PMD_PAGE_MASK, %rdi
13255-
13256- movq %rdi, %rax
13257- shrq $PUD_SHIFT, %rax
13258- andq $(PTRS_PER_PUD - 1), %rax
13259- jz ident_complete
13260-
13261- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
13262- leaq level3_ident_pgt(%rip), %rbx
13263- movq %rdx, 0(%rbx, %rax, 8)
13264-
13265- movq %rdi, %rax
13266- shrq $PMD_SHIFT, %rax
13267- andq $(PTRS_PER_PMD - 1), %rax
13268- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
13269- leaq level2_spare_pgt(%rip), %rbx
13270- movq %rdx, 0(%rbx, %rax, 8)
13271-ident_complete:
13272+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
13273+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
13274
13275 /*
13276 * Fixup the kernel text+data virtual addresses. Note that
13277@@ -160,8 +152,8 @@ ENTRY(secondary_startup_64)
13278 * after the boot processor executes this code.
13279 */
13280
13281- /* Enable PAE mode and PGE */
13282- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
13283+ /* Enable PAE mode and PSE/PGE */
13284+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
13285 movq %rax, %cr4
13286
13287 /* Setup early boot stage 4 level pagetables. */
13288@@ -183,9 +175,14 @@ ENTRY(secondary_startup_64)
13289 movl $MSR_EFER, %ecx
13290 rdmsr
13291 btsl $_EFER_SCE, %eax /* Enable System Call */
13292- btl $20,%edi /* No Execute supported? */
13293+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
13294 jnc 1f
13295 btsl $_EFER_NX, %eax
13296+ leaq init_level4_pgt(%rip), %rdi
13297+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
13298+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
13299+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
13300+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
13301 1: wrmsr /* Make changes effective */
13302
13303 /* Setup cr0 */
13304@@ -269,7 +266,7 @@ ENTRY(secondary_startup_64)
13305 bad_address:
13306 jmp bad_address
13307
13308- .section ".init.text","ax"
13309+ __INIT
13310 #ifdef CONFIG_EARLY_PRINTK
13311 .globl early_idt_handlers
13312 early_idt_handlers:
13313@@ -314,18 +311,23 @@ ENTRY(early_idt_handler)
13314 #endif /* EARLY_PRINTK */
13315 1: hlt
13316 jmp 1b
13317+ .previous
13318
13319 #ifdef CONFIG_EARLY_PRINTK
13320+ __INITDATA
13321 early_recursion_flag:
13322 .long 0
13323+ .previous
13324
13325+ .section .rodata,"a",@progbits
13326 early_idt_msg:
13327 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
13328 early_idt_ripmsg:
13329 .asciz "RIP %s\n"
13330-#endif /* CONFIG_EARLY_PRINTK */
13331 .previous
13332+#endif /* CONFIG_EARLY_PRINTK */
13333
13334+ .section .rodata,"a",@progbits
13335 #define NEXT_PAGE(name) \
13336 .balign PAGE_SIZE; \
13337 ENTRY(name)
13338@@ -338,7 +340,6 @@ ENTRY(name)
13339 i = i + 1 ; \
13340 .endr
13341
13342- .data
13343 /*
13344 * This default setting generates an ident mapping at address 0x100000
13345 * and a mapping for the kernel that precisely maps virtual address
13346@@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt)
13347 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13348 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
13349 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13350+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
13351+ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
13352+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
13353+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13354 .org init_level4_pgt + L4_START_KERNEL*8, 0
13355 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
13356 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
13357
13358+#ifdef CONFIG_PAX_PER_CPU_PGD
13359+NEXT_PAGE(cpu_pgd)
13360+ .rept NR_CPUS
13361+ .fill 512,8,0
13362+ .endr
13363+#endif
13364+
13365 NEXT_PAGE(level3_ident_pgt)
13366 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13367+#ifdef CONFIG_XEN
13368 .fill 511,8,0
13369+#else
13370+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
13371+ .fill 510,8,0
13372+#endif
13373+
13374+NEXT_PAGE(level3_vmalloc_pgt)
13375+ .fill 512,8,0
13376+
13377+NEXT_PAGE(level3_vmemmap_pgt)
13378+ .fill L3_VMEMMAP_START,8,0
13379+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13380
13381 NEXT_PAGE(level3_kernel_pgt)
13382 .fill L3_START_KERNEL,8,0
13383@@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt)
13384 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
13385 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13386
13387+NEXT_PAGE(level2_vmemmap_pgt)
13388+ .fill 512,8,0
13389+
13390 NEXT_PAGE(level2_fixmap_pgt)
13391- .fill 506,8,0
13392- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13393- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
13394- .fill 5,8,0
13395+ .fill 507,8,0
13396+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
13397+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
13398+ .fill 4,8,0
13399
13400-NEXT_PAGE(level1_fixmap_pgt)
13401+NEXT_PAGE(level1_vsyscall_pgt)
13402 .fill 512,8,0
13403
13404-NEXT_PAGE(level2_ident_pgt)
13405- /* Since I easily can, map the first 1G.
13406+ /* Since I easily can, map the first 2G.
13407 * Don't set NX because code runs from these pages.
13408 */
13409- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
13410+NEXT_PAGE(level2_ident_pgt)
13411+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
13412
13413 NEXT_PAGE(level2_kernel_pgt)
13414 /*
13415@@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt)
13416 * If you want to increase this then increase MODULES_VADDR
13417 * too.)
13418 */
13419- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
13420- KERNEL_IMAGE_SIZE/PMD_SIZE)
13421-
13422-NEXT_PAGE(level2_spare_pgt)
13423- .fill 512, 8, 0
13424+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
13425
13426 #undef PMDS
13427 #undef NEXT_PAGE
13428
13429- .data
13430+ .align PAGE_SIZE
13431+ENTRY(cpu_gdt_table)
13432+ .rept NR_CPUS
13433+ .quad 0x0000000000000000 /* NULL descriptor */
13434+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
13435+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
13436+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
13437+ .quad 0x00cffb000000ffff /* __USER32_CS */
13438+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
13439+ .quad 0x00affb000000ffff /* __USER_CS */
13440+
13441+#ifdef CONFIG_PAX_KERNEXEC
13442+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
13443+#else
13444+ .quad 0x0 /* unused */
13445+#endif
13446+
13447+ .quad 0,0 /* TSS */
13448+ .quad 0,0 /* LDT */
13449+ .quad 0,0,0 /* three TLS descriptors */
13450+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
13451+ /* asm/segment.h:GDT_ENTRIES must match this */
13452+
13453+ /* zero the remaining page */
13454+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
13455+ .endr
13456+
13457 .align 16
13458 .globl early_gdt_descr
13459 early_gdt_descr:
13460 .word GDT_ENTRIES*8-1
13461 early_gdt_descr_base:
13462- .quad INIT_PER_CPU_VAR(gdt_page)
13463+ .quad cpu_gdt_table
13464
13465 ENTRY(phys_base)
13466 /* This must match the first entry in level2_kernel_pgt */
13467 .quad 0x0000000000000000
13468
13469 #include "../../x86/xen/xen-head.S"
13470-
13471- .section .bss, "aw", @nobits
13472+
13473+ .section .rodata,"a",@progbits
13474 .align L1_CACHE_BYTES
13475 ENTRY(idt_table)
13476- .skip IDT_ENTRIES * 16
13477+ .fill 512,8,0
13478
13479 __PAGE_ALIGNED_BSS
13480 .align PAGE_SIZE
13481diff -urNp linux-3.0.7/arch/x86/kernel/i386_ksyms_32.c linux-3.0.7/arch/x86/kernel/i386_ksyms_32.c
13482--- linux-3.0.7/arch/x86/kernel/i386_ksyms_32.c 2011-07-21 22:17:23.000000000 -0400
13483+++ linux-3.0.7/arch/x86/kernel/i386_ksyms_32.c 2011-08-23 21:47:55.000000000 -0400
13484@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
13485 EXPORT_SYMBOL(cmpxchg8b_emu);
13486 #endif
13487
13488+EXPORT_SYMBOL_GPL(cpu_gdt_table);
13489+
13490 /* Networking helper routines. */
13491 EXPORT_SYMBOL(csum_partial_copy_generic);
13492+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
13493+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
13494
13495 EXPORT_SYMBOL(__get_user_1);
13496 EXPORT_SYMBOL(__get_user_2);
13497@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
13498
13499 EXPORT_SYMBOL(csum_partial);
13500 EXPORT_SYMBOL(empty_zero_page);
13501+
13502+#ifdef CONFIG_PAX_KERNEXEC
13503+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
13504+#endif
13505diff -urNp linux-3.0.7/arch/x86/kernel/i8259.c linux-3.0.7/arch/x86/kernel/i8259.c
13506--- linux-3.0.7/arch/x86/kernel/i8259.c 2011-07-21 22:17:23.000000000 -0400
13507+++ linux-3.0.7/arch/x86/kernel/i8259.c 2011-08-23 21:47:55.000000000 -0400
13508@@ -210,7 +210,7 @@ spurious_8259A_irq:
13509 "spurious 8259A interrupt: IRQ%d.\n", irq);
13510 spurious_irq_mask |= irqmask;
13511 }
13512- atomic_inc(&irq_err_count);
13513+ atomic_inc_unchecked(&irq_err_count);
13514 /*
13515 * Theoretically we do not have to handle this IRQ,
13516 * but in Linux this does not cause problems and is
13517diff -urNp linux-3.0.7/arch/x86/kernel/init_task.c linux-3.0.7/arch/x86/kernel/init_task.c
13518--- linux-3.0.7/arch/x86/kernel/init_task.c 2011-07-21 22:17:23.000000000 -0400
13519+++ linux-3.0.7/arch/x86/kernel/init_task.c 2011-08-23 21:47:55.000000000 -0400
13520@@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
13521 * way process stacks are handled. This is done by having a special
13522 * "init_task" linker map entry..
13523 */
13524-union thread_union init_thread_union __init_task_data =
13525- { INIT_THREAD_INFO(init_task) };
13526+union thread_union init_thread_union __init_task_data;
13527
13528 /*
13529 * Initial task structure.
13530@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
13531 * section. Since TSS's are completely CPU-local, we want them
13532 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
13533 */
13534-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
13535-
13536+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
13537+EXPORT_SYMBOL(init_tss);
13538diff -urNp linux-3.0.7/arch/x86/kernel/ioport.c linux-3.0.7/arch/x86/kernel/ioport.c
13539--- linux-3.0.7/arch/x86/kernel/ioport.c 2011-07-21 22:17:23.000000000 -0400
13540+++ linux-3.0.7/arch/x86/kernel/ioport.c 2011-08-23 21:48:14.000000000 -0400
13541@@ -6,6 +6,7 @@
13542 #include <linux/sched.h>
13543 #include <linux/kernel.h>
13544 #include <linux/capability.h>
13545+#include <linux/security.h>
13546 #include <linux/errno.h>
13547 #include <linux/types.h>
13548 #include <linux/ioport.h>
13549@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
13550
13551 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
13552 return -EINVAL;
13553+#ifdef CONFIG_GRKERNSEC_IO
13554+ if (turn_on && grsec_disable_privio) {
13555+ gr_handle_ioperm();
13556+ return -EPERM;
13557+ }
13558+#endif
13559 if (turn_on && !capable(CAP_SYS_RAWIO))
13560 return -EPERM;
13561
13562@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
13563 * because the ->io_bitmap_max value must match the bitmap
13564 * contents:
13565 */
13566- tss = &per_cpu(init_tss, get_cpu());
13567+ tss = init_tss + get_cpu();
13568
13569 if (turn_on)
13570 bitmap_clear(t->io_bitmap_ptr, from, num);
13571@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
13572 return -EINVAL;
13573 /* Trying to gain more privileges? */
13574 if (level > old) {
13575+#ifdef CONFIG_GRKERNSEC_IO
13576+ if (grsec_disable_privio) {
13577+ gr_handle_iopl();
13578+ return -EPERM;
13579+ }
13580+#endif
13581 if (!capable(CAP_SYS_RAWIO))
13582 return -EPERM;
13583 }
13584diff -urNp linux-3.0.7/arch/x86/kernel/irq_32.c linux-3.0.7/arch/x86/kernel/irq_32.c
13585--- linux-3.0.7/arch/x86/kernel/irq_32.c 2011-07-21 22:17:23.000000000 -0400
13586+++ linux-3.0.7/arch/x86/kernel/irq_32.c 2011-08-23 21:47:55.000000000 -0400
13587@@ -36,7 +36,7 @@ static int check_stack_overflow(void)
13588 __asm__ __volatile__("andl %%esp,%0" :
13589 "=r" (sp) : "0" (THREAD_SIZE - 1));
13590
13591- return sp < (sizeof(struct thread_info) + STACK_WARN);
13592+ return sp < STACK_WARN;
13593 }
13594
13595 static void print_stack_overflow(void)
13596@@ -54,8 +54,8 @@ static inline void print_stack_overflow(
13597 * per-CPU IRQ handling contexts (thread information and stack)
13598 */
13599 union irq_ctx {
13600- struct thread_info tinfo;
13601- u32 stack[THREAD_SIZE/sizeof(u32)];
13602+ unsigned long previous_esp;
13603+ u32 stack[THREAD_SIZE/sizeof(u32)];
13604 } __attribute__((aligned(THREAD_SIZE)));
13605
13606 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
13607@@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
13608 static inline int
13609 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
13610 {
13611- union irq_ctx *curctx, *irqctx;
13612+ union irq_ctx *irqctx;
13613 u32 *isp, arg1, arg2;
13614
13615- curctx = (union irq_ctx *) current_thread_info();
13616 irqctx = __this_cpu_read(hardirq_ctx);
13617
13618 /*
13619@@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
13620 * handler) we can't do that and just have to keep using the
13621 * current stack (which is the irq stack already after all)
13622 */
13623- if (unlikely(curctx == irqctx))
13624+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
13625 return 0;
13626
13627 /* build the stack frame on the IRQ stack */
13628- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13629- irqctx->tinfo.task = curctx->tinfo.task;
13630- irqctx->tinfo.previous_esp = current_stack_pointer;
13631+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13632+ irqctx->previous_esp = current_stack_pointer;
13633
13634- /*
13635- * Copy the softirq bits in preempt_count so that the
13636- * softirq checks work in the hardirq context.
13637- */
13638- irqctx->tinfo.preempt_count =
13639- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
13640- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
13641+#ifdef CONFIG_PAX_MEMORY_UDEREF
13642+ __set_fs(MAKE_MM_SEG(0));
13643+#endif
13644
13645 if (unlikely(overflow))
13646 call_on_stack(print_stack_overflow, isp);
13647@@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
13648 : "0" (irq), "1" (desc), "2" (isp),
13649 "D" (desc->handle_irq)
13650 : "memory", "cc", "ecx");
13651+
13652+#ifdef CONFIG_PAX_MEMORY_UDEREF
13653+ __set_fs(current_thread_info()->addr_limit);
13654+#endif
13655+
13656 return 1;
13657 }
13658
13659@@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
13660 */
13661 void __cpuinit irq_ctx_init(int cpu)
13662 {
13663- union irq_ctx *irqctx;
13664-
13665 if (per_cpu(hardirq_ctx, cpu))
13666 return;
13667
13668- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13669- THREAD_FLAGS,
13670- THREAD_ORDER));
13671- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13672- irqctx->tinfo.cpu = cpu;
13673- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
13674- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13675-
13676- per_cpu(hardirq_ctx, cpu) = irqctx;
13677-
13678- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13679- THREAD_FLAGS,
13680- THREAD_ORDER));
13681- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13682- irqctx->tinfo.cpu = cpu;
13683- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13684-
13685- per_cpu(softirq_ctx, cpu) = irqctx;
13686+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13687+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13688
13689 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
13690 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
13691@@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
13692 asmlinkage void do_softirq(void)
13693 {
13694 unsigned long flags;
13695- struct thread_info *curctx;
13696 union irq_ctx *irqctx;
13697 u32 *isp;
13698
13699@@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
13700 local_irq_save(flags);
13701
13702 if (local_softirq_pending()) {
13703- curctx = current_thread_info();
13704 irqctx = __this_cpu_read(softirq_ctx);
13705- irqctx->tinfo.task = curctx->task;
13706- irqctx->tinfo.previous_esp = current_stack_pointer;
13707+ irqctx->previous_esp = current_stack_pointer;
13708
13709 /* build the stack frame on the softirq stack */
13710- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13711+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13712+
13713+#ifdef CONFIG_PAX_MEMORY_UDEREF
13714+ __set_fs(MAKE_MM_SEG(0));
13715+#endif
13716
13717 call_on_stack(__do_softirq, isp);
13718+
13719+#ifdef CONFIG_PAX_MEMORY_UDEREF
13720+ __set_fs(current_thread_info()->addr_limit);
13721+#endif
13722+
13723 /*
13724 * Shouldn't happen, we returned above if in_interrupt():
13725 */
13726diff -urNp linux-3.0.7/arch/x86/kernel/irq.c linux-3.0.7/arch/x86/kernel/irq.c
13727--- linux-3.0.7/arch/x86/kernel/irq.c 2011-07-21 22:17:23.000000000 -0400
13728+++ linux-3.0.7/arch/x86/kernel/irq.c 2011-08-23 21:47:55.000000000 -0400
13729@@ -17,7 +17,7 @@
13730 #include <asm/mce.h>
13731 #include <asm/hw_irq.h>
13732
13733-atomic_t irq_err_count;
13734+atomic_unchecked_t irq_err_count;
13735
13736 /* Function pointer for generic interrupt vector handling */
13737 void (*x86_platform_ipi_callback)(void) = NULL;
13738@@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
13739 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
13740 seq_printf(p, " Machine check polls\n");
13741 #endif
13742- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
13743+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
13744 #if defined(CONFIG_X86_IO_APIC)
13745- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
13746+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
13747 #endif
13748 return 0;
13749 }
13750@@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
13751
13752 u64 arch_irq_stat(void)
13753 {
13754- u64 sum = atomic_read(&irq_err_count);
13755+ u64 sum = atomic_read_unchecked(&irq_err_count);
13756
13757 #ifdef CONFIG_X86_IO_APIC
13758- sum += atomic_read(&irq_mis_count);
13759+ sum += atomic_read_unchecked(&irq_mis_count);
13760 #endif
13761 return sum;
13762 }
13763diff -urNp linux-3.0.7/arch/x86/kernel/kgdb.c linux-3.0.7/arch/x86/kernel/kgdb.c
13764--- linux-3.0.7/arch/x86/kernel/kgdb.c 2011-07-21 22:17:23.000000000 -0400
13765+++ linux-3.0.7/arch/x86/kernel/kgdb.c 2011-08-23 21:47:55.000000000 -0400
13766@@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem,
13767 #ifdef CONFIG_X86_32
13768 switch (regno) {
13769 case GDB_SS:
13770- if (!user_mode_vm(regs))
13771+ if (!user_mode(regs))
13772 *(unsigned long *)mem = __KERNEL_DS;
13773 break;
13774 case GDB_SP:
13775- if (!user_mode_vm(regs))
13776+ if (!user_mode(regs))
13777 *(unsigned long *)mem = kernel_stack_pointer(regs);
13778 break;
13779 case GDB_GS:
13780@@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
13781 case 'k':
13782 /* clear the trace bit */
13783 linux_regs->flags &= ~X86_EFLAGS_TF;
13784- atomic_set(&kgdb_cpu_doing_single_step, -1);
13785+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
13786
13787 /* set the trace bit if we're stepping */
13788 if (remcomInBuffer[0] == 's') {
13789 linux_regs->flags |= X86_EFLAGS_TF;
13790- atomic_set(&kgdb_cpu_doing_single_step,
13791+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
13792 raw_smp_processor_id());
13793 }
13794
13795@@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
13796 return NOTIFY_DONE;
13797
13798 case DIE_DEBUG:
13799- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
13800+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
13801 if (user_mode(regs))
13802 return single_step_cont(regs, args);
13803 break;
13804diff -urNp linux-3.0.7/arch/x86/kernel/kprobes.c linux-3.0.7/arch/x86/kernel/kprobes.c
13805--- linux-3.0.7/arch/x86/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
13806+++ linux-3.0.7/arch/x86/kernel/kprobes.c 2011-10-11 10:44:33.000000000 -0400
13807@@ -115,8 +115,11 @@ static void __kprobes __synthesize_relat
13808 } __attribute__((packed)) *insn;
13809
13810 insn = (struct __arch_relative_insn *)from;
13811+
13812+ pax_open_kernel();
13813 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
13814 insn->op = op;
13815+ pax_close_kernel();
13816 }
13817
13818 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
13819@@ -153,7 +156,7 @@ static int __kprobes can_boost(kprobe_op
13820 kprobe_opcode_t opcode;
13821 kprobe_opcode_t *orig_opcodes = opcodes;
13822
13823- if (search_exception_tables((unsigned long)opcodes))
13824+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
13825 return 0; /* Page fault may occur on this address. */
13826
13827 retry:
13828@@ -314,7 +317,9 @@ static int __kprobes __copy_instruction(
13829 }
13830 }
13831 insn_get_length(&insn);
13832+ pax_open_kernel();
13833 memcpy(dest, insn.kaddr, insn.length);
13834+ pax_close_kernel();
13835
13836 #ifdef CONFIG_X86_64
13837 if (insn_rip_relative(&insn)) {
13838@@ -338,7 +343,9 @@ static int __kprobes __copy_instruction(
13839 (u8 *) dest;
13840 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
13841 disp = (u8 *) dest + insn_offset_displacement(&insn);
13842+ pax_open_kernel();
13843 *(s32 *) disp = (s32) newdisp;
13844+ pax_close_kernel();
13845 }
13846 #endif
13847 return insn.length;
13848@@ -352,12 +359,12 @@ static void __kprobes arch_copy_kprobe(s
13849 */
13850 __copy_instruction(p->ainsn.insn, p->addr, 0);
13851
13852- if (can_boost(p->addr))
13853+ if (can_boost(ktla_ktva(p->addr)))
13854 p->ainsn.boostable = 0;
13855 else
13856 p->ainsn.boostable = -1;
13857
13858- p->opcode = *p->addr;
13859+ p->opcode = *(ktla_ktva(p->addr));
13860 }
13861
13862 int __kprobes arch_prepare_kprobe(struct kprobe *p)
13863@@ -474,7 +481,7 @@ static void __kprobes setup_singlestep(s
13864 * nor set current_kprobe, because it doesn't use single
13865 * stepping.
13866 */
13867- regs->ip = (unsigned long)p->ainsn.insn;
13868+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13869 preempt_enable_no_resched();
13870 return;
13871 }
13872@@ -493,7 +500,7 @@ static void __kprobes setup_singlestep(s
13873 if (p->opcode == BREAKPOINT_INSTRUCTION)
13874 regs->ip = (unsigned long)p->addr;
13875 else
13876- regs->ip = (unsigned long)p->ainsn.insn;
13877+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13878 }
13879
13880 /*
13881@@ -572,7 +579,7 @@ static int __kprobes kprobe_handler(stru
13882 setup_singlestep(p, regs, kcb, 0);
13883 return 1;
13884 }
13885- } else if (*addr != BREAKPOINT_INSTRUCTION) {
13886+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
13887 /*
13888 * The breakpoint instruction was removed right
13889 * after we hit it. Another cpu has removed
13890@@ -680,6 +687,9 @@ static void __used __kprobes kretprobe_t
13891 " movq %rax, 152(%rsp)\n"
13892 RESTORE_REGS_STRING
13893 " popfq\n"
13894+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
13895+ " btsq $63,(%rsp)\n"
13896+#endif
13897 #else
13898 " pushf\n"
13899 SAVE_REGS_STRING
13900@@ -817,7 +827,7 @@ static void __kprobes resume_execution(s
13901 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
13902 {
13903 unsigned long *tos = stack_addr(regs);
13904- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
13905+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
13906 unsigned long orig_ip = (unsigned long)p->addr;
13907 kprobe_opcode_t *insn = p->ainsn.insn;
13908
13909@@ -999,7 +1009,7 @@ int __kprobes kprobe_exceptions_notify(s
13910 struct die_args *args = data;
13911 int ret = NOTIFY_DONE;
13912
13913- if (args->regs && user_mode_vm(args->regs))
13914+ if (args->regs && user_mode(args->regs))
13915 return ret;
13916
13917 switch (val) {
13918@@ -1381,7 +1391,7 @@ int __kprobes arch_prepare_optimized_kpr
13919 * Verify if the address gap is in 2GB range, because this uses
13920 * a relative jump.
13921 */
13922- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
13923+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
13924 if (abs(rel) > 0x7fffffff)
13925 return -ERANGE;
13926
13927@@ -1402,11 +1412,11 @@ int __kprobes arch_prepare_optimized_kpr
13928 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
13929
13930 /* Set probe function call */
13931- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
13932+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
13933
13934 /* Set returning jmp instruction at the tail of out-of-line buffer */
13935 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
13936- (u8 *)op->kp.addr + op->optinsn.size);
13937+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
13938
13939 flush_icache_range((unsigned long) buf,
13940 (unsigned long) buf + TMPL_END_IDX +
13941@@ -1428,7 +1438,7 @@ static void __kprobes setup_optimize_kpr
13942 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
13943
13944 /* Backup instructions which will be replaced by jump address */
13945- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
13946+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
13947 RELATIVE_ADDR_SIZE);
13948
13949 insn_buf[0] = RELATIVEJUMP_OPCODE;
13950diff -urNp linux-3.0.7/arch/x86/kernel/kvm.c linux-3.0.7/arch/x86/kernel/kvm.c
13951--- linux-3.0.7/arch/x86/kernel/kvm.c 2011-07-21 22:17:23.000000000 -0400
13952+++ linux-3.0.7/arch/x86/kernel/kvm.c 2011-08-24 18:10:12.000000000 -0400
13953@@ -426,6 +426,7 @@ static void __init paravirt_ops_setup(vo
13954 pv_mmu_ops.set_pud = kvm_set_pud;
13955 #if PAGETABLE_LEVELS == 4
13956 pv_mmu_ops.set_pgd = kvm_set_pgd;
13957+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
13958 #endif
13959 #endif
13960 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
13961diff -urNp linux-3.0.7/arch/x86/kernel/ldt.c linux-3.0.7/arch/x86/kernel/ldt.c
13962--- linux-3.0.7/arch/x86/kernel/ldt.c 2011-07-21 22:17:23.000000000 -0400
13963+++ linux-3.0.7/arch/x86/kernel/ldt.c 2011-08-23 21:47:55.000000000 -0400
13964@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
13965 if (reload) {
13966 #ifdef CONFIG_SMP
13967 preempt_disable();
13968- load_LDT(pc);
13969+ load_LDT_nolock(pc);
13970 if (!cpumask_equal(mm_cpumask(current->mm),
13971 cpumask_of(smp_processor_id())))
13972 smp_call_function(flush_ldt, current->mm, 1);
13973 preempt_enable();
13974 #else
13975- load_LDT(pc);
13976+ load_LDT_nolock(pc);
13977 #endif
13978 }
13979 if (oldsize) {
13980@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
13981 return err;
13982
13983 for (i = 0; i < old->size; i++)
13984- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
13985+ write_ldt_entry(new->ldt, i, old->ldt + i);
13986 return 0;
13987 }
13988
13989@@ -116,6 +116,24 @@ int init_new_context(struct task_struct
13990 retval = copy_ldt(&mm->context, &old_mm->context);
13991 mutex_unlock(&old_mm->context.lock);
13992 }
13993+
13994+ if (tsk == current) {
13995+ mm->context.vdso = 0;
13996+
13997+#ifdef CONFIG_X86_32
13998+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13999+ mm->context.user_cs_base = 0UL;
14000+ mm->context.user_cs_limit = ~0UL;
14001+
14002+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
14003+ cpus_clear(mm->context.cpu_user_cs_mask);
14004+#endif
14005+
14006+#endif
14007+#endif
14008+
14009+ }
14010+
14011 return retval;
14012 }
14013
14014@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
14015 }
14016 }
14017
14018+#ifdef CONFIG_PAX_SEGMEXEC
14019+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
14020+ error = -EINVAL;
14021+ goto out_unlock;
14022+ }
14023+#endif
14024+
14025 fill_ldt(&ldt, &ldt_info);
14026 if (oldmode)
14027 ldt.avl = 0;
14028diff -urNp linux-3.0.7/arch/x86/kernel/machine_kexec_32.c linux-3.0.7/arch/x86/kernel/machine_kexec_32.c
14029--- linux-3.0.7/arch/x86/kernel/machine_kexec_32.c 2011-07-21 22:17:23.000000000 -0400
14030+++ linux-3.0.7/arch/x86/kernel/machine_kexec_32.c 2011-08-23 21:47:55.000000000 -0400
14031@@ -27,7 +27,7 @@
14032 #include <asm/cacheflush.h>
14033 #include <asm/debugreg.h>
14034
14035-static void set_idt(void *newidt, __u16 limit)
14036+static void set_idt(struct desc_struct *newidt, __u16 limit)
14037 {
14038 struct desc_ptr curidt;
14039
14040@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
14041 }
14042
14043
14044-static void set_gdt(void *newgdt, __u16 limit)
14045+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
14046 {
14047 struct desc_ptr curgdt;
14048
14049@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
14050 }
14051
14052 control_page = page_address(image->control_code_page);
14053- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
14054+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
14055
14056 relocate_kernel_ptr = control_page;
14057 page_list[PA_CONTROL_PAGE] = __pa(control_page);
14058diff -urNp linux-3.0.7/arch/x86/kernel/microcode_intel.c linux-3.0.7/arch/x86/kernel/microcode_intel.c
14059--- linux-3.0.7/arch/x86/kernel/microcode_intel.c 2011-07-21 22:17:23.000000000 -0400
14060+++ linux-3.0.7/arch/x86/kernel/microcode_intel.c 2011-10-06 04:17:55.000000000 -0400
14061@@ -440,13 +440,13 @@ static enum ucode_state request_microcod
14062
14063 static int get_ucode_user(void *to, const void *from, size_t n)
14064 {
14065- return copy_from_user(to, from, n);
14066+ return copy_from_user(to, (const void __force_user *)from, n);
14067 }
14068
14069 static enum ucode_state
14070 request_microcode_user(int cpu, const void __user *buf, size_t size)
14071 {
14072- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
14073+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
14074 }
14075
14076 static void microcode_fini_cpu(int cpu)
14077diff -urNp linux-3.0.7/arch/x86/kernel/module.c linux-3.0.7/arch/x86/kernel/module.c
14078--- linux-3.0.7/arch/x86/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
14079+++ linux-3.0.7/arch/x86/kernel/module.c 2011-08-23 21:47:55.000000000 -0400
14080@@ -36,21 +36,66 @@
14081 #define DEBUGP(fmt...)
14082 #endif
14083
14084-void *module_alloc(unsigned long size)
14085+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
14086 {
14087 if (PAGE_ALIGN(size) > MODULES_LEN)
14088 return NULL;
14089 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
14090- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
14091+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
14092 -1, __builtin_return_address(0));
14093 }
14094
14095+void *module_alloc(unsigned long size)
14096+{
14097+
14098+#ifdef CONFIG_PAX_KERNEXEC
14099+ return __module_alloc(size, PAGE_KERNEL);
14100+#else
14101+ return __module_alloc(size, PAGE_KERNEL_EXEC);
14102+#endif
14103+
14104+}
14105+
14106 /* Free memory returned from module_alloc */
14107 void module_free(struct module *mod, void *module_region)
14108 {
14109 vfree(module_region);
14110 }
14111
14112+#ifdef CONFIG_PAX_KERNEXEC
14113+#ifdef CONFIG_X86_32
14114+void *module_alloc_exec(unsigned long size)
14115+{
14116+ struct vm_struct *area;
14117+
14118+ if (size == 0)
14119+ return NULL;
14120+
14121+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
14122+ return area ? area->addr : NULL;
14123+}
14124+EXPORT_SYMBOL(module_alloc_exec);
14125+
14126+void module_free_exec(struct module *mod, void *module_region)
14127+{
14128+ vunmap(module_region);
14129+}
14130+EXPORT_SYMBOL(module_free_exec);
14131+#else
14132+void module_free_exec(struct module *mod, void *module_region)
14133+{
14134+ module_free(mod, module_region);
14135+}
14136+EXPORT_SYMBOL(module_free_exec);
14137+
14138+void *module_alloc_exec(unsigned long size)
14139+{
14140+ return __module_alloc(size, PAGE_KERNEL_RX);
14141+}
14142+EXPORT_SYMBOL(module_alloc_exec);
14143+#endif
14144+#endif
14145+
14146 /* We don't need anything special. */
14147 int module_frob_arch_sections(Elf_Ehdr *hdr,
14148 Elf_Shdr *sechdrs,
14149@@ -70,14 +115,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
14150 unsigned int i;
14151 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
14152 Elf32_Sym *sym;
14153- uint32_t *location;
14154+ uint32_t *plocation, location;
14155
14156 DEBUGP("Applying relocate section %u to %u\n", relsec,
14157 sechdrs[relsec].sh_info);
14158 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
14159 /* This is where to make the change */
14160- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
14161- + rel[i].r_offset;
14162+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
14163+ location = (uint32_t)plocation;
14164+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
14165+ plocation = ktla_ktva((void *)plocation);
14166 /* This is the symbol it is referring to. Note that all
14167 undefined symbols have been resolved. */
14168 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
14169@@ -86,11 +133,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
14170 switch (ELF32_R_TYPE(rel[i].r_info)) {
14171 case R_386_32:
14172 /* We add the value into the location given */
14173- *location += sym->st_value;
14174+ pax_open_kernel();
14175+ *plocation += sym->st_value;
14176+ pax_close_kernel();
14177 break;
14178 case R_386_PC32:
14179 /* Add the value, subtract its postition */
14180- *location += sym->st_value - (uint32_t)location;
14181+ pax_open_kernel();
14182+ *plocation += sym->st_value - location;
14183+ pax_close_kernel();
14184 break;
14185 default:
14186 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
14187@@ -146,21 +197,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
14188 case R_X86_64_NONE:
14189 break;
14190 case R_X86_64_64:
14191+ pax_open_kernel();
14192 *(u64 *)loc = val;
14193+ pax_close_kernel();
14194 break;
14195 case R_X86_64_32:
14196+ pax_open_kernel();
14197 *(u32 *)loc = val;
14198+ pax_close_kernel();
14199 if (val != *(u32 *)loc)
14200 goto overflow;
14201 break;
14202 case R_X86_64_32S:
14203+ pax_open_kernel();
14204 *(s32 *)loc = val;
14205+ pax_close_kernel();
14206 if ((s64)val != *(s32 *)loc)
14207 goto overflow;
14208 break;
14209 case R_X86_64_PC32:
14210 val -= (u64)loc;
14211+ pax_open_kernel();
14212 *(u32 *)loc = val;
14213+ pax_close_kernel();
14214+
14215 #if 0
14216 if ((s64)val != *(s32 *)loc)
14217 goto overflow;
14218diff -urNp linux-3.0.7/arch/x86/kernel/paravirt.c linux-3.0.7/arch/x86/kernel/paravirt.c
14219--- linux-3.0.7/arch/x86/kernel/paravirt.c 2011-07-21 22:17:23.000000000 -0400
14220+++ linux-3.0.7/arch/x86/kernel/paravirt.c 2011-08-23 21:48:14.000000000 -0400
14221@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
14222 {
14223 return x;
14224 }
14225+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
14226+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
14227+#endif
14228
14229 void __init default_banner(void)
14230 {
14231@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
14232 * corresponding structure. */
14233 static void *get_call_destination(u8 type)
14234 {
14235- struct paravirt_patch_template tmpl = {
14236+ const struct paravirt_patch_template tmpl = {
14237 .pv_init_ops = pv_init_ops,
14238 .pv_time_ops = pv_time_ops,
14239 .pv_cpu_ops = pv_cpu_ops,
14240@@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
14241 .pv_lock_ops = pv_lock_ops,
14242 #endif
14243 };
14244+
14245+ pax_track_stack();
14246+
14247 return *((void **)&tmpl + type);
14248 }
14249
14250@@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
14251 if (opfunc == NULL)
14252 /* If there's no function, patch it with a ud2a (BUG) */
14253 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
14254- else if (opfunc == _paravirt_nop)
14255+ else if (opfunc == (void *)_paravirt_nop)
14256 /* If the operation is a nop, then nop the callsite */
14257 ret = paravirt_patch_nop();
14258
14259 /* identity functions just return their single argument */
14260- else if (opfunc == _paravirt_ident_32)
14261+ else if (opfunc == (void *)_paravirt_ident_32)
14262 ret = paravirt_patch_ident_32(insnbuf, len);
14263- else if (opfunc == _paravirt_ident_64)
14264+ else if (opfunc == (void *)_paravirt_ident_64)
14265 ret = paravirt_patch_ident_64(insnbuf, len);
14266+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
14267+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
14268+ ret = paravirt_patch_ident_64(insnbuf, len);
14269+#endif
14270
14271 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
14272 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
14273@@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
14274 if (insn_len > len || start == NULL)
14275 insn_len = len;
14276 else
14277- memcpy(insnbuf, start, insn_len);
14278+ memcpy(insnbuf, ktla_ktva(start), insn_len);
14279
14280 return insn_len;
14281 }
14282@@ -294,22 +304,22 @@ void arch_flush_lazy_mmu_mode(void)
14283 preempt_enable();
14284 }
14285
14286-struct pv_info pv_info = {
14287+struct pv_info pv_info __read_only = {
14288 .name = "bare hardware",
14289 .paravirt_enabled = 0,
14290 .kernel_rpl = 0,
14291 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
14292 };
14293
14294-struct pv_init_ops pv_init_ops = {
14295+struct pv_init_ops pv_init_ops __read_only = {
14296 .patch = native_patch,
14297 };
14298
14299-struct pv_time_ops pv_time_ops = {
14300+struct pv_time_ops pv_time_ops __read_only = {
14301 .sched_clock = native_sched_clock,
14302 };
14303
14304-struct pv_irq_ops pv_irq_ops = {
14305+struct pv_irq_ops pv_irq_ops __read_only = {
14306 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
14307 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
14308 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
14309@@ -321,7 +331,7 @@ struct pv_irq_ops pv_irq_ops = {
14310 #endif
14311 };
14312
14313-struct pv_cpu_ops pv_cpu_ops = {
14314+struct pv_cpu_ops pv_cpu_ops __read_only = {
14315 .cpuid = native_cpuid,
14316 .get_debugreg = native_get_debugreg,
14317 .set_debugreg = native_set_debugreg,
14318@@ -382,21 +392,26 @@ struct pv_cpu_ops pv_cpu_ops = {
14319 .end_context_switch = paravirt_nop,
14320 };
14321
14322-struct pv_apic_ops pv_apic_ops = {
14323+struct pv_apic_ops pv_apic_ops __read_only = {
14324 #ifdef CONFIG_X86_LOCAL_APIC
14325 .startup_ipi_hook = paravirt_nop,
14326 #endif
14327 };
14328
14329-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
14330+#ifdef CONFIG_X86_32
14331+#ifdef CONFIG_X86_PAE
14332+/* 64-bit pagetable entries */
14333+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
14334+#else
14335 /* 32-bit pagetable entries */
14336 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
14337+#endif
14338 #else
14339 /* 64-bit pagetable entries */
14340 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
14341 #endif
14342
14343-struct pv_mmu_ops pv_mmu_ops = {
14344+struct pv_mmu_ops pv_mmu_ops __read_only = {
14345
14346 .read_cr2 = native_read_cr2,
14347 .write_cr2 = native_write_cr2,
14348@@ -446,6 +461,7 @@ struct pv_mmu_ops pv_mmu_ops = {
14349 .make_pud = PTE_IDENT,
14350
14351 .set_pgd = native_set_pgd,
14352+ .set_pgd_batched = native_set_pgd_batched,
14353 #endif
14354 #endif /* PAGETABLE_LEVELS >= 3 */
14355
14356@@ -465,6 +481,12 @@ struct pv_mmu_ops pv_mmu_ops = {
14357 },
14358
14359 .set_fixmap = native_set_fixmap,
14360+
14361+#ifdef CONFIG_PAX_KERNEXEC
14362+ .pax_open_kernel = native_pax_open_kernel,
14363+ .pax_close_kernel = native_pax_close_kernel,
14364+#endif
14365+
14366 };
14367
14368 EXPORT_SYMBOL_GPL(pv_time_ops);
14369diff -urNp linux-3.0.7/arch/x86/kernel/paravirt-spinlocks.c linux-3.0.7/arch/x86/kernel/paravirt-spinlocks.c
14370--- linux-3.0.7/arch/x86/kernel/paravirt-spinlocks.c 2011-07-21 22:17:23.000000000 -0400
14371+++ linux-3.0.7/arch/x86/kernel/paravirt-spinlocks.c 2011-08-23 21:47:55.000000000 -0400
14372@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
14373 arch_spin_lock(lock);
14374 }
14375
14376-struct pv_lock_ops pv_lock_ops = {
14377+struct pv_lock_ops pv_lock_ops __read_only = {
14378 #ifdef CONFIG_SMP
14379 .spin_is_locked = __ticket_spin_is_locked,
14380 .spin_is_contended = __ticket_spin_is_contended,
14381diff -urNp linux-3.0.7/arch/x86/kernel/pci-iommu_table.c linux-3.0.7/arch/x86/kernel/pci-iommu_table.c
14382--- linux-3.0.7/arch/x86/kernel/pci-iommu_table.c 2011-07-21 22:17:23.000000000 -0400
14383+++ linux-3.0.7/arch/x86/kernel/pci-iommu_table.c 2011-08-23 21:48:14.000000000 -0400
14384@@ -2,7 +2,7 @@
14385 #include <asm/iommu_table.h>
14386 #include <linux/string.h>
14387 #include <linux/kallsyms.h>
14388-
14389+#include <linux/sched.h>
14390
14391 #define DEBUG 1
14392
14393@@ -51,6 +51,8 @@ void __init check_iommu_entries(struct i
14394 {
14395 struct iommu_table_entry *p, *q, *x;
14396
14397+ pax_track_stack();
14398+
14399 /* Simple cyclic dependency checker. */
14400 for (p = start; p < finish; p++) {
14401 q = find_dependents_of(start, finish, p);
14402diff -urNp linux-3.0.7/arch/x86/kernel/process_32.c linux-3.0.7/arch/x86/kernel/process_32.c
14403--- linux-3.0.7/arch/x86/kernel/process_32.c 2011-07-21 22:17:23.000000000 -0400
14404+++ linux-3.0.7/arch/x86/kernel/process_32.c 2011-08-23 21:47:55.000000000 -0400
14405@@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __as
14406 unsigned long thread_saved_pc(struct task_struct *tsk)
14407 {
14408 return ((unsigned long *)tsk->thread.sp)[3];
14409+//XXX return tsk->thread.eip;
14410 }
14411
14412 #ifndef CONFIG_SMP
14413@@ -126,15 +127,14 @@ void __show_regs(struct pt_regs *regs, i
14414 unsigned long sp;
14415 unsigned short ss, gs;
14416
14417- if (user_mode_vm(regs)) {
14418+ if (user_mode(regs)) {
14419 sp = regs->sp;
14420 ss = regs->ss & 0xffff;
14421- gs = get_user_gs(regs);
14422 } else {
14423 sp = kernel_stack_pointer(regs);
14424 savesegment(ss, ss);
14425- savesegment(gs, gs);
14426 }
14427+ gs = get_user_gs(regs);
14428
14429 show_regs_common();
14430
14431@@ -196,13 +196,14 @@ int copy_thread(unsigned long clone_flag
14432 struct task_struct *tsk;
14433 int err;
14434
14435- childregs = task_pt_regs(p);
14436+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
14437 *childregs = *regs;
14438 childregs->ax = 0;
14439 childregs->sp = sp;
14440
14441 p->thread.sp = (unsigned long) childregs;
14442 p->thread.sp0 = (unsigned long) (childregs+1);
14443+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14444
14445 p->thread.ip = (unsigned long) ret_from_fork;
14446
14447@@ -292,7 +293,7 @@ __switch_to(struct task_struct *prev_p,
14448 struct thread_struct *prev = &prev_p->thread,
14449 *next = &next_p->thread;
14450 int cpu = smp_processor_id();
14451- struct tss_struct *tss = &per_cpu(init_tss, cpu);
14452+ struct tss_struct *tss = init_tss + cpu;
14453 bool preload_fpu;
14454
14455 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
14456@@ -327,6 +328,10 @@ __switch_to(struct task_struct *prev_p,
14457 */
14458 lazy_save_gs(prev->gs);
14459
14460+#ifdef CONFIG_PAX_MEMORY_UDEREF
14461+ __set_fs(task_thread_info(next_p)->addr_limit);
14462+#endif
14463+
14464 /*
14465 * Load the per-thread Thread-Local Storage descriptor.
14466 */
14467@@ -362,6 +367,9 @@ __switch_to(struct task_struct *prev_p,
14468 */
14469 arch_end_context_switch(next_p);
14470
14471+ percpu_write(current_task, next_p);
14472+ percpu_write(current_tinfo, &next_p->tinfo);
14473+
14474 if (preload_fpu)
14475 __math_state_restore();
14476
14477@@ -371,8 +379,6 @@ __switch_to(struct task_struct *prev_p,
14478 if (prev->gs | next->gs)
14479 lazy_load_gs(next->gs);
14480
14481- percpu_write(current_task, next_p);
14482-
14483 return prev_p;
14484 }
14485
14486@@ -402,4 +408,3 @@ unsigned long get_wchan(struct task_stru
14487 } while (count++ < 16);
14488 return 0;
14489 }
14490-
14491diff -urNp linux-3.0.7/arch/x86/kernel/process_64.c linux-3.0.7/arch/x86/kernel/process_64.c
14492--- linux-3.0.7/arch/x86/kernel/process_64.c 2011-07-21 22:17:23.000000000 -0400
14493+++ linux-3.0.7/arch/x86/kernel/process_64.c 2011-08-23 21:47:55.000000000 -0400
14494@@ -87,7 +87,7 @@ static void __exit_idle(void)
14495 void exit_idle(void)
14496 {
14497 /* idle loop has pid 0 */
14498- if (current->pid)
14499+ if (task_pid_nr(current))
14500 return;
14501 __exit_idle();
14502 }
14503@@ -260,8 +260,7 @@ int copy_thread(unsigned long clone_flag
14504 struct pt_regs *childregs;
14505 struct task_struct *me = current;
14506
14507- childregs = ((struct pt_regs *)
14508- (THREAD_SIZE + task_stack_page(p))) - 1;
14509+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
14510 *childregs = *regs;
14511
14512 childregs->ax = 0;
14513@@ -273,6 +272,7 @@ int copy_thread(unsigned long clone_flag
14514 p->thread.sp = (unsigned long) childregs;
14515 p->thread.sp0 = (unsigned long) (childregs+1);
14516 p->thread.usersp = me->thread.usersp;
14517+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14518
14519 set_tsk_thread_flag(p, TIF_FORK);
14520
14521@@ -375,7 +375,7 @@ __switch_to(struct task_struct *prev_p,
14522 struct thread_struct *prev = &prev_p->thread;
14523 struct thread_struct *next = &next_p->thread;
14524 int cpu = smp_processor_id();
14525- struct tss_struct *tss = &per_cpu(init_tss, cpu);
14526+ struct tss_struct *tss = init_tss + cpu;
14527 unsigned fsindex, gsindex;
14528 bool preload_fpu;
14529
14530@@ -471,10 +471,9 @@ __switch_to(struct task_struct *prev_p,
14531 prev->usersp = percpu_read(old_rsp);
14532 percpu_write(old_rsp, next->usersp);
14533 percpu_write(current_task, next_p);
14534+ percpu_write(current_tinfo, &next_p->tinfo);
14535
14536- percpu_write(kernel_stack,
14537- (unsigned long)task_stack_page(next_p) +
14538- THREAD_SIZE - KERNEL_STACK_OFFSET);
14539+ percpu_write(kernel_stack, next->sp0);
14540
14541 /*
14542 * Now maybe reload the debug registers and handle I/O bitmaps
14543@@ -536,12 +535,11 @@ unsigned long get_wchan(struct task_stru
14544 if (!p || p == current || p->state == TASK_RUNNING)
14545 return 0;
14546 stack = (unsigned long)task_stack_page(p);
14547- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
14548+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
14549 return 0;
14550 fp = *(u64 *)(p->thread.sp);
14551 do {
14552- if (fp < (unsigned long)stack ||
14553- fp >= (unsigned long)stack+THREAD_SIZE)
14554+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
14555 return 0;
14556 ip = *(u64 *)(fp+8);
14557 if (!in_sched_functions(ip))
14558diff -urNp linux-3.0.7/arch/x86/kernel/process.c linux-3.0.7/arch/x86/kernel/process.c
14559--- linux-3.0.7/arch/x86/kernel/process.c 2011-07-21 22:17:23.000000000 -0400
14560+++ linux-3.0.7/arch/x86/kernel/process.c 2011-08-30 18:23:52.000000000 -0400
14561@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
14562
14563 void free_thread_info(struct thread_info *ti)
14564 {
14565- free_thread_xstate(ti->task);
14566 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
14567 }
14568
14569+static struct kmem_cache *task_struct_cachep;
14570+
14571 void arch_task_cache_init(void)
14572 {
14573- task_xstate_cachep =
14574- kmem_cache_create("task_xstate", xstate_size,
14575+ /* create a slab on which task_structs can be allocated */
14576+ task_struct_cachep =
14577+ kmem_cache_create("task_struct", sizeof(struct task_struct),
14578+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
14579+
14580+ task_xstate_cachep =
14581+ kmem_cache_create("task_xstate", xstate_size,
14582 __alignof__(union thread_xstate),
14583- SLAB_PANIC | SLAB_NOTRACK, NULL);
14584+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
14585+}
14586+
14587+struct task_struct *alloc_task_struct_node(int node)
14588+{
14589+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
14590+}
14591+
14592+void free_task_struct(struct task_struct *task)
14593+{
14594+ free_thread_xstate(task);
14595+ kmem_cache_free(task_struct_cachep, task);
14596 }
14597
14598 /*
14599@@ -70,7 +87,7 @@ void exit_thread(void)
14600 unsigned long *bp = t->io_bitmap_ptr;
14601
14602 if (bp) {
14603- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
14604+ struct tss_struct *tss = init_tss + get_cpu();
14605
14606 t->io_bitmap_ptr = NULL;
14607 clear_thread_flag(TIF_IO_BITMAP);
14608@@ -106,7 +123,7 @@ void show_regs_common(void)
14609
14610 printk(KERN_CONT "\n");
14611 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
14612- current->pid, current->comm, print_tainted(),
14613+ task_pid_nr(current), current->comm, print_tainted(),
14614 init_utsname()->release,
14615 (int)strcspn(init_utsname()->version, " "),
14616 init_utsname()->version);
14617@@ -120,6 +137,9 @@ void flush_thread(void)
14618 {
14619 struct task_struct *tsk = current;
14620
14621+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
14622+ loadsegment(gs, 0);
14623+#endif
14624 flush_ptrace_hw_breakpoint(tsk);
14625 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
14626 /*
14627@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
14628 regs.di = (unsigned long) arg;
14629
14630 #ifdef CONFIG_X86_32
14631- regs.ds = __USER_DS;
14632- regs.es = __USER_DS;
14633+ regs.ds = __KERNEL_DS;
14634+ regs.es = __KERNEL_DS;
14635 regs.fs = __KERNEL_PERCPU;
14636- regs.gs = __KERNEL_STACK_CANARY;
14637+ savesegment(gs, regs.gs);
14638 #else
14639 regs.ss = __KERNEL_DS;
14640 #endif
14641@@ -403,7 +423,7 @@ void default_idle(void)
14642 EXPORT_SYMBOL(default_idle);
14643 #endif
14644
14645-void stop_this_cpu(void *dummy)
14646+__noreturn void stop_this_cpu(void *dummy)
14647 {
14648 local_irq_disable();
14649 /*
14650@@ -668,16 +688,37 @@ static int __init idle_setup(char *str)
14651 }
14652 early_param("idle", idle_setup);
14653
14654-unsigned long arch_align_stack(unsigned long sp)
14655+#ifdef CONFIG_PAX_RANDKSTACK
14656+void pax_randomize_kstack(struct pt_regs *regs)
14657 {
14658- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
14659- sp -= get_random_int() % 8192;
14660- return sp & ~0xf;
14661-}
14662+ struct thread_struct *thread = &current->thread;
14663+ unsigned long time;
14664
14665-unsigned long arch_randomize_brk(struct mm_struct *mm)
14666-{
14667- unsigned long range_end = mm->brk + 0x02000000;
14668- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
14669-}
14670+ if (!randomize_va_space)
14671+ return;
14672+
14673+ if (v8086_mode(regs))
14674+ return;
14675
14676+ rdtscl(time);
14677+
14678+ /* P4 seems to return a 0 LSB, ignore it */
14679+#ifdef CONFIG_MPENTIUM4
14680+ time &= 0x3EUL;
14681+ time <<= 2;
14682+#elif defined(CONFIG_X86_64)
14683+ time &= 0xFUL;
14684+ time <<= 4;
14685+#else
14686+ time &= 0x1FUL;
14687+ time <<= 3;
14688+#endif
14689+
14690+ thread->sp0 ^= time;
14691+ load_sp0(init_tss + smp_processor_id(), thread);
14692+
14693+#ifdef CONFIG_X86_64
14694+ percpu_write(kernel_stack, thread->sp0);
14695+#endif
14696+}
14697+#endif
14698diff -urNp linux-3.0.7/arch/x86/kernel/ptrace.c linux-3.0.7/arch/x86/kernel/ptrace.c
14699--- linux-3.0.7/arch/x86/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
14700+++ linux-3.0.7/arch/x86/kernel/ptrace.c 2011-08-23 21:47:55.000000000 -0400
14701@@ -821,7 +821,7 @@ long arch_ptrace(struct task_struct *chi
14702 unsigned long addr, unsigned long data)
14703 {
14704 int ret;
14705- unsigned long __user *datap = (unsigned long __user *)data;
14706+ unsigned long __user *datap = (__force unsigned long __user *)data;
14707
14708 switch (request) {
14709 /* read the word at location addr in the USER area. */
14710@@ -906,14 +906,14 @@ long arch_ptrace(struct task_struct *chi
14711 if ((int) addr < 0)
14712 return -EIO;
14713 ret = do_get_thread_area(child, addr,
14714- (struct user_desc __user *)data);
14715+ (__force struct user_desc __user *) data);
14716 break;
14717
14718 case PTRACE_SET_THREAD_AREA:
14719 if ((int) addr < 0)
14720 return -EIO;
14721 ret = do_set_thread_area(child, addr,
14722- (struct user_desc __user *)data, 0);
14723+ (__force struct user_desc __user *) data, 0);
14724 break;
14725 #endif
14726
14727@@ -1330,7 +1330,7 @@ static void fill_sigtrap_info(struct tas
14728 memset(info, 0, sizeof(*info));
14729 info->si_signo = SIGTRAP;
14730 info->si_code = si_code;
14731- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
14732+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
14733 }
14734
14735 void user_single_step_siginfo(struct task_struct *tsk,
14736diff -urNp linux-3.0.7/arch/x86/kernel/pvclock.c linux-3.0.7/arch/x86/kernel/pvclock.c
14737--- linux-3.0.7/arch/x86/kernel/pvclock.c 2011-07-21 22:17:23.000000000 -0400
14738+++ linux-3.0.7/arch/x86/kernel/pvclock.c 2011-08-23 21:47:55.000000000 -0400
14739@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
14740 return pv_tsc_khz;
14741 }
14742
14743-static atomic64_t last_value = ATOMIC64_INIT(0);
14744+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
14745
14746 void pvclock_resume(void)
14747 {
14748- atomic64_set(&last_value, 0);
14749+ atomic64_set_unchecked(&last_value, 0);
14750 }
14751
14752 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
14753@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
14754 * updating at the same time, and one of them could be slightly behind,
14755 * making the assumption that last_value always go forward fail to hold.
14756 */
14757- last = atomic64_read(&last_value);
14758+ last = atomic64_read_unchecked(&last_value);
14759 do {
14760 if (ret < last)
14761 return last;
14762- last = atomic64_cmpxchg(&last_value, last, ret);
14763+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
14764 } while (unlikely(last != ret));
14765
14766 return ret;
14767diff -urNp linux-3.0.7/arch/x86/kernel/reboot.c linux-3.0.7/arch/x86/kernel/reboot.c
14768--- linux-3.0.7/arch/x86/kernel/reboot.c 2011-07-21 22:17:23.000000000 -0400
14769+++ linux-3.0.7/arch/x86/kernel/reboot.c 2011-08-23 21:47:55.000000000 -0400
14770@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
14771 EXPORT_SYMBOL(pm_power_off);
14772
14773 static const struct desc_ptr no_idt = {};
14774-static int reboot_mode;
14775+static unsigned short reboot_mode;
14776 enum reboot_type reboot_type = BOOT_ACPI;
14777 int reboot_force;
14778
14779@@ -315,13 +315,17 @@ core_initcall(reboot_init);
14780 extern const unsigned char machine_real_restart_asm[];
14781 extern const u64 machine_real_restart_gdt[3];
14782
14783-void machine_real_restart(unsigned int type)
14784+__noreturn void machine_real_restart(unsigned int type)
14785 {
14786 void *restart_va;
14787 unsigned long restart_pa;
14788- void (*restart_lowmem)(unsigned int);
14789+ void (* __noreturn restart_lowmem)(unsigned int);
14790 u64 *lowmem_gdt;
14791
14792+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14793+ struct desc_struct *gdt;
14794+#endif
14795+
14796 local_irq_disable();
14797
14798 /* Write zero to CMOS register number 0x0f, which the BIOS POST
14799@@ -347,14 +351,14 @@ void machine_real_restart(unsigned int t
14800 boot)". This seems like a fairly standard thing that gets set by
14801 REBOOT.COM programs, and the previous reset routine did this
14802 too. */
14803- *((unsigned short *)0x472) = reboot_mode;
14804+ *(unsigned short *)(__va(0x472)) = reboot_mode;
14805
14806 /* Patch the GDT in the low memory trampoline */
14807 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
14808
14809 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
14810 restart_pa = virt_to_phys(restart_va);
14811- restart_lowmem = (void (*)(unsigned int))restart_pa;
14812+ restart_lowmem = (void *)restart_pa;
14813
14814 /* GDT[0]: GDT self-pointer */
14815 lowmem_gdt[0] =
14816@@ -365,7 +369,33 @@ void machine_real_restart(unsigned int t
14817 GDT_ENTRY(0x009b, restart_pa, 0xffff);
14818
14819 /* Jump to the identity-mapped low memory code */
14820+
14821+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14822+ gdt = get_cpu_gdt_table(smp_processor_id());
14823+ pax_open_kernel();
14824+#ifdef CONFIG_PAX_MEMORY_UDEREF
14825+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
14826+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
14827+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
14828+#endif
14829+#ifdef CONFIG_PAX_KERNEXEC
14830+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
14831+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
14832+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
14833+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
14834+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
14835+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
14836+#endif
14837+ pax_close_kernel();
14838+#endif
14839+
14840+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14841+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
14842+ unreachable();
14843+#else
14844 restart_lowmem(type);
14845+#endif
14846+
14847 }
14848 #ifdef CONFIG_APM_MODULE
14849 EXPORT_SYMBOL(machine_real_restart);
14850@@ -523,7 +553,7 @@ void __attribute__((weak)) mach_reboot_f
14851 * try to force a triple fault and then cycle between hitting the keyboard
14852 * controller and doing that
14853 */
14854-static void native_machine_emergency_restart(void)
14855+__noreturn static void native_machine_emergency_restart(void)
14856 {
14857 int i;
14858 int attempt = 0;
14859@@ -647,13 +677,13 @@ void native_machine_shutdown(void)
14860 #endif
14861 }
14862
14863-static void __machine_emergency_restart(int emergency)
14864+static __noreturn void __machine_emergency_restart(int emergency)
14865 {
14866 reboot_emergency = emergency;
14867 machine_ops.emergency_restart();
14868 }
14869
14870-static void native_machine_restart(char *__unused)
14871+static __noreturn void native_machine_restart(char *__unused)
14872 {
14873 printk("machine restart\n");
14874
14875@@ -662,7 +692,7 @@ static void native_machine_restart(char
14876 __machine_emergency_restart(0);
14877 }
14878
14879-static void native_machine_halt(void)
14880+static __noreturn void native_machine_halt(void)
14881 {
14882 /* stop other cpus and apics */
14883 machine_shutdown();
14884@@ -673,7 +703,7 @@ static void native_machine_halt(void)
14885 stop_this_cpu(NULL);
14886 }
14887
14888-static void native_machine_power_off(void)
14889+__noreturn static void native_machine_power_off(void)
14890 {
14891 if (pm_power_off) {
14892 if (!reboot_force)
14893@@ -682,6 +712,7 @@ static void native_machine_power_off(voi
14894 }
14895 /* a fallback in case there is no PM info available */
14896 tboot_shutdown(TB_SHUTDOWN_HALT);
14897+ unreachable();
14898 }
14899
14900 struct machine_ops machine_ops = {
14901diff -urNp linux-3.0.7/arch/x86/kernel/setup.c linux-3.0.7/arch/x86/kernel/setup.c
14902--- linux-3.0.7/arch/x86/kernel/setup.c 2011-07-21 22:17:23.000000000 -0400
14903+++ linux-3.0.7/arch/x86/kernel/setup.c 2011-10-06 04:17:55.000000000 -0400
14904@@ -447,7 +447,7 @@ static void __init parse_setup_data(void
14905
14906 switch (data->type) {
14907 case SETUP_E820_EXT:
14908- parse_e820_ext(data);
14909+ parse_e820_ext((struct setup_data __force_kernel *)data);
14910 break;
14911 case SETUP_DTB:
14912 add_dtb(pa_data);
14913@@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
14914 * area (640->1Mb) as ram even though it is not.
14915 * take them out.
14916 */
14917- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
14918+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
14919 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
14920 }
14921
14922@@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
14923
14924 if (!boot_params.hdr.root_flags)
14925 root_mountflags &= ~MS_RDONLY;
14926- init_mm.start_code = (unsigned long) _text;
14927- init_mm.end_code = (unsigned long) _etext;
14928+ init_mm.start_code = ktla_ktva((unsigned long) _text);
14929+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
14930 init_mm.end_data = (unsigned long) _edata;
14931 init_mm.brk = _brk_end;
14932
14933- code_resource.start = virt_to_phys(_text);
14934- code_resource.end = virt_to_phys(_etext)-1;
14935- data_resource.start = virt_to_phys(_etext);
14936+ code_resource.start = virt_to_phys(ktla_ktva(_text));
14937+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
14938+ data_resource.start = virt_to_phys(_sdata);
14939 data_resource.end = virt_to_phys(_edata)-1;
14940 bss_resource.start = virt_to_phys(&__bss_start);
14941 bss_resource.end = virt_to_phys(&__bss_stop)-1;
14942diff -urNp linux-3.0.7/arch/x86/kernel/setup_percpu.c linux-3.0.7/arch/x86/kernel/setup_percpu.c
14943--- linux-3.0.7/arch/x86/kernel/setup_percpu.c 2011-07-21 22:17:23.000000000 -0400
14944+++ linux-3.0.7/arch/x86/kernel/setup_percpu.c 2011-08-23 21:47:55.000000000 -0400
14945@@ -21,19 +21,17 @@
14946 #include <asm/cpu.h>
14947 #include <asm/stackprotector.h>
14948
14949-DEFINE_PER_CPU(int, cpu_number);
14950+#ifdef CONFIG_SMP
14951+DEFINE_PER_CPU(unsigned int, cpu_number);
14952 EXPORT_PER_CPU_SYMBOL(cpu_number);
14953+#endif
14954
14955-#ifdef CONFIG_X86_64
14956 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
14957-#else
14958-#define BOOT_PERCPU_OFFSET 0
14959-#endif
14960
14961 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
14962 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
14963
14964-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
14965+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
14966 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
14967 };
14968 EXPORT_SYMBOL(__per_cpu_offset);
14969@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
14970 {
14971 #ifdef CONFIG_X86_32
14972 struct desc_struct gdt;
14973+ unsigned long base = per_cpu_offset(cpu);
14974
14975- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
14976- 0x2 | DESCTYPE_S, 0x8);
14977- gdt.s = 1;
14978+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
14979+ 0x83 | DESCTYPE_S, 0xC);
14980 write_gdt_entry(get_cpu_gdt_table(cpu),
14981 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
14982 #endif
14983@@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
14984 /* alrighty, percpu areas up and running */
14985 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
14986 for_each_possible_cpu(cpu) {
14987+#ifdef CONFIG_CC_STACKPROTECTOR
14988+#ifdef CONFIG_X86_32
14989+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
14990+#endif
14991+#endif
14992 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
14993 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
14994 per_cpu(cpu_number, cpu) = cpu;
14995@@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
14996 */
14997 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
14998 #endif
14999+#ifdef CONFIG_CC_STACKPROTECTOR
15000+#ifdef CONFIG_X86_32
15001+ if (!cpu)
15002+ per_cpu(stack_canary.canary, cpu) = canary;
15003+#endif
15004+#endif
15005 /*
15006 * Up to this point, the boot CPU has been using .init.data
15007 * area. Reload any changed state for the boot CPU.
15008diff -urNp linux-3.0.7/arch/x86/kernel/signal.c linux-3.0.7/arch/x86/kernel/signal.c
15009--- linux-3.0.7/arch/x86/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
15010+++ linux-3.0.7/arch/x86/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
15011@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
15012 * Align the stack pointer according to the i386 ABI,
15013 * i.e. so that on function entry ((sp + 4) & 15) == 0.
15014 */
15015- sp = ((sp + 4) & -16ul) - 4;
15016+ sp = ((sp - 12) & -16ul) - 4;
15017 #else /* !CONFIG_X86_32 */
15018 sp = round_down(sp, 16) - 8;
15019 #endif
15020@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
15021 * Return an always-bogus address instead so we will die with SIGSEGV.
15022 */
15023 if (onsigstack && !likely(on_sig_stack(sp)))
15024- return (void __user *)-1L;
15025+ return (__force void __user *)-1L;
15026
15027 /* save i387 state */
15028 if (used_math() && save_i387_xstate(*fpstate) < 0)
15029- return (void __user *)-1L;
15030+ return (__force void __user *)-1L;
15031
15032 return (void __user *)sp;
15033 }
15034@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
15035 }
15036
15037 if (current->mm->context.vdso)
15038- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
15039+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
15040 else
15041- restorer = &frame->retcode;
15042+ restorer = (void __user *)&frame->retcode;
15043 if (ka->sa.sa_flags & SA_RESTORER)
15044 restorer = ka->sa.sa_restorer;
15045
15046@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
15047 * reasons and because gdb uses it as a signature to notice
15048 * signal handler stack frames.
15049 */
15050- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
15051+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
15052
15053 if (err)
15054 return -EFAULT;
15055@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
15056 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
15057
15058 /* Set up to return from userspace. */
15059- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
15060+ if (current->mm->context.vdso)
15061+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
15062+ else
15063+ restorer = (void __user *)&frame->retcode;
15064 if (ka->sa.sa_flags & SA_RESTORER)
15065 restorer = ka->sa.sa_restorer;
15066 put_user_ex(restorer, &frame->pretcode);
15067@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
15068 * reasons and because gdb uses it as a signature to notice
15069 * signal handler stack frames.
15070 */
15071- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
15072+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
15073 } put_user_catch(err);
15074
15075 if (err)
15076@@ -769,6 +772,8 @@ static void do_signal(struct pt_regs *re
15077 int signr;
15078 sigset_t *oldset;
15079
15080+ pax_track_stack();
15081+
15082 /*
15083 * We want the common case to go fast, which is why we may in certain
15084 * cases get here from kernel mode. Just return without doing anything
15085@@ -776,7 +781,7 @@ static void do_signal(struct pt_regs *re
15086 * X86_32: vm86 regs switched out by assembly code before reaching
15087 * here, so testing against kernel CS suffices.
15088 */
15089- if (!user_mode(regs))
15090+ if (!user_mode_novm(regs))
15091 return;
15092
15093 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
15094diff -urNp linux-3.0.7/arch/x86/kernel/smpboot.c linux-3.0.7/arch/x86/kernel/smpboot.c
15095--- linux-3.0.7/arch/x86/kernel/smpboot.c 2011-07-21 22:17:23.000000000 -0400
15096+++ linux-3.0.7/arch/x86/kernel/smpboot.c 2011-08-23 21:47:55.000000000 -0400
15097@@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
15098 set_idle_for_cpu(cpu, c_idle.idle);
15099 do_rest:
15100 per_cpu(current_task, cpu) = c_idle.idle;
15101+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
15102 #ifdef CONFIG_X86_32
15103 /* Stack for startup_32 can be just as for start_secondary onwards */
15104 irq_ctx_init(cpu);
15105 #else
15106 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
15107 initial_gs = per_cpu_offset(cpu);
15108- per_cpu(kernel_stack, cpu) =
15109- (unsigned long)task_stack_page(c_idle.idle) -
15110- KERNEL_STACK_OFFSET + THREAD_SIZE;
15111+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
15112 #endif
15113+
15114+ pax_open_kernel();
15115 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
15116+ pax_close_kernel();
15117+
15118 initial_code = (unsigned long)start_secondary;
15119 stack_start = c_idle.idle->thread.sp;
15120
15121@@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
15122
15123 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
15124
15125+#ifdef CONFIG_PAX_PER_CPU_PGD
15126+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
15127+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
15128+ KERNEL_PGD_PTRS);
15129+#endif
15130+
15131 err = do_boot_cpu(apicid, cpu);
15132 if (err) {
15133 pr_debug("do_boot_cpu failed %d\n", err);
15134diff -urNp linux-3.0.7/arch/x86/kernel/step.c linux-3.0.7/arch/x86/kernel/step.c
15135--- linux-3.0.7/arch/x86/kernel/step.c 2011-07-21 22:17:23.000000000 -0400
15136+++ linux-3.0.7/arch/x86/kernel/step.c 2011-08-23 21:47:55.000000000 -0400
15137@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
15138 struct desc_struct *desc;
15139 unsigned long base;
15140
15141- seg &= ~7UL;
15142+ seg >>= 3;
15143
15144 mutex_lock(&child->mm->context.lock);
15145- if (unlikely((seg >> 3) >= child->mm->context.size))
15146+ if (unlikely(seg >= child->mm->context.size))
15147 addr = -1L; /* bogus selector, access would fault */
15148 else {
15149 desc = child->mm->context.ldt + seg;
15150@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
15151 addr += base;
15152 }
15153 mutex_unlock(&child->mm->context.lock);
15154- }
15155+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
15156+ addr = ktla_ktva(addr);
15157
15158 return addr;
15159 }
15160@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
15161 unsigned char opcode[15];
15162 unsigned long addr = convert_ip_to_linear(child, regs);
15163
15164+ if (addr == -EINVAL)
15165+ return 0;
15166+
15167 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
15168 for (i = 0; i < copied; i++) {
15169 switch (opcode[i]) {
15170@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
15171
15172 #ifdef CONFIG_X86_64
15173 case 0x40 ... 0x4f:
15174- if (regs->cs != __USER_CS)
15175+ if ((regs->cs & 0xffff) != __USER_CS)
15176 /* 32-bit mode: register increment */
15177 return 0;
15178 /* 64-bit mode: REX prefix */
15179diff -urNp linux-3.0.7/arch/x86/kernel/syscall_table_32.S linux-3.0.7/arch/x86/kernel/syscall_table_32.S
15180--- linux-3.0.7/arch/x86/kernel/syscall_table_32.S 2011-07-21 22:17:23.000000000 -0400
15181+++ linux-3.0.7/arch/x86/kernel/syscall_table_32.S 2011-08-23 21:47:55.000000000 -0400
15182@@ -1,3 +1,4 @@
15183+.section .rodata,"a",@progbits
15184 ENTRY(sys_call_table)
15185 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
15186 .long sys_exit
15187diff -urNp linux-3.0.7/arch/x86/kernel/sys_i386_32.c linux-3.0.7/arch/x86/kernel/sys_i386_32.c
15188--- linux-3.0.7/arch/x86/kernel/sys_i386_32.c 2011-07-21 22:17:23.000000000 -0400
15189+++ linux-3.0.7/arch/x86/kernel/sys_i386_32.c 2011-08-23 21:47:55.000000000 -0400
15190@@ -24,17 +24,224 @@
15191
15192 #include <asm/syscalls.h>
15193
15194-/*
15195- * Do a system call from kernel instead of calling sys_execve so we
15196- * end up with proper pt_regs.
15197- */
15198-int kernel_execve(const char *filename,
15199- const char *const argv[],
15200- const char *const envp[])
15201+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
15202 {
15203- long __res;
15204- asm volatile ("int $0x80"
15205- : "=a" (__res)
15206- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
15207- return __res;
15208+ unsigned long pax_task_size = TASK_SIZE;
15209+
15210+#ifdef CONFIG_PAX_SEGMEXEC
15211+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
15212+ pax_task_size = SEGMEXEC_TASK_SIZE;
15213+#endif
15214+
15215+ if (len > pax_task_size || addr > pax_task_size - len)
15216+ return -EINVAL;
15217+
15218+ return 0;
15219+}
15220+
15221+unsigned long
15222+arch_get_unmapped_area(struct file *filp, unsigned long addr,
15223+ unsigned long len, unsigned long pgoff, unsigned long flags)
15224+{
15225+ struct mm_struct *mm = current->mm;
15226+ struct vm_area_struct *vma;
15227+ unsigned long start_addr, pax_task_size = TASK_SIZE;
15228+
15229+#ifdef CONFIG_PAX_SEGMEXEC
15230+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15231+ pax_task_size = SEGMEXEC_TASK_SIZE;
15232+#endif
15233+
15234+ pax_task_size -= PAGE_SIZE;
15235+
15236+ if (len > pax_task_size)
15237+ return -ENOMEM;
15238+
15239+ if (flags & MAP_FIXED)
15240+ return addr;
15241+
15242+#ifdef CONFIG_PAX_RANDMMAP
15243+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15244+#endif
15245+
15246+ if (addr) {
15247+ addr = PAGE_ALIGN(addr);
15248+ if (pax_task_size - len >= addr) {
15249+ vma = find_vma(mm, addr);
15250+ if (check_heap_stack_gap(vma, addr, len))
15251+ return addr;
15252+ }
15253+ }
15254+ if (len > mm->cached_hole_size) {
15255+ start_addr = addr = mm->free_area_cache;
15256+ } else {
15257+ start_addr = addr = mm->mmap_base;
15258+ mm->cached_hole_size = 0;
15259+ }
15260+
15261+#ifdef CONFIG_PAX_PAGEEXEC
15262+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
15263+ start_addr = 0x00110000UL;
15264+
15265+#ifdef CONFIG_PAX_RANDMMAP
15266+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15267+ start_addr += mm->delta_mmap & 0x03FFF000UL;
15268+#endif
15269+
15270+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
15271+ start_addr = addr = mm->mmap_base;
15272+ else
15273+ addr = start_addr;
15274+ }
15275+#endif
15276+
15277+full_search:
15278+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
15279+ /* At this point: (!vma || addr < vma->vm_end). */
15280+ if (pax_task_size - len < addr) {
15281+ /*
15282+ * Start a new search - just in case we missed
15283+ * some holes.
15284+ */
15285+ if (start_addr != mm->mmap_base) {
15286+ start_addr = addr = mm->mmap_base;
15287+ mm->cached_hole_size = 0;
15288+ goto full_search;
15289+ }
15290+ return -ENOMEM;
15291+ }
15292+ if (check_heap_stack_gap(vma, addr, len))
15293+ break;
15294+ if (addr + mm->cached_hole_size < vma->vm_start)
15295+ mm->cached_hole_size = vma->vm_start - addr;
15296+ addr = vma->vm_end;
15297+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
15298+ start_addr = addr = mm->mmap_base;
15299+ mm->cached_hole_size = 0;
15300+ goto full_search;
15301+ }
15302+ }
15303+
15304+ /*
15305+ * Remember the place where we stopped the search:
15306+ */
15307+ mm->free_area_cache = addr + len;
15308+ return addr;
15309+}
15310+
15311+unsigned long
15312+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
15313+ const unsigned long len, const unsigned long pgoff,
15314+ const unsigned long flags)
15315+{
15316+ struct vm_area_struct *vma;
15317+ struct mm_struct *mm = current->mm;
15318+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
15319+
15320+#ifdef CONFIG_PAX_SEGMEXEC
15321+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15322+ pax_task_size = SEGMEXEC_TASK_SIZE;
15323+#endif
15324+
15325+ pax_task_size -= PAGE_SIZE;
15326+
15327+ /* requested length too big for entire address space */
15328+ if (len > pax_task_size)
15329+ return -ENOMEM;
15330+
15331+ if (flags & MAP_FIXED)
15332+ return addr;
15333+
15334+#ifdef CONFIG_PAX_PAGEEXEC
15335+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
15336+ goto bottomup;
15337+#endif
15338+
15339+#ifdef CONFIG_PAX_RANDMMAP
15340+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15341+#endif
15342+
15343+ /* requesting a specific address */
15344+ if (addr) {
15345+ addr = PAGE_ALIGN(addr);
15346+ if (pax_task_size - len >= addr) {
15347+ vma = find_vma(mm, addr);
15348+ if (check_heap_stack_gap(vma, addr, len))
15349+ return addr;
15350+ }
15351+ }
15352+
15353+ /* check if free_area_cache is useful for us */
15354+ if (len <= mm->cached_hole_size) {
15355+ mm->cached_hole_size = 0;
15356+ mm->free_area_cache = mm->mmap_base;
15357+ }
15358+
15359+ /* either no address requested or can't fit in requested address hole */
15360+ addr = mm->free_area_cache;
15361+
15362+ /* make sure it can fit in the remaining address space */
15363+ if (addr > len) {
15364+ vma = find_vma(mm, addr-len);
15365+ if (check_heap_stack_gap(vma, addr - len, len))
15366+ /* remember the address as a hint for next time */
15367+ return (mm->free_area_cache = addr-len);
15368+ }
15369+
15370+ if (mm->mmap_base < len)
15371+ goto bottomup;
15372+
15373+ addr = mm->mmap_base-len;
15374+
15375+ do {
15376+ /*
15377+ * Lookup failure means no vma is above this address,
15378+ * else if new region fits below vma->vm_start,
15379+ * return with success:
15380+ */
15381+ vma = find_vma(mm, addr);
15382+ if (check_heap_stack_gap(vma, addr, len))
15383+ /* remember the address as a hint for next time */
15384+ return (mm->free_area_cache = addr);
15385+
15386+ /* remember the largest hole we saw so far */
15387+ if (addr + mm->cached_hole_size < vma->vm_start)
15388+ mm->cached_hole_size = vma->vm_start - addr;
15389+
15390+ /* try just below the current vma->vm_start */
15391+ addr = skip_heap_stack_gap(vma, len);
15392+ } while (!IS_ERR_VALUE(addr));
15393+
15394+bottomup:
15395+ /*
15396+ * A failed mmap() very likely causes application failure,
15397+ * so fall back to the bottom-up function here. This scenario
15398+ * can happen with large stack limits and large mmap()
15399+ * allocations.
15400+ */
15401+
15402+#ifdef CONFIG_PAX_SEGMEXEC
15403+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15404+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
15405+ else
15406+#endif
15407+
15408+ mm->mmap_base = TASK_UNMAPPED_BASE;
15409+
15410+#ifdef CONFIG_PAX_RANDMMAP
15411+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15412+ mm->mmap_base += mm->delta_mmap;
15413+#endif
15414+
15415+ mm->free_area_cache = mm->mmap_base;
15416+ mm->cached_hole_size = ~0UL;
15417+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15418+ /*
15419+ * Restore the topdown base:
15420+ */
15421+ mm->mmap_base = base;
15422+ mm->free_area_cache = base;
15423+ mm->cached_hole_size = ~0UL;
15424+
15425+ return addr;
15426 }
15427diff -urNp linux-3.0.7/arch/x86/kernel/sys_x86_64.c linux-3.0.7/arch/x86/kernel/sys_x86_64.c
15428--- linux-3.0.7/arch/x86/kernel/sys_x86_64.c 2011-07-21 22:17:23.000000000 -0400
15429+++ linux-3.0.7/arch/x86/kernel/sys_x86_64.c 2011-08-23 21:47:55.000000000 -0400
15430@@ -32,8 +32,8 @@ out:
15431 return error;
15432 }
15433
15434-static void find_start_end(unsigned long flags, unsigned long *begin,
15435- unsigned long *end)
15436+static void find_start_end(struct mm_struct *mm, unsigned long flags,
15437+ unsigned long *begin, unsigned long *end)
15438 {
15439 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
15440 unsigned long new_begin;
15441@@ -52,7 +52,7 @@ static void find_start_end(unsigned long
15442 *begin = new_begin;
15443 }
15444 } else {
15445- *begin = TASK_UNMAPPED_BASE;
15446+ *begin = mm->mmap_base;
15447 *end = TASK_SIZE;
15448 }
15449 }
15450@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
15451 if (flags & MAP_FIXED)
15452 return addr;
15453
15454- find_start_end(flags, &begin, &end);
15455+ find_start_end(mm, flags, &begin, &end);
15456
15457 if (len > end)
15458 return -ENOMEM;
15459
15460+#ifdef CONFIG_PAX_RANDMMAP
15461+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15462+#endif
15463+
15464 if (addr) {
15465 addr = PAGE_ALIGN(addr);
15466 vma = find_vma(mm, addr);
15467- if (end - len >= addr &&
15468- (!vma || addr + len <= vma->vm_start))
15469+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
15470 return addr;
15471 }
15472 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
15473@@ -106,7 +109,7 @@ full_search:
15474 }
15475 return -ENOMEM;
15476 }
15477- if (!vma || addr + len <= vma->vm_start) {
15478+ if (check_heap_stack_gap(vma, addr, len)) {
15479 /*
15480 * Remember the place where we stopped the search:
15481 */
15482@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
15483 {
15484 struct vm_area_struct *vma;
15485 struct mm_struct *mm = current->mm;
15486- unsigned long addr = addr0;
15487+ unsigned long base = mm->mmap_base, addr = addr0;
15488
15489 /* requested length too big for entire address space */
15490 if (len > TASK_SIZE)
15491@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
15492 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
15493 goto bottomup;
15494
15495+#ifdef CONFIG_PAX_RANDMMAP
15496+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15497+#endif
15498+
15499 /* requesting a specific address */
15500 if (addr) {
15501 addr = PAGE_ALIGN(addr);
15502- vma = find_vma(mm, addr);
15503- if (TASK_SIZE - len >= addr &&
15504- (!vma || addr + len <= vma->vm_start))
15505- return addr;
15506+ if (TASK_SIZE - len >= addr) {
15507+ vma = find_vma(mm, addr);
15508+ if (check_heap_stack_gap(vma, addr, len))
15509+ return addr;
15510+ }
15511 }
15512
15513 /* check if free_area_cache is useful for us */
15514@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
15515 /* make sure it can fit in the remaining address space */
15516 if (addr > len) {
15517 vma = find_vma(mm, addr-len);
15518- if (!vma || addr <= vma->vm_start)
15519+ if (check_heap_stack_gap(vma, addr - len, len))
15520 /* remember the address as a hint for next time */
15521 return mm->free_area_cache = addr-len;
15522 }
15523@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
15524 * return with success:
15525 */
15526 vma = find_vma(mm, addr);
15527- if (!vma || addr+len <= vma->vm_start)
15528+ if (check_heap_stack_gap(vma, addr, len))
15529 /* remember the address as a hint for next time */
15530 return mm->free_area_cache = addr;
15531
15532@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
15533 mm->cached_hole_size = vma->vm_start - addr;
15534
15535 /* try just below the current vma->vm_start */
15536- addr = vma->vm_start-len;
15537- } while (len < vma->vm_start);
15538+ addr = skip_heap_stack_gap(vma, len);
15539+ } while (!IS_ERR_VALUE(addr));
15540
15541 bottomup:
15542 /*
15543@@ -198,13 +206,21 @@ bottomup:
15544 * can happen with large stack limits and large mmap()
15545 * allocations.
15546 */
15547+ mm->mmap_base = TASK_UNMAPPED_BASE;
15548+
15549+#ifdef CONFIG_PAX_RANDMMAP
15550+ if (mm->pax_flags & MF_PAX_RANDMMAP)
15551+ mm->mmap_base += mm->delta_mmap;
15552+#endif
15553+
15554+ mm->free_area_cache = mm->mmap_base;
15555 mm->cached_hole_size = ~0UL;
15556- mm->free_area_cache = TASK_UNMAPPED_BASE;
15557 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15558 /*
15559 * Restore the topdown base:
15560 */
15561- mm->free_area_cache = mm->mmap_base;
15562+ mm->mmap_base = base;
15563+ mm->free_area_cache = base;
15564 mm->cached_hole_size = ~0UL;
15565
15566 return addr;
15567diff -urNp linux-3.0.7/arch/x86/kernel/tboot.c linux-3.0.7/arch/x86/kernel/tboot.c
15568--- linux-3.0.7/arch/x86/kernel/tboot.c 2011-07-21 22:17:23.000000000 -0400
15569+++ linux-3.0.7/arch/x86/kernel/tboot.c 2011-08-23 21:47:55.000000000 -0400
15570@@ -217,7 +217,7 @@ static int tboot_setup_sleep(void)
15571
15572 void tboot_shutdown(u32 shutdown_type)
15573 {
15574- void (*shutdown)(void);
15575+ void (* __noreturn shutdown)(void);
15576
15577 if (!tboot_enabled())
15578 return;
15579@@ -239,7 +239,7 @@ void tboot_shutdown(u32 shutdown_type)
15580
15581 switch_to_tboot_pt();
15582
15583- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
15584+ shutdown = (void *)tboot->shutdown_entry;
15585 shutdown();
15586
15587 /* should not reach here */
15588@@ -296,7 +296,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
15589 tboot_shutdown(acpi_shutdown_map[sleep_state]);
15590 }
15591
15592-static atomic_t ap_wfs_count;
15593+static atomic_unchecked_t ap_wfs_count;
15594
15595 static int tboot_wait_for_aps(int num_aps)
15596 {
15597@@ -320,9 +320,9 @@ static int __cpuinit tboot_cpu_callback(
15598 {
15599 switch (action) {
15600 case CPU_DYING:
15601- atomic_inc(&ap_wfs_count);
15602+ atomic_inc_unchecked(&ap_wfs_count);
15603 if (num_online_cpus() == 1)
15604- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
15605+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
15606 return NOTIFY_BAD;
15607 break;
15608 }
15609@@ -341,7 +341,7 @@ static __init int tboot_late_init(void)
15610
15611 tboot_create_trampoline();
15612
15613- atomic_set(&ap_wfs_count, 0);
15614+ atomic_set_unchecked(&ap_wfs_count, 0);
15615 register_hotcpu_notifier(&tboot_cpu_notifier);
15616 return 0;
15617 }
15618diff -urNp linux-3.0.7/arch/x86/kernel/time.c linux-3.0.7/arch/x86/kernel/time.c
15619--- linux-3.0.7/arch/x86/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
15620+++ linux-3.0.7/arch/x86/kernel/time.c 2011-08-23 21:47:55.000000000 -0400
15621@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs
15622 {
15623 unsigned long pc = instruction_pointer(regs);
15624
15625- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
15626+ if (!user_mode(regs) && in_lock_functions(pc)) {
15627 #ifdef CONFIG_FRAME_POINTER
15628- return *(unsigned long *)(regs->bp + sizeof(long));
15629+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
15630 #else
15631 unsigned long *sp =
15632 (unsigned long *)kernel_stack_pointer(regs);
15633@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
15634 * or above a saved flags. Eflags has bits 22-31 zero,
15635 * kernel addresses don't.
15636 */
15637+
15638+#ifdef CONFIG_PAX_KERNEXEC
15639+ return ktla_ktva(sp[0]);
15640+#else
15641 if (sp[0] >> 22)
15642 return sp[0];
15643 if (sp[1] >> 22)
15644 return sp[1];
15645 #endif
15646+
15647+#endif
15648 }
15649 return pc;
15650 }
15651diff -urNp linux-3.0.7/arch/x86/kernel/tls.c linux-3.0.7/arch/x86/kernel/tls.c
15652--- linux-3.0.7/arch/x86/kernel/tls.c 2011-07-21 22:17:23.000000000 -0400
15653+++ linux-3.0.7/arch/x86/kernel/tls.c 2011-08-23 21:47:55.000000000 -0400
15654@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
15655 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
15656 return -EINVAL;
15657
15658+#ifdef CONFIG_PAX_SEGMEXEC
15659+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
15660+ return -EINVAL;
15661+#endif
15662+
15663 set_tls_desc(p, idx, &info, 1);
15664
15665 return 0;
15666diff -urNp linux-3.0.7/arch/x86/kernel/trampoline_32.S linux-3.0.7/arch/x86/kernel/trampoline_32.S
15667--- linux-3.0.7/arch/x86/kernel/trampoline_32.S 2011-07-21 22:17:23.000000000 -0400
15668+++ linux-3.0.7/arch/x86/kernel/trampoline_32.S 2011-08-23 21:47:55.000000000 -0400
15669@@ -32,6 +32,12 @@
15670 #include <asm/segment.h>
15671 #include <asm/page_types.h>
15672
15673+#ifdef CONFIG_PAX_KERNEXEC
15674+#define ta(X) (X)
15675+#else
15676+#define ta(X) ((X) - __PAGE_OFFSET)
15677+#endif
15678+
15679 #ifdef CONFIG_SMP
15680
15681 .section ".x86_trampoline","a"
15682@@ -62,7 +68,7 @@ r_base = .
15683 inc %ax # protected mode (PE) bit
15684 lmsw %ax # into protected mode
15685 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
15686- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
15687+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
15688
15689 # These need to be in the same 64K segment as the above;
15690 # hence we don't use the boot_gdt_descr defined in head.S
15691diff -urNp linux-3.0.7/arch/x86/kernel/trampoline_64.S linux-3.0.7/arch/x86/kernel/trampoline_64.S
15692--- linux-3.0.7/arch/x86/kernel/trampoline_64.S 2011-07-21 22:17:23.000000000 -0400
15693+++ linux-3.0.7/arch/x86/kernel/trampoline_64.S 2011-08-23 21:47:55.000000000 -0400
15694@@ -90,7 +90,7 @@ startup_32:
15695 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
15696 movl %eax, %ds
15697
15698- movl $X86_CR4_PAE, %eax
15699+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15700 movl %eax, %cr4 # Enable PAE mode
15701
15702 # Setup trampoline 4 level pagetables
15703@@ -138,7 +138,7 @@ tidt:
15704 # so the kernel can live anywhere
15705 .balign 4
15706 tgdt:
15707- .short tgdt_end - tgdt # gdt limit
15708+ .short tgdt_end - tgdt - 1 # gdt limit
15709 .long tgdt - r_base
15710 .short 0
15711 .quad 0x00cf9b000000ffff # __KERNEL32_CS
15712diff -urNp linux-3.0.7/arch/x86/kernel/traps.c linux-3.0.7/arch/x86/kernel/traps.c
15713--- linux-3.0.7/arch/x86/kernel/traps.c 2011-07-21 22:17:23.000000000 -0400
15714+++ linux-3.0.7/arch/x86/kernel/traps.c 2011-08-23 21:47:55.000000000 -0400
15715@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
15716
15717 /* Do we ignore FPU interrupts ? */
15718 char ignore_fpu_irq;
15719-
15720-/*
15721- * The IDT has to be page-aligned to simplify the Pentium
15722- * F0 0F bug workaround.
15723- */
15724-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
15725 #endif
15726
15727 DECLARE_BITMAP(used_vectors, NR_VECTORS);
15728@@ -117,13 +111,13 @@ static inline void preempt_conditional_c
15729 }
15730
15731 static void __kprobes
15732-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
15733+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
15734 long error_code, siginfo_t *info)
15735 {
15736 struct task_struct *tsk = current;
15737
15738 #ifdef CONFIG_X86_32
15739- if (regs->flags & X86_VM_MASK) {
15740+ if (v8086_mode(regs)) {
15741 /*
15742 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
15743 * On nmi (interrupt 2), do_trap should not be called.
15744@@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
15745 }
15746 #endif
15747
15748- if (!user_mode(regs))
15749+ if (!user_mode_novm(regs))
15750 goto kernel_trap;
15751
15752 #ifdef CONFIG_X86_32
15753@@ -157,7 +151,7 @@ trap_signal:
15754 printk_ratelimit()) {
15755 printk(KERN_INFO
15756 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
15757- tsk->comm, tsk->pid, str,
15758+ tsk->comm, task_pid_nr(tsk), str,
15759 regs->ip, regs->sp, error_code);
15760 print_vma_addr(" in ", regs->ip);
15761 printk("\n");
15762@@ -174,8 +168,20 @@ kernel_trap:
15763 if (!fixup_exception(regs)) {
15764 tsk->thread.error_code = error_code;
15765 tsk->thread.trap_no = trapnr;
15766+
15767+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15768+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
15769+ str = "PAX: suspicious stack segment fault";
15770+#endif
15771+
15772 die(str, regs, error_code);
15773 }
15774+
15775+#ifdef CONFIG_PAX_REFCOUNT
15776+ if (trapnr == 4)
15777+ pax_report_refcount_overflow(regs);
15778+#endif
15779+
15780 return;
15781
15782 #ifdef CONFIG_X86_32
15783@@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
15784 conditional_sti(regs);
15785
15786 #ifdef CONFIG_X86_32
15787- if (regs->flags & X86_VM_MASK)
15788+ if (v8086_mode(regs))
15789 goto gp_in_vm86;
15790 #endif
15791
15792 tsk = current;
15793- if (!user_mode(regs))
15794+ if (!user_mode_novm(regs))
15795 goto gp_in_kernel;
15796
15797+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
15798+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
15799+ struct mm_struct *mm = tsk->mm;
15800+ unsigned long limit;
15801+
15802+ down_write(&mm->mmap_sem);
15803+ limit = mm->context.user_cs_limit;
15804+ if (limit < TASK_SIZE) {
15805+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
15806+ up_write(&mm->mmap_sem);
15807+ return;
15808+ }
15809+ up_write(&mm->mmap_sem);
15810+ }
15811+#endif
15812+
15813 tsk->thread.error_code = error_code;
15814 tsk->thread.trap_no = 13;
15815
15816@@ -304,6 +326,13 @@ gp_in_kernel:
15817 if (notify_die(DIE_GPF, "general protection fault", regs,
15818 error_code, 13, SIGSEGV) == NOTIFY_STOP)
15819 return;
15820+
15821+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15822+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
15823+ die("PAX: suspicious general protection fault", regs, error_code);
15824+ else
15825+#endif
15826+
15827 die("general protection fault", regs, error_code);
15828 }
15829
15830@@ -433,6 +462,17 @@ static notrace __kprobes void default_do
15831 dotraplinkage notrace __kprobes void
15832 do_nmi(struct pt_regs *regs, long error_code)
15833 {
15834+
15835+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15836+ if (!user_mode(regs)) {
15837+ unsigned long cs = regs->cs & 0xFFFF;
15838+ unsigned long ip = ktva_ktla(regs->ip);
15839+
15840+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
15841+ regs->ip = ip;
15842+ }
15843+#endif
15844+
15845 nmi_enter();
15846
15847 inc_irq_stat(__nmi_count);
15848@@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
15849 /* It's safe to allow irq's after DR6 has been saved */
15850 preempt_conditional_sti(regs);
15851
15852- if (regs->flags & X86_VM_MASK) {
15853+ if (v8086_mode(regs)) {
15854 handle_vm86_trap((struct kernel_vm86_regs *) regs,
15855 error_code, 1);
15856 preempt_conditional_cli(regs);
15857@@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
15858 * We already checked v86 mode above, so we can check for kernel mode
15859 * by just checking the CPL of CS.
15860 */
15861- if ((dr6 & DR_STEP) && !user_mode(regs)) {
15862+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
15863 tsk->thread.debugreg6 &= ~DR_STEP;
15864 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
15865 regs->flags &= ~X86_EFLAGS_TF;
15866@@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
15867 return;
15868 conditional_sti(regs);
15869
15870- if (!user_mode_vm(regs))
15871+ if (!user_mode(regs))
15872 {
15873 if (!fixup_exception(regs)) {
15874 task->thread.error_code = error_code;
15875@@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
15876 void __math_state_restore(void)
15877 {
15878 struct thread_info *thread = current_thread_info();
15879- struct task_struct *tsk = thread->task;
15880+ struct task_struct *tsk = current;
15881
15882 /*
15883 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
15884@@ -750,8 +790,7 @@ void __math_state_restore(void)
15885 */
15886 asmlinkage void math_state_restore(void)
15887 {
15888- struct thread_info *thread = current_thread_info();
15889- struct task_struct *tsk = thread->task;
15890+ struct task_struct *tsk = current;
15891
15892 if (!tsk_used_math(tsk)) {
15893 local_irq_enable();
15894diff -urNp linux-3.0.7/arch/x86/kernel/verify_cpu.S linux-3.0.7/arch/x86/kernel/verify_cpu.S
15895--- linux-3.0.7/arch/x86/kernel/verify_cpu.S 2011-07-21 22:17:23.000000000 -0400
15896+++ linux-3.0.7/arch/x86/kernel/verify_cpu.S 2011-08-23 21:48:14.000000000 -0400
15897@@ -20,6 +20,7 @@
15898 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
15899 * arch/x86/kernel/trampoline_64.S: secondary processor verification
15900 * arch/x86/kernel/head_32.S: processor startup
15901+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
15902 *
15903 * verify_cpu, returns the status of longmode and SSE in register %eax.
15904 * 0: Success 1: Failure
15905diff -urNp linux-3.0.7/arch/x86/kernel/vm86_32.c linux-3.0.7/arch/x86/kernel/vm86_32.c
15906--- linux-3.0.7/arch/x86/kernel/vm86_32.c 2011-07-21 22:17:23.000000000 -0400
15907+++ linux-3.0.7/arch/x86/kernel/vm86_32.c 2011-08-23 21:48:14.000000000 -0400
15908@@ -41,6 +41,7 @@
15909 #include <linux/ptrace.h>
15910 #include <linux/audit.h>
15911 #include <linux/stddef.h>
15912+#include <linux/grsecurity.h>
15913
15914 #include <asm/uaccess.h>
15915 #include <asm/io.h>
15916@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
15917 do_exit(SIGSEGV);
15918 }
15919
15920- tss = &per_cpu(init_tss, get_cpu());
15921+ tss = init_tss + get_cpu();
15922 current->thread.sp0 = current->thread.saved_sp0;
15923 current->thread.sysenter_cs = __KERNEL_CS;
15924 load_sp0(tss, &current->thread);
15925@@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
15926 struct task_struct *tsk;
15927 int tmp, ret = -EPERM;
15928
15929+#ifdef CONFIG_GRKERNSEC_VM86
15930+ if (!capable(CAP_SYS_RAWIO)) {
15931+ gr_handle_vm86();
15932+ goto out;
15933+ }
15934+#endif
15935+
15936 tsk = current;
15937 if (tsk->thread.saved_sp0)
15938 goto out;
15939@@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
15940 int tmp, ret;
15941 struct vm86plus_struct __user *v86;
15942
15943+#ifdef CONFIG_GRKERNSEC_VM86
15944+ if (!capable(CAP_SYS_RAWIO)) {
15945+ gr_handle_vm86();
15946+ ret = -EPERM;
15947+ goto out;
15948+ }
15949+#endif
15950+
15951 tsk = current;
15952 switch (cmd) {
15953 case VM86_REQUEST_IRQ:
15954@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
15955 tsk->thread.saved_fs = info->regs32->fs;
15956 tsk->thread.saved_gs = get_user_gs(info->regs32);
15957
15958- tss = &per_cpu(init_tss, get_cpu());
15959+ tss = init_tss + get_cpu();
15960 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
15961 if (cpu_has_sep)
15962 tsk->thread.sysenter_cs = 0;
15963@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
15964 goto cannot_handle;
15965 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
15966 goto cannot_handle;
15967- intr_ptr = (unsigned long __user *) (i << 2);
15968+ intr_ptr = (__force unsigned long __user *) (i << 2);
15969 if (get_user(segoffs, intr_ptr))
15970 goto cannot_handle;
15971 if ((segoffs >> 16) == BIOSSEG)
15972diff -urNp linux-3.0.7/arch/x86/kernel/vmlinux.lds.S linux-3.0.7/arch/x86/kernel/vmlinux.lds.S
15973--- linux-3.0.7/arch/x86/kernel/vmlinux.lds.S 2011-07-21 22:17:23.000000000 -0400
15974+++ linux-3.0.7/arch/x86/kernel/vmlinux.lds.S 2011-08-23 21:47:55.000000000 -0400
15975@@ -26,6 +26,13 @@
15976 #include <asm/page_types.h>
15977 #include <asm/cache.h>
15978 #include <asm/boot.h>
15979+#include <asm/segment.h>
15980+
15981+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15982+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
15983+#else
15984+#define __KERNEL_TEXT_OFFSET 0
15985+#endif
15986
15987 #undef i386 /* in case the preprocessor is a 32bit one */
15988
15989@@ -69,31 +76,46 @@ jiffies_64 = jiffies;
15990
15991 PHDRS {
15992 text PT_LOAD FLAGS(5); /* R_E */
15993+#ifdef CONFIG_X86_32
15994+ module PT_LOAD FLAGS(5); /* R_E */
15995+#endif
15996+#ifdef CONFIG_XEN
15997+ rodata PT_LOAD FLAGS(5); /* R_E */
15998+#else
15999+ rodata PT_LOAD FLAGS(4); /* R__ */
16000+#endif
16001 data PT_LOAD FLAGS(6); /* RW_ */
16002 #ifdef CONFIG_X86_64
16003 user PT_LOAD FLAGS(5); /* R_E */
16004+#endif
16005+ init.begin PT_LOAD FLAGS(6); /* RW_ */
16006 #ifdef CONFIG_SMP
16007 percpu PT_LOAD FLAGS(6); /* RW_ */
16008 #endif
16009+ text.init PT_LOAD FLAGS(5); /* R_E */
16010+ text.exit PT_LOAD FLAGS(5); /* R_E */
16011 init PT_LOAD FLAGS(7); /* RWE */
16012-#endif
16013 note PT_NOTE FLAGS(0); /* ___ */
16014 }
16015
16016 SECTIONS
16017 {
16018 #ifdef CONFIG_X86_32
16019- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
16020- phys_startup_32 = startup_32 - LOAD_OFFSET;
16021+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
16022 #else
16023- . = __START_KERNEL;
16024- phys_startup_64 = startup_64 - LOAD_OFFSET;
16025+ . = __START_KERNEL;
16026 #endif
16027
16028 /* Text and read-only data */
16029- .text : AT(ADDR(.text) - LOAD_OFFSET) {
16030- _text = .;
16031+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
16032 /* bootstrapping code */
16033+#ifdef CONFIG_X86_32
16034+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
16035+#else
16036+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
16037+#endif
16038+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
16039+ _text = .;
16040 HEAD_TEXT
16041 #ifdef CONFIG_X86_32
16042 . = ALIGN(PAGE_SIZE);
16043@@ -109,13 +131,47 @@ SECTIONS
16044 IRQENTRY_TEXT
16045 *(.fixup)
16046 *(.gnu.warning)
16047- /* End of text section */
16048- _etext = .;
16049 } :text = 0x9090
16050
16051- NOTES :text :note
16052+ . += __KERNEL_TEXT_OFFSET;
16053+
16054+#ifdef CONFIG_X86_32
16055+ . = ALIGN(PAGE_SIZE);
16056+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
16057+
16058+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
16059+ MODULES_EXEC_VADDR = .;
16060+ BYTE(0)
16061+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
16062+ . = ALIGN(HPAGE_SIZE);
16063+ MODULES_EXEC_END = . - 1;
16064+#endif
16065+
16066+ } :module
16067+#endif
16068+
16069+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
16070+ /* End of text section */
16071+ _etext = . - __KERNEL_TEXT_OFFSET;
16072+ }
16073+
16074+#ifdef CONFIG_X86_32
16075+ . = ALIGN(PAGE_SIZE);
16076+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
16077+ *(.idt)
16078+ . = ALIGN(PAGE_SIZE);
16079+ *(.empty_zero_page)
16080+ *(.initial_pg_fixmap)
16081+ *(.initial_pg_pmd)
16082+ *(.initial_page_table)
16083+ *(.swapper_pg_dir)
16084+ } :rodata
16085+#endif
16086+
16087+ . = ALIGN(PAGE_SIZE);
16088+ NOTES :rodata :note
16089
16090- EXCEPTION_TABLE(16) :text = 0x9090
16091+ EXCEPTION_TABLE(16) :rodata
16092
16093 #if defined(CONFIG_DEBUG_RODATA)
16094 /* .text should occupy whole number of pages */
16095@@ -127,16 +183,20 @@ SECTIONS
16096
16097 /* Data */
16098 .data : AT(ADDR(.data) - LOAD_OFFSET) {
16099+
16100+#ifdef CONFIG_PAX_KERNEXEC
16101+ . = ALIGN(HPAGE_SIZE);
16102+#else
16103+ . = ALIGN(PAGE_SIZE);
16104+#endif
16105+
16106 /* Start of data section */
16107 _sdata = .;
16108
16109 /* init_task */
16110 INIT_TASK_DATA(THREAD_SIZE)
16111
16112-#ifdef CONFIG_X86_32
16113- /* 32 bit has nosave before _edata */
16114 NOSAVE_DATA
16115-#endif
16116
16117 PAGE_ALIGNED_DATA(PAGE_SIZE)
16118
16119@@ -208,12 +268,19 @@ SECTIONS
16120 #endif /* CONFIG_X86_64 */
16121
16122 /* Init code and data - will be freed after init */
16123- . = ALIGN(PAGE_SIZE);
16124 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
16125+ BYTE(0)
16126+
16127+#ifdef CONFIG_PAX_KERNEXEC
16128+ . = ALIGN(HPAGE_SIZE);
16129+#else
16130+ . = ALIGN(PAGE_SIZE);
16131+#endif
16132+
16133 __init_begin = .; /* paired with __init_end */
16134- }
16135+ } :init.begin
16136
16137-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
16138+#ifdef CONFIG_SMP
16139 /*
16140 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
16141 * output PHDR, so the next output section - .init.text - should
16142@@ -222,12 +289,27 @@ SECTIONS
16143 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
16144 #endif
16145
16146- INIT_TEXT_SECTION(PAGE_SIZE)
16147-#ifdef CONFIG_X86_64
16148- :init
16149-#endif
16150+ . = ALIGN(PAGE_SIZE);
16151+ init_begin = .;
16152+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
16153+ VMLINUX_SYMBOL(_sinittext) = .;
16154+ INIT_TEXT
16155+ VMLINUX_SYMBOL(_einittext) = .;
16156+ . = ALIGN(PAGE_SIZE);
16157+ } :text.init
16158
16159- INIT_DATA_SECTION(16)
16160+ /*
16161+ * .exit.text is discard at runtime, not link time, to deal with
16162+ * references from .altinstructions and .eh_frame
16163+ */
16164+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
16165+ EXIT_TEXT
16166+ . = ALIGN(16);
16167+ } :text.exit
16168+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
16169+
16170+ . = ALIGN(PAGE_SIZE);
16171+ INIT_DATA_SECTION(16) :init
16172
16173 /*
16174 * Code and data for a variety of lowlevel trampolines, to be
16175@@ -301,19 +383,12 @@ SECTIONS
16176 }
16177
16178 . = ALIGN(8);
16179- /*
16180- * .exit.text is discard at runtime, not link time, to deal with
16181- * references from .altinstructions and .eh_frame
16182- */
16183- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
16184- EXIT_TEXT
16185- }
16186
16187 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
16188 EXIT_DATA
16189 }
16190
16191-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
16192+#ifndef CONFIG_SMP
16193 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
16194 #endif
16195
16196@@ -332,16 +407,10 @@ SECTIONS
16197 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
16198 __smp_locks = .;
16199 *(.smp_locks)
16200- . = ALIGN(PAGE_SIZE);
16201 __smp_locks_end = .;
16202+ . = ALIGN(PAGE_SIZE);
16203 }
16204
16205-#ifdef CONFIG_X86_64
16206- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
16207- NOSAVE_DATA
16208- }
16209-#endif
16210-
16211 /* BSS */
16212 . = ALIGN(PAGE_SIZE);
16213 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
16214@@ -357,6 +426,7 @@ SECTIONS
16215 __brk_base = .;
16216 . += 64 * 1024; /* 64k alignment slop space */
16217 *(.brk_reservation) /* areas brk users have reserved */
16218+ . = ALIGN(HPAGE_SIZE);
16219 __brk_limit = .;
16220 }
16221
16222@@ -383,13 +453,12 @@ SECTIONS
16223 * for the boot processor.
16224 */
16225 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
16226-INIT_PER_CPU(gdt_page);
16227 INIT_PER_CPU(irq_stack_union);
16228
16229 /*
16230 * Build-time check on the image size:
16231 */
16232-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
16233+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
16234 "kernel image bigger than KERNEL_IMAGE_SIZE");
16235
16236 #ifdef CONFIG_SMP
16237diff -urNp linux-3.0.7/arch/x86/kernel/vsyscall_64.c linux-3.0.7/arch/x86/kernel/vsyscall_64.c
16238--- linux-3.0.7/arch/x86/kernel/vsyscall_64.c 2011-07-21 22:17:23.000000000 -0400
16239+++ linux-3.0.7/arch/x86/kernel/vsyscall_64.c 2011-08-23 21:47:55.000000000 -0400
16240@@ -53,7 +53,7 @@ DEFINE_VVAR(int, vgetcpu_mode);
16241 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
16242 {
16243 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
16244- .sysctl_enabled = 1,
16245+ .sysctl_enabled = 0,
16246 };
16247
16248 void update_vsyscall_tz(void)
16249@@ -231,7 +231,7 @@ static long __vsyscall(3) venosys_1(void
16250 static ctl_table kernel_table2[] = {
16251 { .procname = "vsyscall64",
16252 .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
16253- .mode = 0644,
16254+ .mode = 0444,
16255 .proc_handler = proc_dointvec },
16256 {}
16257 };
16258diff -urNp linux-3.0.7/arch/x86/kernel/x8664_ksyms_64.c linux-3.0.7/arch/x86/kernel/x8664_ksyms_64.c
16259--- linux-3.0.7/arch/x86/kernel/x8664_ksyms_64.c 2011-07-21 22:17:23.000000000 -0400
16260+++ linux-3.0.7/arch/x86/kernel/x8664_ksyms_64.c 2011-08-23 21:47:55.000000000 -0400
16261@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
16262 EXPORT_SYMBOL(copy_user_generic_string);
16263 EXPORT_SYMBOL(copy_user_generic_unrolled);
16264 EXPORT_SYMBOL(__copy_user_nocache);
16265-EXPORT_SYMBOL(_copy_from_user);
16266-EXPORT_SYMBOL(_copy_to_user);
16267
16268 EXPORT_SYMBOL(copy_page);
16269 EXPORT_SYMBOL(clear_page);
16270diff -urNp linux-3.0.7/arch/x86/kernel/xsave.c linux-3.0.7/arch/x86/kernel/xsave.c
16271--- linux-3.0.7/arch/x86/kernel/xsave.c 2011-07-21 22:17:23.000000000 -0400
16272+++ linux-3.0.7/arch/x86/kernel/xsave.c 2011-10-06 04:17:55.000000000 -0400
16273@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
16274 fx_sw_user->xstate_size > fx_sw_user->extended_size)
16275 return -EINVAL;
16276
16277- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
16278+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
16279 fx_sw_user->extended_size -
16280 FP_XSTATE_MAGIC2_SIZE));
16281 if (err)
16282@@ -267,7 +267,7 @@ fx_only:
16283 * the other extended state.
16284 */
16285 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
16286- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
16287+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
16288 }
16289
16290 /*
16291@@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
16292 if (use_xsave())
16293 err = restore_user_xstate(buf);
16294 else
16295- err = fxrstor_checking((__force struct i387_fxsave_struct *)
16296+ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
16297 buf);
16298 if (unlikely(err)) {
16299 /*
16300diff -urNp linux-3.0.7/arch/x86/kvm/emulate.c linux-3.0.7/arch/x86/kvm/emulate.c
16301--- linux-3.0.7/arch/x86/kvm/emulate.c 2011-07-21 22:17:23.000000000 -0400
16302+++ linux-3.0.7/arch/x86/kvm/emulate.c 2011-08-23 21:47:55.000000000 -0400
16303@@ -96,7 +96,7 @@
16304 #define Src2ImmByte (2<<29)
16305 #define Src2One (3<<29)
16306 #define Src2Imm (4<<29)
16307-#define Src2Mask (7<<29)
16308+#define Src2Mask (7U<<29)
16309
16310 #define X2(x...) x, x
16311 #define X3(x...) X2(x), x
16312@@ -207,6 +207,7 @@ struct gprefix {
16313
16314 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
16315 do { \
16316+ unsigned long _tmp; \
16317 __asm__ __volatile__ ( \
16318 _PRE_EFLAGS("0", "4", "2") \
16319 _op _suffix " %"_x"3,%1; " \
16320@@ -220,8 +221,6 @@ struct gprefix {
16321 /* Raw emulation: instruction has two explicit operands. */
16322 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
16323 do { \
16324- unsigned long _tmp; \
16325- \
16326 switch ((_dst).bytes) { \
16327 case 2: \
16328 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
16329@@ -237,7 +236,6 @@ struct gprefix {
16330
16331 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
16332 do { \
16333- unsigned long _tmp; \
16334 switch ((_dst).bytes) { \
16335 case 1: \
16336 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
16337diff -urNp linux-3.0.7/arch/x86/kvm/lapic.c linux-3.0.7/arch/x86/kvm/lapic.c
16338--- linux-3.0.7/arch/x86/kvm/lapic.c 2011-07-21 22:17:23.000000000 -0400
16339+++ linux-3.0.7/arch/x86/kvm/lapic.c 2011-08-23 21:47:55.000000000 -0400
16340@@ -53,7 +53,7 @@
16341 #define APIC_BUS_CYCLE_NS 1
16342
16343 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
16344-#define apic_debug(fmt, arg...)
16345+#define apic_debug(fmt, arg...) do {} while (0)
16346
16347 #define APIC_LVT_NUM 6
16348 /* 14 is the version for Xeon and Pentium 8.4.8*/
16349diff -urNp linux-3.0.7/arch/x86/kvm/mmu.c linux-3.0.7/arch/x86/kvm/mmu.c
16350--- linux-3.0.7/arch/x86/kvm/mmu.c 2011-07-21 22:17:23.000000000 -0400
16351+++ linux-3.0.7/arch/x86/kvm/mmu.c 2011-08-23 21:47:55.000000000 -0400
16352@@ -3238,7 +3238,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16353
16354 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
16355
16356- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
16357+ invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
16358
16359 /*
16360 * Assume that the pte write on a page table of the same type
16361@@ -3270,7 +3270,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16362 }
16363
16364 spin_lock(&vcpu->kvm->mmu_lock);
16365- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16366+ if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16367 gentry = 0;
16368 kvm_mmu_free_some_pages(vcpu);
16369 ++vcpu->kvm->stat.mmu_pte_write;
16370diff -urNp linux-3.0.7/arch/x86/kvm/paging_tmpl.h linux-3.0.7/arch/x86/kvm/paging_tmpl.h
16371--- linux-3.0.7/arch/x86/kvm/paging_tmpl.h 2011-07-21 22:17:23.000000000 -0400
16372+++ linux-3.0.7/arch/x86/kvm/paging_tmpl.h 2011-10-06 04:17:55.000000000 -0400
16373@@ -182,7 +182,7 @@ walk:
16374 break;
16375 }
16376
16377- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
16378+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
16379 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte)))) {
16380 present = false;
16381 break;
16382@@ -583,6 +583,8 @@ static int FNAME(page_fault)(struct kvm_
16383 unsigned long mmu_seq;
16384 bool map_writable;
16385
16386+ pax_track_stack();
16387+
16388 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
16389
16390 r = mmu_topup_memory_caches(vcpu);
16391@@ -703,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcp
16392 if (need_flush)
16393 kvm_flush_remote_tlbs(vcpu->kvm);
16394
16395- atomic_inc(&vcpu->kvm->arch.invlpg_counter);
16396+ atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
16397
16398 spin_unlock(&vcpu->kvm->mmu_lock);
16399
16400diff -urNp linux-3.0.7/arch/x86/kvm/svm.c linux-3.0.7/arch/x86/kvm/svm.c
16401--- linux-3.0.7/arch/x86/kvm/svm.c 2011-07-21 22:17:23.000000000 -0400
16402+++ linux-3.0.7/arch/x86/kvm/svm.c 2011-08-23 21:47:55.000000000 -0400
16403@@ -3377,7 +3377,11 @@ static void reload_tss(struct kvm_vcpu *
16404 int cpu = raw_smp_processor_id();
16405
16406 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
16407+
16408+ pax_open_kernel();
16409 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
16410+ pax_close_kernel();
16411+
16412 load_TR_desc();
16413 }
16414
16415@@ -3755,6 +3759,10 @@ static void svm_vcpu_run(struct kvm_vcpu
16416 #endif
16417 #endif
16418
16419+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16420+ __set_fs(current_thread_info()->addr_limit);
16421+#endif
16422+
16423 reload_tss(vcpu);
16424
16425 local_irq_disable();
16426diff -urNp linux-3.0.7/arch/x86/kvm/vmx.c linux-3.0.7/arch/x86/kvm/vmx.c
16427--- linux-3.0.7/arch/x86/kvm/vmx.c 2011-07-21 22:17:23.000000000 -0400
16428+++ linux-3.0.7/arch/x86/kvm/vmx.c 2011-08-23 21:47:55.000000000 -0400
16429@@ -797,7 +797,11 @@ static void reload_tss(void)
16430 struct desc_struct *descs;
16431
16432 descs = (void *)gdt->address;
16433+
16434+ pax_open_kernel();
16435 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
16436+ pax_close_kernel();
16437+
16438 load_TR_desc();
16439 }
16440
16441@@ -1747,8 +1751,11 @@ static __init int hardware_setup(void)
16442 if (!cpu_has_vmx_flexpriority())
16443 flexpriority_enabled = 0;
16444
16445- if (!cpu_has_vmx_tpr_shadow())
16446- kvm_x86_ops->update_cr8_intercept = NULL;
16447+ if (!cpu_has_vmx_tpr_shadow()) {
16448+ pax_open_kernel();
16449+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
16450+ pax_close_kernel();
16451+ }
16452
16453 if (enable_ept && !cpu_has_vmx_ept_2m_page())
16454 kvm_disable_largepages();
16455@@ -2814,7 +2821,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
16456 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
16457
16458 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
16459- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
16460+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
16461 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
16462 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
16463 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
16464@@ -4211,6 +4218,12 @@ static void __noclone vmx_vcpu_run(struc
16465 "jmp .Lkvm_vmx_return \n\t"
16466 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
16467 ".Lkvm_vmx_return: "
16468+
16469+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16470+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
16471+ ".Lkvm_vmx_return2: "
16472+#endif
16473+
16474 /* Save guest registers, load host registers, keep flags */
16475 "mov %0, %c[wordsize](%%"R"sp) \n\t"
16476 "pop %0 \n\t"
16477@@ -4259,6 +4272,11 @@ static void __noclone vmx_vcpu_run(struc
16478 #endif
16479 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
16480 [wordsize]"i"(sizeof(ulong))
16481+
16482+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16483+ ,[cs]"i"(__KERNEL_CS)
16484+#endif
16485+
16486 : "cc", "memory"
16487 , R"ax", R"bx", R"di", R"si"
16488 #ifdef CONFIG_X86_64
16489@@ -4276,7 +4294,16 @@ static void __noclone vmx_vcpu_run(struc
16490
16491 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
16492
16493- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
16494+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
16495+
16496+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16497+ loadsegment(fs, __KERNEL_PERCPU);
16498+#endif
16499+
16500+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16501+ __set_fs(current_thread_info()->addr_limit);
16502+#endif
16503+
16504 vmx->launched = 1;
16505
16506 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
16507diff -urNp linux-3.0.7/arch/x86/kvm/x86.c linux-3.0.7/arch/x86/kvm/x86.c
16508--- linux-3.0.7/arch/x86/kvm/x86.c 2011-07-21 22:17:23.000000000 -0400
16509+++ linux-3.0.7/arch/x86/kvm/x86.c 2011-10-06 04:17:55.000000000 -0400
16510@@ -1313,8 +1313,8 @@ static int xen_hvm_config(struct kvm_vcp
16511 {
16512 struct kvm *kvm = vcpu->kvm;
16513 int lm = is_long_mode(vcpu);
16514- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
16515- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
16516+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
16517+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
16518 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
16519 : kvm->arch.xen_hvm_config.blob_size_32;
16520 u32 page_num = data & ~PAGE_MASK;
16521@@ -2057,6 +2057,8 @@ long kvm_arch_dev_ioctl(struct file *fil
16522 if (n < msr_list.nmsrs)
16523 goto out;
16524 r = -EFAULT;
16525+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
16526+ goto out;
16527 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
16528 num_msrs_to_save * sizeof(u32)))
16529 goto out;
16530@@ -2229,15 +2231,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
16531 struct kvm_cpuid2 *cpuid,
16532 struct kvm_cpuid_entry2 __user *entries)
16533 {
16534- int r;
16535+ int r, i;
16536
16537 r = -E2BIG;
16538 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
16539 goto out;
16540 r = -EFAULT;
16541- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
16542- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16543+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16544 goto out;
16545+ for (i = 0; i < cpuid->nent; ++i) {
16546+ struct kvm_cpuid_entry2 cpuid_entry;
16547+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
16548+ goto out;
16549+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
16550+ }
16551 vcpu->arch.cpuid_nent = cpuid->nent;
16552 kvm_apic_set_version(vcpu);
16553 kvm_x86_ops->cpuid_update(vcpu);
16554@@ -2252,15 +2259,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
16555 struct kvm_cpuid2 *cpuid,
16556 struct kvm_cpuid_entry2 __user *entries)
16557 {
16558- int r;
16559+ int r, i;
16560
16561 r = -E2BIG;
16562 if (cpuid->nent < vcpu->arch.cpuid_nent)
16563 goto out;
16564 r = -EFAULT;
16565- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
16566- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16567+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16568 goto out;
16569+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
16570+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
16571+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
16572+ goto out;
16573+ }
16574 return 0;
16575
16576 out:
16577@@ -2579,7 +2590,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
16578 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
16579 struct kvm_interrupt *irq)
16580 {
16581- if (irq->irq < 0 || irq->irq >= 256)
16582+ if (irq->irq >= 256)
16583 return -EINVAL;
16584 if (irqchip_in_kernel(vcpu->kvm))
16585 return -ENXIO;
16586@@ -4878,7 +4889,7 @@ void kvm_after_handle_nmi(struct kvm_vcp
16587 }
16588 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
16589
16590-int kvm_arch_init(void *opaque)
16591+int kvm_arch_init(const void *opaque)
16592 {
16593 int r;
16594 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
16595diff -urNp linux-3.0.7/arch/x86/lguest/boot.c linux-3.0.7/arch/x86/lguest/boot.c
16596--- linux-3.0.7/arch/x86/lguest/boot.c 2011-07-21 22:17:23.000000000 -0400
16597+++ linux-3.0.7/arch/x86/lguest/boot.c 2011-08-23 21:47:55.000000000 -0400
16598@@ -1176,9 +1176,10 @@ static __init int early_put_chars(u32 vt
16599 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
16600 * Launcher to reboot us.
16601 */
16602-static void lguest_restart(char *reason)
16603+static __noreturn void lguest_restart(char *reason)
16604 {
16605 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
16606+ BUG();
16607 }
16608
16609 /*G:050
16610diff -urNp linux-3.0.7/arch/x86/lib/atomic64_32.c linux-3.0.7/arch/x86/lib/atomic64_32.c
16611--- linux-3.0.7/arch/x86/lib/atomic64_32.c 2011-07-21 22:17:23.000000000 -0400
16612+++ linux-3.0.7/arch/x86/lib/atomic64_32.c 2011-08-23 21:47:55.000000000 -0400
16613@@ -8,18 +8,30 @@
16614
16615 long long atomic64_read_cx8(long long, const atomic64_t *v);
16616 EXPORT_SYMBOL(atomic64_read_cx8);
16617+long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16618+EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
16619 long long atomic64_set_cx8(long long, const atomic64_t *v);
16620 EXPORT_SYMBOL(atomic64_set_cx8);
16621+long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16622+EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
16623 long long atomic64_xchg_cx8(long long, unsigned high);
16624 EXPORT_SYMBOL(atomic64_xchg_cx8);
16625 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
16626 EXPORT_SYMBOL(atomic64_add_return_cx8);
16627+long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16628+EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
16629 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
16630 EXPORT_SYMBOL(atomic64_sub_return_cx8);
16631+long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16632+EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
16633 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
16634 EXPORT_SYMBOL(atomic64_inc_return_cx8);
16635+long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16636+EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
16637 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
16638 EXPORT_SYMBOL(atomic64_dec_return_cx8);
16639+long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16640+EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
16641 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
16642 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
16643 int atomic64_inc_not_zero_cx8(atomic64_t *v);
16644@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
16645 #ifndef CONFIG_X86_CMPXCHG64
16646 long long atomic64_read_386(long long, const atomic64_t *v);
16647 EXPORT_SYMBOL(atomic64_read_386);
16648+long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
16649+EXPORT_SYMBOL(atomic64_read_unchecked_386);
16650 long long atomic64_set_386(long long, const atomic64_t *v);
16651 EXPORT_SYMBOL(atomic64_set_386);
16652+long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
16653+EXPORT_SYMBOL(atomic64_set_unchecked_386);
16654 long long atomic64_xchg_386(long long, unsigned high);
16655 EXPORT_SYMBOL(atomic64_xchg_386);
16656 long long atomic64_add_return_386(long long a, atomic64_t *v);
16657 EXPORT_SYMBOL(atomic64_add_return_386);
16658+long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16659+EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
16660 long long atomic64_sub_return_386(long long a, atomic64_t *v);
16661 EXPORT_SYMBOL(atomic64_sub_return_386);
16662+long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16663+EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
16664 long long atomic64_inc_return_386(long long a, atomic64_t *v);
16665 EXPORT_SYMBOL(atomic64_inc_return_386);
16666+long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16667+EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
16668 long long atomic64_dec_return_386(long long a, atomic64_t *v);
16669 EXPORT_SYMBOL(atomic64_dec_return_386);
16670+long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16671+EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
16672 long long atomic64_add_386(long long a, atomic64_t *v);
16673 EXPORT_SYMBOL(atomic64_add_386);
16674+long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
16675+EXPORT_SYMBOL(atomic64_add_unchecked_386);
16676 long long atomic64_sub_386(long long a, atomic64_t *v);
16677 EXPORT_SYMBOL(atomic64_sub_386);
16678+long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
16679+EXPORT_SYMBOL(atomic64_sub_unchecked_386);
16680 long long atomic64_inc_386(long long a, atomic64_t *v);
16681 EXPORT_SYMBOL(atomic64_inc_386);
16682+long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
16683+EXPORT_SYMBOL(atomic64_inc_unchecked_386);
16684 long long atomic64_dec_386(long long a, atomic64_t *v);
16685 EXPORT_SYMBOL(atomic64_dec_386);
16686+long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
16687+EXPORT_SYMBOL(atomic64_dec_unchecked_386);
16688 long long atomic64_dec_if_positive_386(atomic64_t *v);
16689 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
16690 int atomic64_inc_not_zero_386(atomic64_t *v);
16691diff -urNp linux-3.0.7/arch/x86/lib/atomic64_386_32.S linux-3.0.7/arch/x86/lib/atomic64_386_32.S
16692--- linux-3.0.7/arch/x86/lib/atomic64_386_32.S 2011-07-21 22:17:23.000000000 -0400
16693+++ linux-3.0.7/arch/x86/lib/atomic64_386_32.S 2011-08-23 21:47:55.000000000 -0400
16694@@ -48,6 +48,10 @@ BEGIN(read)
16695 movl (v), %eax
16696 movl 4(v), %edx
16697 RET_ENDP
16698+BEGIN(read_unchecked)
16699+ movl (v), %eax
16700+ movl 4(v), %edx
16701+RET_ENDP
16702 #undef v
16703
16704 #define v %esi
16705@@ -55,6 +59,10 @@ BEGIN(set)
16706 movl %ebx, (v)
16707 movl %ecx, 4(v)
16708 RET_ENDP
16709+BEGIN(set_unchecked)
16710+ movl %ebx, (v)
16711+ movl %ecx, 4(v)
16712+RET_ENDP
16713 #undef v
16714
16715 #define v %esi
16716@@ -70,6 +78,20 @@ RET_ENDP
16717 BEGIN(add)
16718 addl %eax, (v)
16719 adcl %edx, 4(v)
16720+
16721+#ifdef CONFIG_PAX_REFCOUNT
16722+ jno 0f
16723+ subl %eax, (v)
16724+ sbbl %edx, 4(v)
16725+ int $4
16726+0:
16727+ _ASM_EXTABLE(0b, 0b)
16728+#endif
16729+
16730+RET_ENDP
16731+BEGIN(add_unchecked)
16732+ addl %eax, (v)
16733+ adcl %edx, 4(v)
16734 RET_ENDP
16735 #undef v
16736
16737@@ -77,6 +99,24 @@ RET_ENDP
16738 BEGIN(add_return)
16739 addl (v), %eax
16740 adcl 4(v), %edx
16741+
16742+#ifdef CONFIG_PAX_REFCOUNT
16743+ into
16744+1234:
16745+ _ASM_EXTABLE(1234b, 2f)
16746+#endif
16747+
16748+ movl %eax, (v)
16749+ movl %edx, 4(v)
16750+
16751+#ifdef CONFIG_PAX_REFCOUNT
16752+2:
16753+#endif
16754+
16755+RET_ENDP
16756+BEGIN(add_return_unchecked)
16757+ addl (v), %eax
16758+ adcl 4(v), %edx
16759 movl %eax, (v)
16760 movl %edx, 4(v)
16761 RET_ENDP
16762@@ -86,6 +126,20 @@ RET_ENDP
16763 BEGIN(sub)
16764 subl %eax, (v)
16765 sbbl %edx, 4(v)
16766+
16767+#ifdef CONFIG_PAX_REFCOUNT
16768+ jno 0f
16769+ addl %eax, (v)
16770+ adcl %edx, 4(v)
16771+ int $4
16772+0:
16773+ _ASM_EXTABLE(0b, 0b)
16774+#endif
16775+
16776+RET_ENDP
16777+BEGIN(sub_unchecked)
16778+ subl %eax, (v)
16779+ sbbl %edx, 4(v)
16780 RET_ENDP
16781 #undef v
16782
16783@@ -96,6 +150,27 @@ BEGIN(sub_return)
16784 sbbl $0, %edx
16785 addl (v), %eax
16786 adcl 4(v), %edx
16787+
16788+#ifdef CONFIG_PAX_REFCOUNT
16789+ into
16790+1234:
16791+ _ASM_EXTABLE(1234b, 2f)
16792+#endif
16793+
16794+ movl %eax, (v)
16795+ movl %edx, 4(v)
16796+
16797+#ifdef CONFIG_PAX_REFCOUNT
16798+2:
16799+#endif
16800+
16801+RET_ENDP
16802+BEGIN(sub_return_unchecked)
16803+ negl %edx
16804+ negl %eax
16805+ sbbl $0, %edx
16806+ addl (v), %eax
16807+ adcl 4(v), %edx
16808 movl %eax, (v)
16809 movl %edx, 4(v)
16810 RET_ENDP
16811@@ -105,6 +180,20 @@ RET_ENDP
16812 BEGIN(inc)
16813 addl $1, (v)
16814 adcl $0, 4(v)
16815+
16816+#ifdef CONFIG_PAX_REFCOUNT
16817+ jno 0f
16818+ subl $1, (v)
16819+ sbbl $0, 4(v)
16820+ int $4
16821+0:
16822+ _ASM_EXTABLE(0b, 0b)
16823+#endif
16824+
16825+RET_ENDP
16826+BEGIN(inc_unchecked)
16827+ addl $1, (v)
16828+ adcl $0, 4(v)
16829 RET_ENDP
16830 #undef v
16831
16832@@ -114,6 +203,26 @@ BEGIN(inc_return)
16833 movl 4(v), %edx
16834 addl $1, %eax
16835 adcl $0, %edx
16836+
16837+#ifdef CONFIG_PAX_REFCOUNT
16838+ into
16839+1234:
16840+ _ASM_EXTABLE(1234b, 2f)
16841+#endif
16842+
16843+ movl %eax, (v)
16844+ movl %edx, 4(v)
16845+
16846+#ifdef CONFIG_PAX_REFCOUNT
16847+2:
16848+#endif
16849+
16850+RET_ENDP
16851+BEGIN(inc_return_unchecked)
16852+ movl (v), %eax
16853+ movl 4(v), %edx
16854+ addl $1, %eax
16855+ adcl $0, %edx
16856 movl %eax, (v)
16857 movl %edx, 4(v)
16858 RET_ENDP
16859@@ -123,6 +232,20 @@ RET_ENDP
16860 BEGIN(dec)
16861 subl $1, (v)
16862 sbbl $0, 4(v)
16863+
16864+#ifdef CONFIG_PAX_REFCOUNT
16865+ jno 0f
16866+ addl $1, (v)
16867+ adcl $0, 4(v)
16868+ int $4
16869+0:
16870+ _ASM_EXTABLE(0b, 0b)
16871+#endif
16872+
16873+RET_ENDP
16874+BEGIN(dec_unchecked)
16875+ subl $1, (v)
16876+ sbbl $0, 4(v)
16877 RET_ENDP
16878 #undef v
16879
16880@@ -132,6 +255,26 @@ BEGIN(dec_return)
16881 movl 4(v), %edx
16882 subl $1, %eax
16883 sbbl $0, %edx
16884+
16885+#ifdef CONFIG_PAX_REFCOUNT
16886+ into
16887+1234:
16888+ _ASM_EXTABLE(1234b, 2f)
16889+#endif
16890+
16891+ movl %eax, (v)
16892+ movl %edx, 4(v)
16893+
16894+#ifdef CONFIG_PAX_REFCOUNT
16895+2:
16896+#endif
16897+
16898+RET_ENDP
16899+BEGIN(dec_return_unchecked)
16900+ movl (v), %eax
16901+ movl 4(v), %edx
16902+ subl $1, %eax
16903+ sbbl $0, %edx
16904 movl %eax, (v)
16905 movl %edx, 4(v)
16906 RET_ENDP
16907@@ -143,6 +286,13 @@ BEGIN(add_unless)
16908 adcl %edx, %edi
16909 addl (v), %eax
16910 adcl 4(v), %edx
16911+
16912+#ifdef CONFIG_PAX_REFCOUNT
16913+ into
16914+1234:
16915+ _ASM_EXTABLE(1234b, 2f)
16916+#endif
16917+
16918 cmpl %eax, %esi
16919 je 3f
16920 1:
16921@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
16922 1:
16923 addl $1, %eax
16924 adcl $0, %edx
16925+
16926+#ifdef CONFIG_PAX_REFCOUNT
16927+ into
16928+1234:
16929+ _ASM_EXTABLE(1234b, 2f)
16930+#endif
16931+
16932 movl %eax, (v)
16933 movl %edx, 4(v)
16934 movl $1, %eax
16935@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
16936 movl 4(v), %edx
16937 subl $1, %eax
16938 sbbl $0, %edx
16939+
16940+#ifdef CONFIG_PAX_REFCOUNT
16941+ into
16942+1234:
16943+ _ASM_EXTABLE(1234b, 1f)
16944+#endif
16945+
16946 js 1f
16947 movl %eax, (v)
16948 movl %edx, 4(v)
16949diff -urNp linux-3.0.7/arch/x86/lib/atomic64_cx8_32.S linux-3.0.7/arch/x86/lib/atomic64_cx8_32.S
16950--- linux-3.0.7/arch/x86/lib/atomic64_cx8_32.S 2011-07-21 22:17:23.000000000 -0400
16951+++ linux-3.0.7/arch/x86/lib/atomic64_cx8_32.S 2011-10-06 04:17:55.000000000 -0400
16952@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
16953 CFI_STARTPROC
16954
16955 read64 %ecx
16956+ pax_force_retaddr
16957 ret
16958 CFI_ENDPROC
16959 ENDPROC(atomic64_read_cx8)
16960
16961+ENTRY(atomic64_read_unchecked_cx8)
16962+ CFI_STARTPROC
16963+
16964+ read64 %ecx
16965+ pax_force_retaddr
16966+ ret
16967+ CFI_ENDPROC
16968+ENDPROC(atomic64_read_unchecked_cx8)
16969+
16970 ENTRY(atomic64_set_cx8)
16971 CFI_STARTPROC
16972
16973@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
16974 cmpxchg8b (%esi)
16975 jne 1b
16976
16977+ pax_force_retaddr
16978 ret
16979 CFI_ENDPROC
16980 ENDPROC(atomic64_set_cx8)
16981
16982+ENTRY(atomic64_set_unchecked_cx8)
16983+ CFI_STARTPROC
16984+
16985+1:
16986+/* we don't need LOCK_PREFIX since aligned 64-bit writes
16987+ * are atomic on 586 and newer */
16988+ cmpxchg8b (%esi)
16989+ jne 1b
16990+
16991+ pax_force_retaddr
16992+ ret
16993+ CFI_ENDPROC
16994+ENDPROC(atomic64_set_unchecked_cx8)
16995+
16996 ENTRY(atomic64_xchg_cx8)
16997 CFI_STARTPROC
16998
16999@@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
17000 cmpxchg8b (%esi)
17001 jne 1b
17002
17003+ pax_force_retaddr
17004 ret
17005 CFI_ENDPROC
17006 ENDPROC(atomic64_xchg_cx8)
17007
17008-.macro addsub_return func ins insc
17009-ENTRY(atomic64_\func\()_return_cx8)
17010+.macro addsub_return func ins insc unchecked=""
17011+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
17012 CFI_STARTPROC
17013 SAVE ebp
17014 SAVE ebx
17015@@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
17016 movl %edx, %ecx
17017 \ins\()l %esi, %ebx
17018 \insc\()l %edi, %ecx
17019+
17020+.ifb \unchecked
17021+#ifdef CONFIG_PAX_REFCOUNT
17022+ into
17023+2:
17024+ _ASM_EXTABLE(2b, 3f)
17025+#endif
17026+.endif
17027+
17028 LOCK_PREFIX
17029 cmpxchg8b (%ebp)
17030 jne 1b
17031-
17032-10:
17033 movl %ebx, %eax
17034 movl %ecx, %edx
17035+
17036+.ifb \unchecked
17037+#ifdef CONFIG_PAX_REFCOUNT
17038+3:
17039+#endif
17040+.endif
17041+
17042 RESTORE edi
17043 RESTORE esi
17044 RESTORE ebx
17045 RESTORE ebp
17046+ pax_force_retaddr
17047 ret
17048 CFI_ENDPROC
17049-ENDPROC(atomic64_\func\()_return_cx8)
17050+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
17051 .endm
17052
17053 addsub_return add add adc
17054 addsub_return sub sub sbb
17055+addsub_return add add adc _unchecked
17056+addsub_return sub sub sbb _unchecked
17057
17058-.macro incdec_return func ins insc
17059-ENTRY(atomic64_\func\()_return_cx8)
17060+.macro incdec_return func ins insc unchecked
17061+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
17062 CFI_STARTPROC
17063 SAVE ebx
17064
17065@@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
17066 movl %edx, %ecx
17067 \ins\()l $1, %ebx
17068 \insc\()l $0, %ecx
17069+
17070+.ifb \unchecked
17071+#ifdef CONFIG_PAX_REFCOUNT
17072+ into
17073+2:
17074+ _ASM_EXTABLE(2b, 3f)
17075+#endif
17076+.endif
17077+
17078 LOCK_PREFIX
17079 cmpxchg8b (%esi)
17080 jne 1b
17081
17082-10:
17083 movl %ebx, %eax
17084 movl %ecx, %edx
17085+
17086+.ifb \unchecked
17087+#ifdef CONFIG_PAX_REFCOUNT
17088+3:
17089+#endif
17090+.endif
17091+
17092 RESTORE ebx
17093+ pax_force_retaddr
17094 ret
17095 CFI_ENDPROC
17096-ENDPROC(atomic64_\func\()_return_cx8)
17097+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
17098 .endm
17099
17100 incdec_return inc add adc
17101 incdec_return dec sub sbb
17102+incdec_return inc add adc _unchecked
17103+incdec_return dec sub sbb _unchecked
17104
17105 ENTRY(atomic64_dec_if_positive_cx8)
17106 CFI_STARTPROC
17107@@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
17108 movl %edx, %ecx
17109 subl $1, %ebx
17110 sbb $0, %ecx
17111+
17112+#ifdef CONFIG_PAX_REFCOUNT
17113+ into
17114+1234:
17115+ _ASM_EXTABLE(1234b, 2f)
17116+#endif
17117+
17118 js 2f
17119 LOCK_PREFIX
17120 cmpxchg8b (%esi)
17121@@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
17122 movl %ebx, %eax
17123 movl %ecx, %edx
17124 RESTORE ebx
17125+ pax_force_retaddr
17126 ret
17127 CFI_ENDPROC
17128 ENDPROC(atomic64_dec_if_positive_cx8)
17129@@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
17130 movl %edx, %ecx
17131 addl %esi, %ebx
17132 adcl %edi, %ecx
17133+
17134+#ifdef CONFIG_PAX_REFCOUNT
17135+ into
17136+1234:
17137+ _ASM_EXTABLE(1234b, 3f)
17138+#endif
17139+
17140 LOCK_PREFIX
17141 cmpxchg8b (%ebp)
17142 jne 1b
17143@@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
17144 CFI_ADJUST_CFA_OFFSET -8
17145 RESTORE ebx
17146 RESTORE ebp
17147+ pax_force_retaddr
17148 ret
17149 4:
17150 cmpl %edx, 4(%esp)
17151@@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
17152 movl %edx, %ecx
17153 addl $1, %ebx
17154 adcl $0, %ecx
17155+
17156+#ifdef CONFIG_PAX_REFCOUNT
17157+ into
17158+1234:
17159+ _ASM_EXTABLE(1234b, 3f)
17160+#endif
17161+
17162 LOCK_PREFIX
17163 cmpxchg8b (%esi)
17164 jne 1b
17165@@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
17166 movl $1, %eax
17167 3:
17168 RESTORE ebx
17169+ pax_force_retaddr
17170 ret
17171 4:
17172 testl %edx, %edx
17173diff -urNp linux-3.0.7/arch/x86/lib/checksum_32.S linux-3.0.7/arch/x86/lib/checksum_32.S
17174--- linux-3.0.7/arch/x86/lib/checksum_32.S 2011-07-21 22:17:23.000000000 -0400
17175+++ linux-3.0.7/arch/x86/lib/checksum_32.S 2011-08-23 21:47:55.000000000 -0400
17176@@ -28,7 +28,8 @@
17177 #include <linux/linkage.h>
17178 #include <asm/dwarf2.h>
17179 #include <asm/errno.h>
17180-
17181+#include <asm/segment.h>
17182+
17183 /*
17184 * computes a partial checksum, e.g. for TCP/UDP fragments
17185 */
17186@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
17187
17188 #define ARGBASE 16
17189 #define FP 12
17190-
17191-ENTRY(csum_partial_copy_generic)
17192+
17193+ENTRY(csum_partial_copy_generic_to_user)
17194 CFI_STARTPROC
17195+
17196+#ifdef CONFIG_PAX_MEMORY_UDEREF
17197+ pushl_cfi %gs
17198+ popl_cfi %es
17199+ jmp csum_partial_copy_generic
17200+#endif
17201+
17202+ENTRY(csum_partial_copy_generic_from_user)
17203+
17204+#ifdef CONFIG_PAX_MEMORY_UDEREF
17205+ pushl_cfi %gs
17206+ popl_cfi %ds
17207+#endif
17208+
17209+ENTRY(csum_partial_copy_generic)
17210 subl $4,%esp
17211 CFI_ADJUST_CFA_OFFSET 4
17212 pushl_cfi %edi
17213@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
17214 jmp 4f
17215 SRC(1: movw (%esi), %bx )
17216 addl $2, %esi
17217-DST( movw %bx, (%edi) )
17218+DST( movw %bx, %es:(%edi) )
17219 addl $2, %edi
17220 addw %bx, %ax
17221 adcl $0, %eax
17222@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
17223 SRC(1: movl (%esi), %ebx )
17224 SRC( movl 4(%esi), %edx )
17225 adcl %ebx, %eax
17226-DST( movl %ebx, (%edi) )
17227+DST( movl %ebx, %es:(%edi) )
17228 adcl %edx, %eax
17229-DST( movl %edx, 4(%edi) )
17230+DST( movl %edx, %es:4(%edi) )
17231
17232 SRC( movl 8(%esi), %ebx )
17233 SRC( movl 12(%esi), %edx )
17234 adcl %ebx, %eax
17235-DST( movl %ebx, 8(%edi) )
17236+DST( movl %ebx, %es:8(%edi) )
17237 adcl %edx, %eax
17238-DST( movl %edx, 12(%edi) )
17239+DST( movl %edx, %es:12(%edi) )
17240
17241 SRC( movl 16(%esi), %ebx )
17242 SRC( movl 20(%esi), %edx )
17243 adcl %ebx, %eax
17244-DST( movl %ebx, 16(%edi) )
17245+DST( movl %ebx, %es:16(%edi) )
17246 adcl %edx, %eax
17247-DST( movl %edx, 20(%edi) )
17248+DST( movl %edx, %es:20(%edi) )
17249
17250 SRC( movl 24(%esi), %ebx )
17251 SRC( movl 28(%esi), %edx )
17252 adcl %ebx, %eax
17253-DST( movl %ebx, 24(%edi) )
17254+DST( movl %ebx, %es:24(%edi) )
17255 adcl %edx, %eax
17256-DST( movl %edx, 28(%edi) )
17257+DST( movl %edx, %es:28(%edi) )
17258
17259 lea 32(%esi), %esi
17260 lea 32(%edi), %edi
17261@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
17262 shrl $2, %edx # This clears CF
17263 SRC(3: movl (%esi), %ebx )
17264 adcl %ebx, %eax
17265-DST( movl %ebx, (%edi) )
17266+DST( movl %ebx, %es:(%edi) )
17267 lea 4(%esi), %esi
17268 lea 4(%edi), %edi
17269 dec %edx
17270@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
17271 jb 5f
17272 SRC( movw (%esi), %cx )
17273 leal 2(%esi), %esi
17274-DST( movw %cx, (%edi) )
17275+DST( movw %cx, %es:(%edi) )
17276 leal 2(%edi), %edi
17277 je 6f
17278 shll $16,%ecx
17279 SRC(5: movb (%esi), %cl )
17280-DST( movb %cl, (%edi) )
17281+DST( movb %cl, %es:(%edi) )
17282 6: addl %ecx, %eax
17283 adcl $0, %eax
17284 7:
17285@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
17286
17287 6001:
17288 movl ARGBASE+20(%esp), %ebx # src_err_ptr
17289- movl $-EFAULT, (%ebx)
17290+ movl $-EFAULT, %ss:(%ebx)
17291
17292 # zero the complete destination - computing the rest
17293 # is too much work
17294@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
17295
17296 6002:
17297 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
17298- movl $-EFAULT,(%ebx)
17299+ movl $-EFAULT,%ss:(%ebx)
17300 jmp 5000b
17301
17302 .previous
17303
17304+ pushl_cfi %ss
17305+ popl_cfi %ds
17306+ pushl_cfi %ss
17307+ popl_cfi %es
17308 popl_cfi %ebx
17309 CFI_RESTORE ebx
17310 popl_cfi %esi
17311@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
17312 popl_cfi %ecx # equivalent to addl $4,%esp
17313 ret
17314 CFI_ENDPROC
17315-ENDPROC(csum_partial_copy_generic)
17316+ENDPROC(csum_partial_copy_generic_to_user)
17317
17318 #else
17319
17320 /* Version for PentiumII/PPro */
17321
17322 #define ROUND1(x) \
17323+ nop; nop; nop; \
17324 SRC(movl x(%esi), %ebx ) ; \
17325 addl %ebx, %eax ; \
17326- DST(movl %ebx, x(%edi) ) ;
17327+ DST(movl %ebx, %es:x(%edi)) ;
17328
17329 #define ROUND(x) \
17330+ nop; nop; nop; \
17331 SRC(movl x(%esi), %ebx ) ; \
17332 adcl %ebx, %eax ; \
17333- DST(movl %ebx, x(%edi) ) ;
17334+ DST(movl %ebx, %es:x(%edi)) ;
17335
17336 #define ARGBASE 12
17337-
17338-ENTRY(csum_partial_copy_generic)
17339+
17340+ENTRY(csum_partial_copy_generic_to_user)
17341 CFI_STARTPROC
17342+
17343+#ifdef CONFIG_PAX_MEMORY_UDEREF
17344+ pushl_cfi %gs
17345+ popl_cfi %es
17346+ jmp csum_partial_copy_generic
17347+#endif
17348+
17349+ENTRY(csum_partial_copy_generic_from_user)
17350+
17351+#ifdef CONFIG_PAX_MEMORY_UDEREF
17352+ pushl_cfi %gs
17353+ popl_cfi %ds
17354+#endif
17355+
17356+ENTRY(csum_partial_copy_generic)
17357 pushl_cfi %ebx
17358 CFI_REL_OFFSET ebx, 0
17359 pushl_cfi %edi
17360@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
17361 subl %ebx, %edi
17362 lea -1(%esi),%edx
17363 andl $-32,%edx
17364- lea 3f(%ebx,%ebx), %ebx
17365+ lea 3f(%ebx,%ebx,2), %ebx
17366 testl %esi, %esi
17367 jmp *%ebx
17368 1: addl $64,%esi
17369@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
17370 jb 5f
17371 SRC( movw (%esi), %dx )
17372 leal 2(%esi), %esi
17373-DST( movw %dx, (%edi) )
17374+DST( movw %dx, %es:(%edi) )
17375 leal 2(%edi), %edi
17376 je 6f
17377 shll $16,%edx
17378 5:
17379 SRC( movb (%esi), %dl )
17380-DST( movb %dl, (%edi) )
17381+DST( movb %dl, %es:(%edi) )
17382 6: addl %edx, %eax
17383 adcl $0, %eax
17384 7:
17385 .section .fixup, "ax"
17386 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
17387- movl $-EFAULT, (%ebx)
17388+ movl $-EFAULT, %ss:(%ebx)
17389 # zero the complete destination (computing the rest is too much work)
17390 movl ARGBASE+8(%esp),%edi # dst
17391 movl ARGBASE+12(%esp),%ecx # len
17392@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
17393 rep; stosb
17394 jmp 7b
17395 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
17396- movl $-EFAULT, (%ebx)
17397+ movl $-EFAULT, %ss:(%ebx)
17398 jmp 7b
17399 .previous
17400
17401+#ifdef CONFIG_PAX_MEMORY_UDEREF
17402+ pushl_cfi %ss
17403+ popl_cfi %ds
17404+ pushl_cfi %ss
17405+ popl_cfi %es
17406+#endif
17407+
17408 popl_cfi %esi
17409 CFI_RESTORE esi
17410 popl_cfi %edi
17411@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
17412 CFI_RESTORE ebx
17413 ret
17414 CFI_ENDPROC
17415-ENDPROC(csum_partial_copy_generic)
17416+ENDPROC(csum_partial_copy_generic_to_user)
17417
17418 #undef ROUND
17419 #undef ROUND1
17420diff -urNp linux-3.0.7/arch/x86/lib/clear_page_64.S linux-3.0.7/arch/x86/lib/clear_page_64.S
17421--- linux-3.0.7/arch/x86/lib/clear_page_64.S 2011-07-21 22:17:23.000000000 -0400
17422+++ linux-3.0.7/arch/x86/lib/clear_page_64.S 2011-10-06 04:17:55.000000000 -0400
17423@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
17424 movl $4096/8,%ecx
17425 xorl %eax,%eax
17426 rep stosq
17427+ pax_force_retaddr
17428 ret
17429 CFI_ENDPROC
17430 ENDPROC(clear_page_c)
17431@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
17432 movl $4096,%ecx
17433 xorl %eax,%eax
17434 rep stosb
17435+ pax_force_retaddr
17436 ret
17437 CFI_ENDPROC
17438 ENDPROC(clear_page_c_e)
17439@@ -43,6 +45,7 @@ ENTRY(clear_page)
17440 leaq 64(%rdi),%rdi
17441 jnz .Lloop
17442 nop
17443+ pax_force_retaddr
17444 ret
17445 CFI_ENDPROC
17446 .Lclear_page_end:
17447@@ -58,7 +61,7 @@ ENDPROC(clear_page)
17448
17449 #include <asm/cpufeature.h>
17450
17451- .section .altinstr_replacement,"ax"
17452+ .section .altinstr_replacement,"a"
17453 1: .byte 0xeb /* jmp <disp8> */
17454 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
17455 2: .byte 0xeb /* jmp <disp8> */
17456diff -urNp linux-3.0.7/arch/x86/lib/cmpxchg16b_emu.S linux-3.0.7/arch/x86/lib/cmpxchg16b_emu.S
17457--- linux-3.0.7/arch/x86/lib/cmpxchg16b_emu.S 2011-07-21 22:17:23.000000000 -0400
17458+++ linux-3.0.7/arch/x86/lib/cmpxchg16b_emu.S 2011-10-07 19:07:28.000000000 -0400
17459@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
17460
17461 popf
17462 mov $1, %al
17463+ pax_force_retaddr
17464 ret
17465
17466 not_same:
17467 popf
17468 xor %al,%al
17469+ pax_force_retaddr
17470 ret
17471
17472 CFI_ENDPROC
17473diff -urNp linux-3.0.7/arch/x86/lib/copy_page_64.S linux-3.0.7/arch/x86/lib/copy_page_64.S
17474--- linux-3.0.7/arch/x86/lib/copy_page_64.S 2011-07-21 22:17:23.000000000 -0400
17475+++ linux-3.0.7/arch/x86/lib/copy_page_64.S 2011-10-06 04:17:55.000000000 -0400
17476@@ -2,12 +2,14 @@
17477
17478 #include <linux/linkage.h>
17479 #include <asm/dwarf2.h>
17480+#include <asm/alternative-asm.h>
17481
17482 ALIGN
17483 copy_page_c:
17484 CFI_STARTPROC
17485 movl $4096/8,%ecx
17486 rep movsq
17487+ pax_force_retaddr
17488 ret
17489 CFI_ENDPROC
17490 ENDPROC(copy_page_c)
17491@@ -94,6 +96,7 @@ ENTRY(copy_page)
17492 CFI_RESTORE r13
17493 addq $3*8,%rsp
17494 CFI_ADJUST_CFA_OFFSET -3*8
17495+ pax_force_retaddr
17496 ret
17497 .Lcopy_page_end:
17498 CFI_ENDPROC
17499@@ -104,7 +107,7 @@ ENDPROC(copy_page)
17500
17501 #include <asm/cpufeature.h>
17502
17503- .section .altinstr_replacement,"ax"
17504+ .section .altinstr_replacement,"a"
17505 1: .byte 0xeb /* jmp <disp8> */
17506 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
17507 2:
17508diff -urNp linux-3.0.7/arch/x86/lib/copy_user_64.S linux-3.0.7/arch/x86/lib/copy_user_64.S
17509--- linux-3.0.7/arch/x86/lib/copy_user_64.S 2011-07-21 22:17:23.000000000 -0400
17510+++ linux-3.0.7/arch/x86/lib/copy_user_64.S 2011-10-06 04:17:55.000000000 -0400
17511@@ -16,6 +16,7 @@
17512 #include <asm/thread_info.h>
17513 #include <asm/cpufeature.h>
17514 #include <asm/alternative-asm.h>
17515+#include <asm/pgtable.h>
17516
17517 /*
17518 * By placing feature2 after feature1 in altinstructions section, we logically
17519@@ -29,7 +30,7 @@
17520 .byte 0xe9 /* 32bit jump */
17521 .long \orig-1f /* by default jump to orig */
17522 1:
17523- .section .altinstr_replacement,"ax"
17524+ .section .altinstr_replacement,"a"
17525 2: .byte 0xe9 /* near jump with 32bit immediate */
17526 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
17527 3: .byte 0xe9 /* near jump with 32bit immediate */
17528@@ -71,47 +72,20 @@
17529 #endif
17530 .endm
17531
17532-/* Standard copy_to_user with segment limit checking */
17533-ENTRY(_copy_to_user)
17534- CFI_STARTPROC
17535- GET_THREAD_INFO(%rax)
17536- movq %rdi,%rcx
17537- addq %rdx,%rcx
17538- jc bad_to_user
17539- cmpq TI_addr_limit(%rax),%rcx
17540- ja bad_to_user
17541- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17542- copy_user_generic_unrolled,copy_user_generic_string, \
17543- copy_user_enhanced_fast_string
17544- CFI_ENDPROC
17545-ENDPROC(_copy_to_user)
17546-
17547-/* Standard copy_from_user with segment limit checking */
17548-ENTRY(_copy_from_user)
17549- CFI_STARTPROC
17550- GET_THREAD_INFO(%rax)
17551- movq %rsi,%rcx
17552- addq %rdx,%rcx
17553- jc bad_from_user
17554- cmpq TI_addr_limit(%rax),%rcx
17555- ja bad_from_user
17556- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
17557- copy_user_generic_unrolled,copy_user_generic_string, \
17558- copy_user_enhanced_fast_string
17559- CFI_ENDPROC
17560-ENDPROC(_copy_from_user)
17561-
17562 .section .fixup,"ax"
17563 /* must zero dest */
17564 ENTRY(bad_from_user)
17565 bad_from_user:
17566 CFI_STARTPROC
17567+ testl %edx,%edx
17568+ js bad_to_user
17569 movl %edx,%ecx
17570 xorl %eax,%eax
17571 rep
17572 stosb
17573 bad_to_user:
17574 movl %edx,%eax
17575+ pax_force_retaddr
17576 ret
17577 CFI_ENDPROC
17578 ENDPROC(bad_from_user)
17579@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
17580 decl %ecx
17581 jnz 21b
17582 23: xor %eax,%eax
17583+ pax_force_retaddr
17584 ret
17585
17586 .section .fixup,"ax"
17587@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
17588 3: rep
17589 movsb
17590 4: xorl %eax,%eax
17591+ pax_force_retaddr
17592 ret
17593
17594 .section .fixup,"ax"
17595@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
17596 1: rep
17597 movsb
17598 2: xorl %eax,%eax
17599+ pax_force_retaddr
17600 ret
17601
17602 .section .fixup,"ax"
17603diff -urNp linux-3.0.7/arch/x86/lib/copy_user_nocache_64.S linux-3.0.7/arch/x86/lib/copy_user_nocache_64.S
17604--- linux-3.0.7/arch/x86/lib/copy_user_nocache_64.S 2011-07-21 22:17:23.000000000 -0400
17605+++ linux-3.0.7/arch/x86/lib/copy_user_nocache_64.S 2011-10-06 04:17:55.000000000 -0400
17606@@ -8,12 +8,14 @@
17607
17608 #include <linux/linkage.h>
17609 #include <asm/dwarf2.h>
17610+#include <asm/alternative-asm.h>
17611
17612 #define FIX_ALIGNMENT 1
17613
17614 #include <asm/current.h>
17615 #include <asm/asm-offsets.h>
17616 #include <asm/thread_info.h>
17617+#include <asm/pgtable.h>
17618
17619 .macro ALIGN_DESTINATION
17620 #ifdef FIX_ALIGNMENT
17621@@ -50,6 +52,15 @@
17622 */
17623 ENTRY(__copy_user_nocache)
17624 CFI_STARTPROC
17625+
17626+#ifdef CONFIG_PAX_MEMORY_UDEREF
17627+ mov $PAX_USER_SHADOW_BASE,%rcx
17628+ cmp %rcx,%rsi
17629+ jae 1f
17630+ add %rcx,%rsi
17631+1:
17632+#endif
17633+
17634 cmpl $8,%edx
17635 jb 20f /* less then 8 bytes, go to byte copy loop */
17636 ALIGN_DESTINATION
17637@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
17638 jnz 21b
17639 23: xorl %eax,%eax
17640 sfence
17641+ pax_force_retaddr
17642 ret
17643
17644 .section .fixup,"ax"
17645diff -urNp linux-3.0.7/arch/x86/lib/csum-copy_64.S linux-3.0.7/arch/x86/lib/csum-copy_64.S
17646--- linux-3.0.7/arch/x86/lib/csum-copy_64.S 2011-07-21 22:17:23.000000000 -0400
17647+++ linux-3.0.7/arch/x86/lib/csum-copy_64.S 2011-10-06 04:17:55.000000000 -0400
17648@@ -8,6 +8,7 @@
17649 #include <linux/linkage.h>
17650 #include <asm/dwarf2.h>
17651 #include <asm/errno.h>
17652+#include <asm/alternative-asm.h>
17653
17654 /*
17655 * Checksum copy with exception handling.
17656@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
17657 CFI_RESTORE rbp
17658 addq $7*8, %rsp
17659 CFI_ADJUST_CFA_OFFSET -7*8
17660+ pax_force_retaddr
17661 ret
17662 CFI_RESTORE_STATE
17663
17664diff -urNp linux-3.0.7/arch/x86/lib/csum-wrappers_64.c linux-3.0.7/arch/x86/lib/csum-wrappers_64.c
17665--- linux-3.0.7/arch/x86/lib/csum-wrappers_64.c 2011-07-21 22:17:23.000000000 -0400
17666+++ linux-3.0.7/arch/x86/lib/csum-wrappers_64.c 2011-10-06 04:17:55.000000000 -0400
17667@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void _
17668 len -= 2;
17669 }
17670 }
17671- isum = csum_partial_copy_generic((__force const void *)src,
17672+
17673+#ifdef CONFIG_PAX_MEMORY_UDEREF
17674+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
17675+ src += PAX_USER_SHADOW_BASE;
17676+#endif
17677+
17678+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
17679 dst, len, isum, errp, NULL);
17680 if (unlikely(*errp))
17681 goto out_err;
17682@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *sr
17683 }
17684
17685 *errp = 0;
17686- return csum_partial_copy_generic(src, (void __force *)dst,
17687+
17688+#ifdef CONFIG_PAX_MEMORY_UDEREF
17689+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
17690+ dst += PAX_USER_SHADOW_BASE;
17691+#endif
17692+
17693+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
17694 len, isum, NULL, errp);
17695 }
17696 EXPORT_SYMBOL(csum_partial_copy_to_user);
17697diff -urNp linux-3.0.7/arch/x86/lib/getuser.S linux-3.0.7/arch/x86/lib/getuser.S
17698--- linux-3.0.7/arch/x86/lib/getuser.S 2011-07-21 22:17:23.000000000 -0400
17699+++ linux-3.0.7/arch/x86/lib/getuser.S 2011-10-07 19:07:23.000000000 -0400
17700@@ -33,15 +33,38 @@
17701 #include <asm/asm-offsets.h>
17702 #include <asm/thread_info.h>
17703 #include <asm/asm.h>
17704+#include <asm/segment.h>
17705+#include <asm/pgtable.h>
17706+#include <asm/alternative-asm.h>
17707+
17708+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17709+#define __copyuser_seg gs;
17710+#else
17711+#define __copyuser_seg
17712+#endif
17713
17714 .text
17715 ENTRY(__get_user_1)
17716 CFI_STARTPROC
17717+
17718+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17719 GET_THREAD_INFO(%_ASM_DX)
17720 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17721 jae bad_get_user
17722-1: movzb (%_ASM_AX),%edx
17723+
17724+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17725+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17726+ cmp %_ASM_DX,%_ASM_AX
17727+ jae 1234f
17728+ add %_ASM_DX,%_ASM_AX
17729+1234:
17730+#endif
17731+
17732+#endif
17733+
17734+1: __copyuser_seg movzb (%_ASM_AX),%edx
17735 xor %eax,%eax
17736+ pax_force_retaddr
17737 ret
17738 CFI_ENDPROC
17739 ENDPROC(__get_user_1)
17740@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
17741 ENTRY(__get_user_2)
17742 CFI_STARTPROC
17743 add $1,%_ASM_AX
17744+
17745+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17746 jc bad_get_user
17747 GET_THREAD_INFO(%_ASM_DX)
17748 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17749 jae bad_get_user
17750-2: movzwl -1(%_ASM_AX),%edx
17751+
17752+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17753+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17754+ cmp %_ASM_DX,%_ASM_AX
17755+ jae 1234f
17756+ add %_ASM_DX,%_ASM_AX
17757+1234:
17758+#endif
17759+
17760+#endif
17761+
17762+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
17763 xor %eax,%eax
17764+ pax_force_retaddr
17765 ret
17766 CFI_ENDPROC
17767 ENDPROC(__get_user_2)
17768@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
17769 ENTRY(__get_user_4)
17770 CFI_STARTPROC
17771 add $3,%_ASM_AX
17772+
17773+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17774 jc bad_get_user
17775 GET_THREAD_INFO(%_ASM_DX)
17776 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17777 jae bad_get_user
17778-3: mov -3(%_ASM_AX),%edx
17779+
17780+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17781+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17782+ cmp %_ASM_DX,%_ASM_AX
17783+ jae 1234f
17784+ add %_ASM_DX,%_ASM_AX
17785+1234:
17786+#endif
17787+
17788+#endif
17789+
17790+3: __copyuser_seg mov -3(%_ASM_AX),%edx
17791 xor %eax,%eax
17792+ pax_force_retaddr
17793 ret
17794 CFI_ENDPROC
17795 ENDPROC(__get_user_4)
17796@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
17797 GET_THREAD_INFO(%_ASM_DX)
17798 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17799 jae bad_get_user
17800+
17801+#ifdef CONFIG_PAX_MEMORY_UDEREF
17802+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17803+ cmp %_ASM_DX,%_ASM_AX
17804+ jae 1234f
17805+ add %_ASM_DX,%_ASM_AX
17806+1234:
17807+#endif
17808+
17809 4: movq -7(%_ASM_AX),%_ASM_DX
17810 xor %eax,%eax
17811+ pax_force_retaddr
17812 ret
17813 CFI_ENDPROC
17814 ENDPROC(__get_user_8)
17815@@ -91,6 +152,7 @@ bad_get_user:
17816 CFI_STARTPROC
17817 xor %edx,%edx
17818 mov $(-EFAULT),%_ASM_AX
17819+ pax_force_retaddr
17820 ret
17821 CFI_ENDPROC
17822 END(bad_get_user)
17823diff -urNp linux-3.0.7/arch/x86/lib/insn.c linux-3.0.7/arch/x86/lib/insn.c
17824--- linux-3.0.7/arch/x86/lib/insn.c 2011-07-21 22:17:23.000000000 -0400
17825+++ linux-3.0.7/arch/x86/lib/insn.c 2011-08-23 21:47:55.000000000 -0400
17826@@ -21,6 +21,11 @@
17827 #include <linux/string.h>
17828 #include <asm/inat.h>
17829 #include <asm/insn.h>
17830+#ifdef __KERNEL__
17831+#include <asm/pgtable_types.h>
17832+#else
17833+#define ktla_ktva(addr) addr
17834+#endif
17835
17836 #define get_next(t, insn) \
17837 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
17838@@ -40,8 +45,8 @@
17839 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
17840 {
17841 memset(insn, 0, sizeof(*insn));
17842- insn->kaddr = kaddr;
17843- insn->next_byte = kaddr;
17844+ insn->kaddr = ktla_ktva(kaddr);
17845+ insn->next_byte = ktla_ktva(kaddr);
17846 insn->x86_64 = x86_64 ? 1 : 0;
17847 insn->opnd_bytes = 4;
17848 if (x86_64)
17849diff -urNp linux-3.0.7/arch/x86/lib/iomap_copy_64.S linux-3.0.7/arch/x86/lib/iomap_copy_64.S
17850--- linux-3.0.7/arch/x86/lib/iomap_copy_64.S 2011-07-21 22:17:23.000000000 -0400
17851+++ linux-3.0.7/arch/x86/lib/iomap_copy_64.S 2011-10-06 04:17:55.000000000 -0400
17852@@ -17,6 +17,7 @@
17853
17854 #include <linux/linkage.h>
17855 #include <asm/dwarf2.h>
17856+#include <asm/alternative-asm.h>
17857
17858 /*
17859 * override generic version in lib/iomap_copy.c
17860@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
17861 CFI_STARTPROC
17862 movl %edx,%ecx
17863 rep movsd
17864+ pax_force_retaddr
17865 ret
17866 CFI_ENDPROC
17867 ENDPROC(__iowrite32_copy)
17868diff -urNp linux-3.0.7/arch/x86/lib/memcpy_64.S linux-3.0.7/arch/x86/lib/memcpy_64.S
17869--- linux-3.0.7/arch/x86/lib/memcpy_64.S 2011-07-21 22:17:23.000000000 -0400
17870+++ linux-3.0.7/arch/x86/lib/memcpy_64.S 2011-10-06 04:17:55.000000000 -0400
17871@@ -34,6 +34,7 @@
17872 rep movsq
17873 movl %edx, %ecx
17874 rep movsb
17875+ pax_force_retaddr
17876 ret
17877 .Lmemcpy_e:
17878 .previous
17879@@ -51,6 +52,7 @@
17880
17881 movl %edx, %ecx
17882 rep movsb
17883+ pax_force_retaddr
17884 ret
17885 .Lmemcpy_e_e:
17886 .previous
17887@@ -141,6 +143,7 @@ ENTRY(memcpy)
17888 movq %r9, 1*8(%rdi)
17889 movq %r10, -2*8(%rdi, %rdx)
17890 movq %r11, -1*8(%rdi, %rdx)
17891+ pax_force_retaddr
17892 retq
17893 .p2align 4
17894 .Lless_16bytes:
17895@@ -153,6 +156,7 @@ ENTRY(memcpy)
17896 movq -1*8(%rsi, %rdx), %r9
17897 movq %r8, 0*8(%rdi)
17898 movq %r9, -1*8(%rdi, %rdx)
17899+ pax_force_retaddr
17900 retq
17901 .p2align 4
17902 .Lless_8bytes:
17903@@ -166,6 +170,7 @@ ENTRY(memcpy)
17904 movl -4(%rsi, %rdx), %r8d
17905 movl %ecx, (%rdi)
17906 movl %r8d, -4(%rdi, %rdx)
17907+ pax_force_retaddr
17908 retq
17909 .p2align 4
17910 .Lless_3bytes:
17911@@ -183,6 +188,7 @@ ENTRY(memcpy)
17912 jnz .Lloop_1
17913
17914 .Lend:
17915+ pax_force_retaddr
17916 retq
17917 CFI_ENDPROC
17918 ENDPROC(memcpy)
17919diff -urNp linux-3.0.7/arch/x86/lib/memmove_64.S linux-3.0.7/arch/x86/lib/memmove_64.S
17920--- linux-3.0.7/arch/x86/lib/memmove_64.S 2011-07-21 22:17:23.000000000 -0400
17921+++ linux-3.0.7/arch/x86/lib/memmove_64.S 2011-10-06 04:17:55.000000000 -0400
17922@@ -9,6 +9,7 @@
17923 #include <linux/linkage.h>
17924 #include <asm/dwarf2.h>
17925 #include <asm/cpufeature.h>
17926+#include <asm/alternative-asm.h>
17927
17928 #undef memmove
17929
17930@@ -201,6 +202,7 @@ ENTRY(memmove)
17931 movb (%rsi), %r11b
17932 movb %r11b, (%rdi)
17933 13:
17934+ pax_force_retaddr
17935 retq
17936 CFI_ENDPROC
17937
17938@@ -209,6 +211,7 @@ ENTRY(memmove)
17939 /* Forward moving data. */
17940 movq %rdx, %rcx
17941 rep movsb
17942+ pax_force_retaddr
17943 retq
17944 .Lmemmove_end_forward_efs:
17945 .previous
17946diff -urNp linux-3.0.7/arch/x86/lib/memset_64.S linux-3.0.7/arch/x86/lib/memset_64.S
17947--- linux-3.0.7/arch/x86/lib/memset_64.S 2011-07-21 22:17:23.000000000 -0400
17948+++ linux-3.0.7/arch/x86/lib/memset_64.S 2011-10-06 04:17:55.000000000 -0400
17949@@ -31,6 +31,7 @@
17950 movl %r8d,%ecx
17951 rep stosb
17952 movq %r9,%rax
17953+ pax_force_retaddr
17954 ret
17955 .Lmemset_e:
17956 .previous
17957@@ -53,6 +54,7 @@
17958 movl %edx,%ecx
17959 rep stosb
17960 movq %r9,%rax
17961+ pax_force_retaddr
17962 ret
17963 .Lmemset_e_e:
17964 .previous
17965@@ -121,6 +123,7 @@ ENTRY(__memset)
17966
17967 .Lende:
17968 movq %r10,%rax
17969+ pax_force_retaddr
17970 ret
17971
17972 CFI_RESTORE_STATE
17973diff -urNp linux-3.0.7/arch/x86/lib/mmx_32.c linux-3.0.7/arch/x86/lib/mmx_32.c
17974--- linux-3.0.7/arch/x86/lib/mmx_32.c 2011-07-21 22:17:23.000000000 -0400
17975+++ linux-3.0.7/arch/x86/lib/mmx_32.c 2011-08-23 21:47:55.000000000 -0400
17976@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
17977 {
17978 void *p;
17979 int i;
17980+ unsigned long cr0;
17981
17982 if (unlikely(in_interrupt()))
17983 return __memcpy(to, from, len);
17984@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
17985 kernel_fpu_begin();
17986
17987 __asm__ __volatile__ (
17988- "1: prefetch (%0)\n" /* This set is 28 bytes */
17989- " prefetch 64(%0)\n"
17990- " prefetch 128(%0)\n"
17991- " prefetch 192(%0)\n"
17992- " prefetch 256(%0)\n"
17993+ "1: prefetch (%1)\n" /* This set is 28 bytes */
17994+ " prefetch 64(%1)\n"
17995+ " prefetch 128(%1)\n"
17996+ " prefetch 192(%1)\n"
17997+ " prefetch 256(%1)\n"
17998 "2: \n"
17999 ".section .fixup, \"ax\"\n"
18000- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18001+ "3: \n"
18002+
18003+#ifdef CONFIG_PAX_KERNEXEC
18004+ " movl %%cr0, %0\n"
18005+ " movl %0, %%eax\n"
18006+ " andl $0xFFFEFFFF, %%eax\n"
18007+ " movl %%eax, %%cr0\n"
18008+#endif
18009+
18010+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18011+
18012+#ifdef CONFIG_PAX_KERNEXEC
18013+ " movl %0, %%cr0\n"
18014+#endif
18015+
18016 " jmp 2b\n"
18017 ".previous\n"
18018 _ASM_EXTABLE(1b, 3b)
18019- : : "r" (from));
18020+ : "=&r" (cr0) : "r" (from) : "ax");
18021
18022 for ( ; i > 5; i--) {
18023 __asm__ __volatile__ (
18024- "1: prefetch 320(%0)\n"
18025- "2: movq (%0), %%mm0\n"
18026- " movq 8(%0), %%mm1\n"
18027- " movq 16(%0), %%mm2\n"
18028- " movq 24(%0), %%mm3\n"
18029- " movq %%mm0, (%1)\n"
18030- " movq %%mm1, 8(%1)\n"
18031- " movq %%mm2, 16(%1)\n"
18032- " movq %%mm3, 24(%1)\n"
18033- " movq 32(%0), %%mm0\n"
18034- " movq 40(%0), %%mm1\n"
18035- " movq 48(%0), %%mm2\n"
18036- " movq 56(%0), %%mm3\n"
18037- " movq %%mm0, 32(%1)\n"
18038- " movq %%mm1, 40(%1)\n"
18039- " movq %%mm2, 48(%1)\n"
18040- " movq %%mm3, 56(%1)\n"
18041+ "1: prefetch 320(%1)\n"
18042+ "2: movq (%1), %%mm0\n"
18043+ " movq 8(%1), %%mm1\n"
18044+ " movq 16(%1), %%mm2\n"
18045+ " movq 24(%1), %%mm3\n"
18046+ " movq %%mm0, (%2)\n"
18047+ " movq %%mm1, 8(%2)\n"
18048+ " movq %%mm2, 16(%2)\n"
18049+ " movq %%mm3, 24(%2)\n"
18050+ " movq 32(%1), %%mm0\n"
18051+ " movq 40(%1), %%mm1\n"
18052+ " movq 48(%1), %%mm2\n"
18053+ " movq 56(%1), %%mm3\n"
18054+ " movq %%mm0, 32(%2)\n"
18055+ " movq %%mm1, 40(%2)\n"
18056+ " movq %%mm2, 48(%2)\n"
18057+ " movq %%mm3, 56(%2)\n"
18058 ".section .fixup, \"ax\"\n"
18059- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18060+ "3:\n"
18061+
18062+#ifdef CONFIG_PAX_KERNEXEC
18063+ " movl %%cr0, %0\n"
18064+ " movl %0, %%eax\n"
18065+ " andl $0xFFFEFFFF, %%eax\n"
18066+ " movl %%eax, %%cr0\n"
18067+#endif
18068+
18069+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18070+
18071+#ifdef CONFIG_PAX_KERNEXEC
18072+ " movl %0, %%cr0\n"
18073+#endif
18074+
18075 " jmp 2b\n"
18076 ".previous\n"
18077 _ASM_EXTABLE(1b, 3b)
18078- : : "r" (from), "r" (to) : "memory");
18079+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18080
18081 from += 64;
18082 to += 64;
18083@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
18084 static void fast_copy_page(void *to, void *from)
18085 {
18086 int i;
18087+ unsigned long cr0;
18088
18089 kernel_fpu_begin();
18090
18091@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
18092 * but that is for later. -AV
18093 */
18094 __asm__ __volatile__(
18095- "1: prefetch (%0)\n"
18096- " prefetch 64(%0)\n"
18097- " prefetch 128(%0)\n"
18098- " prefetch 192(%0)\n"
18099- " prefetch 256(%0)\n"
18100+ "1: prefetch (%1)\n"
18101+ " prefetch 64(%1)\n"
18102+ " prefetch 128(%1)\n"
18103+ " prefetch 192(%1)\n"
18104+ " prefetch 256(%1)\n"
18105 "2: \n"
18106 ".section .fixup, \"ax\"\n"
18107- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18108+ "3: \n"
18109+
18110+#ifdef CONFIG_PAX_KERNEXEC
18111+ " movl %%cr0, %0\n"
18112+ " movl %0, %%eax\n"
18113+ " andl $0xFFFEFFFF, %%eax\n"
18114+ " movl %%eax, %%cr0\n"
18115+#endif
18116+
18117+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18118+
18119+#ifdef CONFIG_PAX_KERNEXEC
18120+ " movl %0, %%cr0\n"
18121+#endif
18122+
18123 " jmp 2b\n"
18124 ".previous\n"
18125- _ASM_EXTABLE(1b, 3b) : : "r" (from));
18126+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18127
18128 for (i = 0; i < (4096-320)/64; i++) {
18129 __asm__ __volatile__ (
18130- "1: prefetch 320(%0)\n"
18131- "2: movq (%0), %%mm0\n"
18132- " movntq %%mm0, (%1)\n"
18133- " movq 8(%0), %%mm1\n"
18134- " movntq %%mm1, 8(%1)\n"
18135- " movq 16(%0), %%mm2\n"
18136- " movntq %%mm2, 16(%1)\n"
18137- " movq 24(%0), %%mm3\n"
18138- " movntq %%mm3, 24(%1)\n"
18139- " movq 32(%0), %%mm4\n"
18140- " movntq %%mm4, 32(%1)\n"
18141- " movq 40(%0), %%mm5\n"
18142- " movntq %%mm5, 40(%1)\n"
18143- " movq 48(%0), %%mm6\n"
18144- " movntq %%mm6, 48(%1)\n"
18145- " movq 56(%0), %%mm7\n"
18146- " movntq %%mm7, 56(%1)\n"
18147+ "1: prefetch 320(%1)\n"
18148+ "2: movq (%1), %%mm0\n"
18149+ " movntq %%mm0, (%2)\n"
18150+ " movq 8(%1), %%mm1\n"
18151+ " movntq %%mm1, 8(%2)\n"
18152+ " movq 16(%1), %%mm2\n"
18153+ " movntq %%mm2, 16(%2)\n"
18154+ " movq 24(%1), %%mm3\n"
18155+ " movntq %%mm3, 24(%2)\n"
18156+ " movq 32(%1), %%mm4\n"
18157+ " movntq %%mm4, 32(%2)\n"
18158+ " movq 40(%1), %%mm5\n"
18159+ " movntq %%mm5, 40(%2)\n"
18160+ " movq 48(%1), %%mm6\n"
18161+ " movntq %%mm6, 48(%2)\n"
18162+ " movq 56(%1), %%mm7\n"
18163+ " movntq %%mm7, 56(%2)\n"
18164 ".section .fixup, \"ax\"\n"
18165- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18166+ "3:\n"
18167+
18168+#ifdef CONFIG_PAX_KERNEXEC
18169+ " movl %%cr0, %0\n"
18170+ " movl %0, %%eax\n"
18171+ " andl $0xFFFEFFFF, %%eax\n"
18172+ " movl %%eax, %%cr0\n"
18173+#endif
18174+
18175+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18176+
18177+#ifdef CONFIG_PAX_KERNEXEC
18178+ " movl %0, %%cr0\n"
18179+#endif
18180+
18181 " jmp 2b\n"
18182 ".previous\n"
18183- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
18184+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18185
18186 from += 64;
18187 to += 64;
18188@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
18189 static void fast_copy_page(void *to, void *from)
18190 {
18191 int i;
18192+ unsigned long cr0;
18193
18194 kernel_fpu_begin();
18195
18196 __asm__ __volatile__ (
18197- "1: prefetch (%0)\n"
18198- " prefetch 64(%0)\n"
18199- " prefetch 128(%0)\n"
18200- " prefetch 192(%0)\n"
18201- " prefetch 256(%0)\n"
18202+ "1: prefetch (%1)\n"
18203+ " prefetch 64(%1)\n"
18204+ " prefetch 128(%1)\n"
18205+ " prefetch 192(%1)\n"
18206+ " prefetch 256(%1)\n"
18207 "2: \n"
18208 ".section .fixup, \"ax\"\n"
18209- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18210+ "3: \n"
18211+
18212+#ifdef CONFIG_PAX_KERNEXEC
18213+ " movl %%cr0, %0\n"
18214+ " movl %0, %%eax\n"
18215+ " andl $0xFFFEFFFF, %%eax\n"
18216+ " movl %%eax, %%cr0\n"
18217+#endif
18218+
18219+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18220+
18221+#ifdef CONFIG_PAX_KERNEXEC
18222+ " movl %0, %%cr0\n"
18223+#endif
18224+
18225 " jmp 2b\n"
18226 ".previous\n"
18227- _ASM_EXTABLE(1b, 3b) : : "r" (from));
18228+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18229
18230 for (i = 0; i < 4096/64; i++) {
18231 __asm__ __volatile__ (
18232- "1: prefetch 320(%0)\n"
18233- "2: movq (%0), %%mm0\n"
18234- " movq 8(%0), %%mm1\n"
18235- " movq 16(%0), %%mm2\n"
18236- " movq 24(%0), %%mm3\n"
18237- " movq %%mm0, (%1)\n"
18238- " movq %%mm1, 8(%1)\n"
18239- " movq %%mm2, 16(%1)\n"
18240- " movq %%mm3, 24(%1)\n"
18241- " movq 32(%0), %%mm0\n"
18242- " movq 40(%0), %%mm1\n"
18243- " movq 48(%0), %%mm2\n"
18244- " movq 56(%0), %%mm3\n"
18245- " movq %%mm0, 32(%1)\n"
18246- " movq %%mm1, 40(%1)\n"
18247- " movq %%mm2, 48(%1)\n"
18248- " movq %%mm3, 56(%1)\n"
18249+ "1: prefetch 320(%1)\n"
18250+ "2: movq (%1), %%mm0\n"
18251+ " movq 8(%1), %%mm1\n"
18252+ " movq 16(%1), %%mm2\n"
18253+ " movq 24(%1), %%mm3\n"
18254+ " movq %%mm0, (%2)\n"
18255+ " movq %%mm1, 8(%2)\n"
18256+ " movq %%mm2, 16(%2)\n"
18257+ " movq %%mm3, 24(%2)\n"
18258+ " movq 32(%1), %%mm0\n"
18259+ " movq 40(%1), %%mm1\n"
18260+ " movq 48(%1), %%mm2\n"
18261+ " movq 56(%1), %%mm3\n"
18262+ " movq %%mm0, 32(%2)\n"
18263+ " movq %%mm1, 40(%2)\n"
18264+ " movq %%mm2, 48(%2)\n"
18265+ " movq %%mm3, 56(%2)\n"
18266 ".section .fixup, \"ax\"\n"
18267- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18268+ "3:\n"
18269+
18270+#ifdef CONFIG_PAX_KERNEXEC
18271+ " movl %%cr0, %0\n"
18272+ " movl %0, %%eax\n"
18273+ " andl $0xFFFEFFFF, %%eax\n"
18274+ " movl %%eax, %%cr0\n"
18275+#endif
18276+
18277+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18278+
18279+#ifdef CONFIG_PAX_KERNEXEC
18280+ " movl %0, %%cr0\n"
18281+#endif
18282+
18283 " jmp 2b\n"
18284 ".previous\n"
18285 _ASM_EXTABLE(1b, 3b)
18286- : : "r" (from), "r" (to) : "memory");
18287+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18288
18289 from += 64;
18290 to += 64;
18291diff -urNp linux-3.0.7/arch/x86/lib/msr-reg.S linux-3.0.7/arch/x86/lib/msr-reg.S
18292--- linux-3.0.7/arch/x86/lib/msr-reg.S 2011-07-21 22:17:23.000000000 -0400
18293+++ linux-3.0.7/arch/x86/lib/msr-reg.S 2011-10-07 19:07:28.000000000 -0400
18294@@ -3,6 +3,7 @@
18295 #include <asm/dwarf2.h>
18296 #include <asm/asm.h>
18297 #include <asm/msr.h>
18298+#include <asm/alternative-asm.h>
18299
18300 #ifdef CONFIG_X86_64
18301 /*
18302@@ -37,6 +38,7 @@ ENTRY(native_\op\()_safe_regs)
18303 movl %edi, 28(%r10)
18304 popq_cfi %rbp
18305 popq_cfi %rbx
18306+ pax_force_retaddr
18307 ret
18308 3:
18309 CFI_RESTORE_STATE
18310diff -urNp linux-3.0.7/arch/x86/lib/putuser.S linux-3.0.7/arch/x86/lib/putuser.S
18311--- linux-3.0.7/arch/x86/lib/putuser.S 2011-07-21 22:17:23.000000000 -0400
18312+++ linux-3.0.7/arch/x86/lib/putuser.S 2011-10-07 19:07:23.000000000 -0400
18313@@ -15,7 +15,9 @@
18314 #include <asm/thread_info.h>
18315 #include <asm/errno.h>
18316 #include <asm/asm.h>
18317-
18318+#include <asm/segment.h>
18319+#include <asm/pgtable.h>
18320+#include <asm/alternative-asm.h>
18321
18322 /*
18323 * __put_user_X
18324@@ -29,52 +31,119 @@
18325 * as they get called from within inline assembly.
18326 */
18327
18328-#define ENTER CFI_STARTPROC ; \
18329- GET_THREAD_INFO(%_ASM_BX)
18330-#define EXIT ret ; \
18331+#define ENTER CFI_STARTPROC
18332+#define EXIT pax_force_retaddr; ret ; \
18333 CFI_ENDPROC
18334
18335+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18336+#define _DEST %_ASM_CX,%_ASM_BX
18337+#else
18338+#define _DEST %_ASM_CX
18339+#endif
18340+
18341+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18342+#define __copyuser_seg gs;
18343+#else
18344+#define __copyuser_seg
18345+#endif
18346+
18347 .text
18348 ENTRY(__put_user_1)
18349 ENTER
18350+
18351+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18352+ GET_THREAD_INFO(%_ASM_BX)
18353 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
18354 jae bad_put_user
18355-1: movb %al,(%_ASM_CX)
18356+
18357+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18358+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18359+ cmp %_ASM_BX,%_ASM_CX
18360+ jb 1234f
18361+ xor %ebx,%ebx
18362+1234:
18363+#endif
18364+
18365+#endif
18366+
18367+1: __copyuser_seg movb %al,(_DEST)
18368 xor %eax,%eax
18369 EXIT
18370 ENDPROC(__put_user_1)
18371
18372 ENTRY(__put_user_2)
18373 ENTER
18374+
18375+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18376+ GET_THREAD_INFO(%_ASM_BX)
18377 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18378 sub $1,%_ASM_BX
18379 cmp %_ASM_BX,%_ASM_CX
18380 jae bad_put_user
18381-2: movw %ax,(%_ASM_CX)
18382+
18383+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18384+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18385+ cmp %_ASM_BX,%_ASM_CX
18386+ jb 1234f
18387+ xor %ebx,%ebx
18388+1234:
18389+#endif
18390+
18391+#endif
18392+
18393+2: __copyuser_seg movw %ax,(_DEST)
18394 xor %eax,%eax
18395 EXIT
18396 ENDPROC(__put_user_2)
18397
18398 ENTRY(__put_user_4)
18399 ENTER
18400+
18401+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18402+ GET_THREAD_INFO(%_ASM_BX)
18403 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18404 sub $3,%_ASM_BX
18405 cmp %_ASM_BX,%_ASM_CX
18406 jae bad_put_user
18407-3: movl %eax,(%_ASM_CX)
18408+
18409+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18410+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18411+ cmp %_ASM_BX,%_ASM_CX
18412+ jb 1234f
18413+ xor %ebx,%ebx
18414+1234:
18415+#endif
18416+
18417+#endif
18418+
18419+3: __copyuser_seg movl %eax,(_DEST)
18420 xor %eax,%eax
18421 EXIT
18422 ENDPROC(__put_user_4)
18423
18424 ENTRY(__put_user_8)
18425 ENTER
18426+
18427+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18428+ GET_THREAD_INFO(%_ASM_BX)
18429 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18430 sub $7,%_ASM_BX
18431 cmp %_ASM_BX,%_ASM_CX
18432 jae bad_put_user
18433-4: mov %_ASM_AX,(%_ASM_CX)
18434+
18435+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18436+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18437+ cmp %_ASM_BX,%_ASM_CX
18438+ jb 1234f
18439+ xor %ebx,%ebx
18440+1234:
18441+#endif
18442+
18443+#endif
18444+
18445+4: __copyuser_seg mov %_ASM_AX,(_DEST)
18446 #ifdef CONFIG_X86_32
18447-5: movl %edx,4(%_ASM_CX)
18448+5: __copyuser_seg movl %edx,4(_DEST)
18449 #endif
18450 xor %eax,%eax
18451 EXIT
18452diff -urNp linux-3.0.7/arch/x86/lib/rwlock_64.S linux-3.0.7/arch/x86/lib/rwlock_64.S
18453--- linux-3.0.7/arch/x86/lib/rwlock_64.S 2011-07-21 22:17:23.000000000 -0400
18454+++ linux-3.0.7/arch/x86/lib/rwlock_64.S 2011-10-06 04:17:55.000000000 -0400
18455@@ -17,6 +17,7 @@ ENTRY(__write_lock_failed)
18456 LOCK_PREFIX
18457 subl $RW_LOCK_BIAS,(%rdi)
18458 jnz __write_lock_failed
18459+ pax_force_retaddr
18460 ret
18461 CFI_ENDPROC
18462 END(__write_lock_failed)
18463@@ -33,6 +34,7 @@ ENTRY(__read_lock_failed)
18464 LOCK_PREFIX
18465 decl (%rdi)
18466 js __read_lock_failed
18467+ pax_force_retaddr
18468 ret
18469 CFI_ENDPROC
18470 END(__read_lock_failed)
18471diff -urNp linux-3.0.7/arch/x86/lib/rwsem_64.S linux-3.0.7/arch/x86/lib/rwsem_64.S
18472--- linux-3.0.7/arch/x86/lib/rwsem_64.S 2011-07-21 22:17:23.000000000 -0400
18473+++ linux-3.0.7/arch/x86/lib/rwsem_64.S 2011-10-07 10:46:47.000000000 -0400
18474@@ -51,6 +51,7 @@ ENTRY(call_rwsem_down_read_failed)
18475 popq_cfi %rdx
18476 CFI_RESTORE rdx
18477 restore_common_regs
18478+ pax_force_retaddr
18479 ret
18480 CFI_ENDPROC
18481 ENDPROC(call_rwsem_down_read_failed)
18482@@ -61,6 +62,7 @@ ENTRY(call_rwsem_down_write_failed)
18483 movq %rax,%rdi
18484 call rwsem_down_write_failed
18485 restore_common_regs
18486+ pax_force_retaddr
18487 ret
18488 CFI_ENDPROC
18489 ENDPROC(call_rwsem_down_write_failed)
18490@@ -73,7 +75,8 @@ ENTRY(call_rwsem_wake)
18491 movq %rax,%rdi
18492 call rwsem_wake
18493 restore_common_regs
18494-1: ret
18495+1: pax_force_retaddr
18496+ ret
18497 CFI_ENDPROC
18498 ENDPROC(call_rwsem_wake)
18499
18500@@ -88,6 +91,7 @@ ENTRY(call_rwsem_downgrade_wake)
18501 popq_cfi %rdx
18502 CFI_RESTORE rdx
18503 restore_common_regs
18504+ pax_force_retaddr
18505 ret
18506 CFI_ENDPROC
18507 ENDPROC(call_rwsem_downgrade_wake)
18508diff -urNp linux-3.0.7/arch/x86/lib/thunk_64.S linux-3.0.7/arch/x86/lib/thunk_64.S
18509--- linux-3.0.7/arch/x86/lib/thunk_64.S 2011-07-21 22:17:23.000000000 -0400
18510+++ linux-3.0.7/arch/x86/lib/thunk_64.S 2011-10-06 04:17:55.000000000 -0400
18511@@ -10,7 +10,8 @@
18512 #include <asm/dwarf2.h>
18513 #include <asm/calling.h>
18514 #include <asm/rwlock.h>
18515-
18516+ #include <asm/alternative-asm.h>
18517+
18518 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
18519 .macro thunk name,func
18520 .globl \name
18521@@ -50,5 +51,6 @@
18522 SAVE_ARGS
18523 restore:
18524 RESTORE_ARGS
18525- ret
18526+ pax_force_retaddr
18527+ ret
18528 CFI_ENDPROC
18529diff -urNp linux-3.0.7/arch/x86/lib/usercopy_32.c linux-3.0.7/arch/x86/lib/usercopy_32.c
18530--- linux-3.0.7/arch/x86/lib/usercopy_32.c 2011-07-21 22:17:23.000000000 -0400
18531+++ linux-3.0.7/arch/x86/lib/usercopy_32.c 2011-08-23 21:47:55.000000000 -0400
18532@@ -43,7 +43,7 @@ do { \
18533 __asm__ __volatile__( \
18534 " testl %1,%1\n" \
18535 " jz 2f\n" \
18536- "0: lodsb\n" \
18537+ "0: "__copyuser_seg"lodsb\n" \
18538 " stosb\n" \
18539 " testb %%al,%%al\n" \
18540 " jz 1f\n" \
18541@@ -128,10 +128,12 @@ do { \
18542 int __d0; \
18543 might_fault(); \
18544 __asm__ __volatile__( \
18545+ __COPYUSER_SET_ES \
18546 "0: rep; stosl\n" \
18547 " movl %2,%0\n" \
18548 "1: rep; stosb\n" \
18549 "2:\n" \
18550+ __COPYUSER_RESTORE_ES \
18551 ".section .fixup,\"ax\"\n" \
18552 "3: lea 0(%2,%0,4),%0\n" \
18553 " jmp 2b\n" \
18554@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
18555 might_fault();
18556
18557 __asm__ __volatile__(
18558+ __COPYUSER_SET_ES
18559 " testl %0, %0\n"
18560 " jz 3f\n"
18561 " andl %0,%%ecx\n"
18562@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
18563 " subl %%ecx,%0\n"
18564 " addl %0,%%eax\n"
18565 "1:\n"
18566+ __COPYUSER_RESTORE_ES
18567 ".section .fixup,\"ax\"\n"
18568 "2: xorl %%eax,%%eax\n"
18569 " jmp 1b\n"
18570@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
18571
18572 #ifdef CONFIG_X86_INTEL_USERCOPY
18573 static unsigned long
18574-__copy_user_intel(void __user *to, const void *from, unsigned long size)
18575+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
18576 {
18577 int d0, d1;
18578 __asm__ __volatile__(
18579@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
18580 " .align 2,0x90\n"
18581 "3: movl 0(%4), %%eax\n"
18582 "4: movl 4(%4), %%edx\n"
18583- "5: movl %%eax, 0(%3)\n"
18584- "6: movl %%edx, 4(%3)\n"
18585+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
18586+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
18587 "7: movl 8(%4), %%eax\n"
18588 "8: movl 12(%4),%%edx\n"
18589- "9: movl %%eax, 8(%3)\n"
18590- "10: movl %%edx, 12(%3)\n"
18591+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
18592+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
18593 "11: movl 16(%4), %%eax\n"
18594 "12: movl 20(%4), %%edx\n"
18595- "13: movl %%eax, 16(%3)\n"
18596- "14: movl %%edx, 20(%3)\n"
18597+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
18598+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
18599 "15: movl 24(%4), %%eax\n"
18600 "16: movl 28(%4), %%edx\n"
18601- "17: movl %%eax, 24(%3)\n"
18602- "18: movl %%edx, 28(%3)\n"
18603+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
18604+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
18605 "19: movl 32(%4), %%eax\n"
18606 "20: movl 36(%4), %%edx\n"
18607- "21: movl %%eax, 32(%3)\n"
18608- "22: movl %%edx, 36(%3)\n"
18609+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
18610+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
18611 "23: movl 40(%4), %%eax\n"
18612 "24: movl 44(%4), %%edx\n"
18613- "25: movl %%eax, 40(%3)\n"
18614- "26: movl %%edx, 44(%3)\n"
18615+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
18616+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
18617 "27: movl 48(%4), %%eax\n"
18618 "28: movl 52(%4), %%edx\n"
18619- "29: movl %%eax, 48(%3)\n"
18620- "30: movl %%edx, 52(%3)\n"
18621+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
18622+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
18623 "31: movl 56(%4), %%eax\n"
18624 "32: movl 60(%4), %%edx\n"
18625- "33: movl %%eax, 56(%3)\n"
18626- "34: movl %%edx, 60(%3)\n"
18627+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
18628+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
18629 " addl $-64, %0\n"
18630 " addl $64, %4\n"
18631 " addl $64, %3\n"
18632@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
18633 " shrl $2, %0\n"
18634 " andl $3, %%eax\n"
18635 " cld\n"
18636+ __COPYUSER_SET_ES
18637 "99: rep; movsl\n"
18638 "36: movl %%eax, %0\n"
18639 "37: rep; movsb\n"
18640 "100:\n"
18641+ __COPYUSER_RESTORE_ES
18642+ ".section .fixup,\"ax\"\n"
18643+ "101: lea 0(%%eax,%0,4),%0\n"
18644+ " jmp 100b\n"
18645+ ".previous\n"
18646+ ".section __ex_table,\"a\"\n"
18647+ " .align 4\n"
18648+ " .long 1b,100b\n"
18649+ " .long 2b,100b\n"
18650+ " .long 3b,100b\n"
18651+ " .long 4b,100b\n"
18652+ " .long 5b,100b\n"
18653+ " .long 6b,100b\n"
18654+ " .long 7b,100b\n"
18655+ " .long 8b,100b\n"
18656+ " .long 9b,100b\n"
18657+ " .long 10b,100b\n"
18658+ " .long 11b,100b\n"
18659+ " .long 12b,100b\n"
18660+ " .long 13b,100b\n"
18661+ " .long 14b,100b\n"
18662+ " .long 15b,100b\n"
18663+ " .long 16b,100b\n"
18664+ " .long 17b,100b\n"
18665+ " .long 18b,100b\n"
18666+ " .long 19b,100b\n"
18667+ " .long 20b,100b\n"
18668+ " .long 21b,100b\n"
18669+ " .long 22b,100b\n"
18670+ " .long 23b,100b\n"
18671+ " .long 24b,100b\n"
18672+ " .long 25b,100b\n"
18673+ " .long 26b,100b\n"
18674+ " .long 27b,100b\n"
18675+ " .long 28b,100b\n"
18676+ " .long 29b,100b\n"
18677+ " .long 30b,100b\n"
18678+ " .long 31b,100b\n"
18679+ " .long 32b,100b\n"
18680+ " .long 33b,100b\n"
18681+ " .long 34b,100b\n"
18682+ " .long 35b,100b\n"
18683+ " .long 36b,100b\n"
18684+ " .long 37b,100b\n"
18685+ " .long 99b,101b\n"
18686+ ".previous"
18687+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
18688+ : "1"(to), "2"(from), "0"(size)
18689+ : "eax", "edx", "memory");
18690+ return size;
18691+}
18692+
18693+static unsigned long
18694+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
18695+{
18696+ int d0, d1;
18697+ __asm__ __volatile__(
18698+ " .align 2,0x90\n"
18699+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
18700+ " cmpl $67, %0\n"
18701+ " jbe 3f\n"
18702+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
18703+ " .align 2,0x90\n"
18704+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
18705+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
18706+ "5: movl %%eax, 0(%3)\n"
18707+ "6: movl %%edx, 4(%3)\n"
18708+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
18709+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
18710+ "9: movl %%eax, 8(%3)\n"
18711+ "10: movl %%edx, 12(%3)\n"
18712+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
18713+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
18714+ "13: movl %%eax, 16(%3)\n"
18715+ "14: movl %%edx, 20(%3)\n"
18716+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
18717+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
18718+ "17: movl %%eax, 24(%3)\n"
18719+ "18: movl %%edx, 28(%3)\n"
18720+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
18721+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
18722+ "21: movl %%eax, 32(%3)\n"
18723+ "22: movl %%edx, 36(%3)\n"
18724+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
18725+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
18726+ "25: movl %%eax, 40(%3)\n"
18727+ "26: movl %%edx, 44(%3)\n"
18728+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
18729+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
18730+ "29: movl %%eax, 48(%3)\n"
18731+ "30: movl %%edx, 52(%3)\n"
18732+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
18733+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
18734+ "33: movl %%eax, 56(%3)\n"
18735+ "34: movl %%edx, 60(%3)\n"
18736+ " addl $-64, %0\n"
18737+ " addl $64, %4\n"
18738+ " addl $64, %3\n"
18739+ " cmpl $63, %0\n"
18740+ " ja 1b\n"
18741+ "35: movl %0, %%eax\n"
18742+ " shrl $2, %0\n"
18743+ " andl $3, %%eax\n"
18744+ " cld\n"
18745+ "99: rep; "__copyuser_seg" movsl\n"
18746+ "36: movl %%eax, %0\n"
18747+ "37: rep; "__copyuser_seg" movsb\n"
18748+ "100:\n"
18749 ".section .fixup,\"ax\"\n"
18750 "101: lea 0(%%eax,%0,4),%0\n"
18751 " jmp 100b\n"
18752@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
18753 int d0, d1;
18754 __asm__ __volatile__(
18755 " .align 2,0x90\n"
18756- "0: movl 32(%4), %%eax\n"
18757+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18758 " cmpl $67, %0\n"
18759 " jbe 2f\n"
18760- "1: movl 64(%4), %%eax\n"
18761+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18762 " .align 2,0x90\n"
18763- "2: movl 0(%4), %%eax\n"
18764- "21: movl 4(%4), %%edx\n"
18765+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18766+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18767 " movl %%eax, 0(%3)\n"
18768 " movl %%edx, 4(%3)\n"
18769- "3: movl 8(%4), %%eax\n"
18770- "31: movl 12(%4),%%edx\n"
18771+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18772+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18773 " movl %%eax, 8(%3)\n"
18774 " movl %%edx, 12(%3)\n"
18775- "4: movl 16(%4), %%eax\n"
18776- "41: movl 20(%4), %%edx\n"
18777+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18778+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18779 " movl %%eax, 16(%3)\n"
18780 " movl %%edx, 20(%3)\n"
18781- "10: movl 24(%4), %%eax\n"
18782- "51: movl 28(%4), %%edx\n"
18783+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18784+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18785 " movl %%eax, 24(%3)\n"
18786 " movl %%edx, 28(%3)\n"
18787- "11: movl 32(%4), %%eax\n"
18788- "61: movl 36(%4), %%edx\n"
18789+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18790+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18791 " movl %%eax, 32(%3)\n"
18792 " movl %%edx, 36(%3)\n"
18793- "12: movl 40(%4), %%eax\n"
18794- "71: movl 44(%4), %%edx\n"
18795+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18796+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18797 " movl %%eax, 40(%3)\n"
18798 " movl %%edx, 44(%3)\n"
18799- "13: movl 48(%4), %%eax\n"
18800- "81: movl 52(%4), %%edx\n"
18801+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18802+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18803 " movl %%eax, 48(%3)\n"
18804 " movl %%edx, 52(%3)\n"
18805- "14: movl 56(%4), %%eax\n"
18806- "91: movl 60(%4), %%edx\n"
18807+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18808+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18809 " movl %%eax, 56(%3)\n"
18810 " movl %%edx, 60(%3)\n"
18811 " addl $-64, %0\n"
18812@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
18813 " shrl $2, %0\n"
18814 " andl $3, %%eax\n"
18815 " cld\n"
18816- "6: rep; movsl\n"
18817+ "6: rep; "__copyuser_seg" movsl\n"
18818 " movl %%eax,%0\n"
18819- "7: rep; movsb\n"
18820+ "7: rep; "__copyuser_seg" movsb\n"
18821 "8:\n"
18822 ".section .fixup,\"ax\"\n"
18823 "9: lea 0(%%eax,%0,4),%0\n"
18824@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
18825
18826 __asm__ __volatile__(
18827 " .align 2,0x90\n"
18828- "0: movl 32(%4), %%eax\n"
18829+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18830 " cmpl $67, %0\n"
18831 " jbe 2f\n"
18832- "1: movl 64(%4), %%eax\n"
18833+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18834 " .align 2,0x90\n"
18835- "2: movl 0(%4), %%eax\n"
18836- "21: movl 4(%4), %%edx\n"
18837+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18838+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18839 " movnti %%eax, 0(%3)\n"
18840 " movnti %%edx, 4(%3)\n"
18841- "3: movl 8(%4), %%eax\n"
18842- "31: movl 12(%4),%%edx\n"
18843+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18844+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18845 " movnti %%eax, 8(%3)\n"
18846 " movnti %%edx, 12(%3)\n"
18847- "4: movl 16(%4), %%eax\n"
18848- "41: movl 20(%4), %%edx\n"
18849+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18850+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18851 " movnti %%eax, 16(%3)\n"
18852 " movnti %%edx, 20(%3)\n"
18853- "10: movl 24(%4), %%eax\n"
18854- "51: movl 28(%4), %%edx\n"
18855+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18856+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18857 " movnti %%eax, 24(%3)\n"
18858 " movnti %%edx, 28(%3)\n"
18859- "11: movl 32(%4), %%eax\n"
18860- "61: movl 36(%4), %%edx\n"
18861+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18862+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18863 " movnti %%eax, 32(%3)\n"
18864 " movnti %%edx, 36(%3)\n"
18865- "12: movl 40(%4), %%eax\n"
18866- "71: movl 44(%4), %%edx\n"
18867+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18868+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18869 " movnti %%eax, 40(%3)\n"
18870 " movnti %%edx, 44(%3)\n"
18871- "13: movl 48(%4), %%eax\n"
18872- "81: movl 52(%4), %%edx\n"
18873+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18874+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18875 " movnti %%eax, 48(%3)\n"
18876 " movnti %%edx, 52(%3)\n"
18877- "14: movl 56(%4), %%eax\n"
18878- "91: movl 60(%4), %%edx\n"
18879+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18880+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18881 " movnti %%eax, 56(%3)\n"
18882 " movnti %%edx, 60(%3)\n"
18883 " addl $-64, %0\n"
18884@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
18885 " shrl $2, %0\n"
18886 " andl $3, %%eax\n"
18887 " cld\n"
18888- "6: rep; movsl\n"
18889+ "6: rep; "__copyuser_seg" movsl\n"
18890 " movl %%eax,%0\n"
18891- "7: rep; movsb\n"
18892+ "7: rep; "__copyuser_seg" movsb\n"
18893 "8:\n"
18894 ".section .fixup,\"ax\"\n"
18895 "9: lea 0(%%eax,%0,4),%0\n"
18896@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
18897
18898 __asm__ __volatile__(
18899 " .align 2,0x90\n"
18900- "0: movl 32(%4), %%eax\n"
18901+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18902 " cmpl $67, %0\n"
18903 " jbe 2f\n"
18904- "1: movl 64(%4), %%eax\n"
18905+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18906 " .align 2,0x90\n"
18907- "2: movl 0(%4), %%eax\n"
18908- "21: movl 4(%4), %%edx\n"
18909+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18910+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18911 " movnti %%eax, 0(%3)\n"
18912 " movnti %%edx, 4(%3)\n"
18913- "3: movl 8(%4), %%eax\n"
18914- "31: movl 12(%4),%%edx\n"
18915+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18916+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18917 " movnti %%eax, 8(%3)\n"
18918 " movnti %%edx, 12(%3)\n"
18919- "4: movl 16(%4), %%eax\n"
18920- "41: movl 20(%4), %%edx\n"
18921+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18922+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18923 " movnti %%eax, 16(%3)\n"
18924 " movnti %%edx, 20(%3)\n"
18925- "10: movl 24(%4), %%eax\n"
18926- "51: movl 28(%4), %%edx\n"
18927+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18928+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18929 " movnti %%eax, 24(%3)\n"
18930 " movnti %%edx, 28(%3)\n"
18931- "11: movl 32(%4), %%eax\n"
18932- "61: movl 36(%4), %%edx\n"
18933+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18934+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18935 " movnti %%eax, 32(%3)\n"
18936 " movnti %%edx, 36(%3)\n"
18937- "12: movl 40(%4), %%eax\n"
18938- "71: movl 44(%4), %%edx\n"
18939+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18940+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18941 " movnti %%eax, 40(%3)\n"
18942 " movnti %%edx, 44(%3)\n"
18943- "13: movl 48(%4), %%eax\n"
18944- "81: movl 52(%4), %%edx\n"
18945+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18946+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18947 " movnti %%eax, 48(%3)\n"
18948 " movnti %%edx, 52(%3)\n"
18949- "14: movl 56(%4), %%eax\n"
18950- "91: movl 60(%4), %%edx\n"
18951+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18952+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18953 " movnti %%eax, 56(%3)\n"
18954 " movnti %%edx, 60(%3)\n"
18955 " addl $-64, %0\n"
18956@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
18957 " shrl $2, %0\n"
18958 " andl $3, %%eax\n"
18959 " cld\n"
18960- "6: rep; movsl\n"
18961+ "6: rep; "__copyuser_seg" movsl\n"
18962 " movl %%eax,%0\n"
18963- "7: rep; movsb\n"
18964+ "7: rep; "__copyuser_seg" movsb\n"
18965 "8:\n"
18966 ".section .fixup,\"ax\"\n"
18967 "9: lea 0(%%eax,%0,4),%0\n"
18968@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
18969 */
18970 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
18971 unsigned long size);
18972-unsigned long __copy_user_intel(void __user *to, const void *from,
18973+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
18974+ unsigned long size);
18975+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
18976 unsigned long size);
18977 unsigned long __copy_user_zeroing_intel_nocache(void *to,
18978 const void __user *from, unsigned long size);
18979 #endif /* CONFIG_X86_INTEL_USERCOPY */
18980
18981 /* Generic arbitrary sized copy. */
18982-#define __copy_user(to, from, size) \
18983+#define __copy_user(to, from, size, prefix, set, restore) \
18984 do { \
18985 int __d0, __d1, __d2; \
18986 __asm__ __volatile__( \
18987+ set \
18988 " cmp $7,%0\n" \
18989 " jbe 1f\n" \
18990 " movl %1,%0\n" \
18991 " negl %0\n" \
18992 " andl $7,%0\n" \
18993 " subl %0,%3\n" \
18994- "4: rep; movsb\n" \
18995+ "4: rep; "prefix"movsb\n" \
18996 " movl %3,%0\n" \
18997 " shrl $2,%0\n" \
18998 " andl $3,%3\n" \
18999 " .align 2,0x90\n" \
19000- "0: rep; movsl\n" \
19001+ "0: rep; "prefix"movsl\n" \
19002 " movl %3,%0\n" \
19003- "1: rep; movsb\n" \
19004+ "1: rep; "prefix"movsb\n" \
19005 "2:\n" \
19006+ restore \
19007 ".section .fixup,\"ax\"\n" \
19008 "5: addl %3,%0\n" \
19009 " jmp 2b\n" \
19010@@ -682,14 +799,14 @@ do { \
19011 " negl %0\n" \
19012 " andl $7,%0\n" \
19013 " subl %0,%3\n" \
19014- "4: rep; movsb\n" \
19015+ "4: rep; "__copyuser_seg"movsb\n" \
19016 " movl %3,%0\n" \
19017 " shrl $2,%0\n" \
19018 " andl $3,%3\n" \
19019 " .align 2,0x90\n" \
19020- "0: rep; movsl\n" \
19021+ "0: rep; "__copyuser_seg"movsl\n" \
19022 " movl %3,%0\n" \
19023- "1: rep; movsb\n" \
19024+ "1: rep; "__copyuser_seg"movsb\n" \
19025 "2:\n" \
19026 ".section .fixup,\"ax\"\n" \
19027 "5: addl %3,%0\n" \
19028@@ -775,9 +892,9 @@ survive:
19029 }
19030 #endif
19031 if (movsl_is_ok(to, from, n))
19032- __copy_user(to, from, n);
19033+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
19034 else
19035- n = __copy_user_intel(to, from, n);
19036+ n = __generic_copy_to_user_intel(to, from, n);
19037 return n;
19038 }
19039 EXPORT_SYMBOL(__copy_to_user_ll);
19040@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
19041 unsigned long n)
19042 {
19043 if (movsl_is_ok(to, from, n))
19044- __copy_user(to, from, n);
19045+ __copy_user(to, from, n, __copyuser_seg, "", "");
19046 else
19047- n = __copy_user_intel((void __user *)to,
19048- (const void *)from, n);
19049+ n = __generic_copy_from_user_intel(to, from, n);
19050 return n;
19051 }
19052 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
19053@@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
19054 if (n > 64 && cpu_has_xmm2)
19055 n = __copy_user_intel_nocache(to, from, n);
19056 else
19057- __copy_user(to, from, n);
19058+ __copy_user(to, from, n, __copyuser_seg, "", "");
19059 #else
19060- __copy_user(to, from, n);
19061+ __copy_user(to, from, n, __copyuser_seg, "", "");
19062 #endif
19063 return n;
19064 }
19065 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
19066
19067-/**
19068- * copy_to_user: - Copy a block of data into user space.
19069- * @to: Destination address, in user space.
19070- * @from: Source address, in kernel space.
19071- * @n: Number of bytes to copy.
19072- *
19073- * Context: User context only. This function may sleep.
19074- *
19075- * Copy data from kernel space to user space.
19076- *
19077- * Returns number of bytes that could not be copied.
19078- * On success, this will be zero.
19079- */
19080-unsigned long
19081-copy_to_user(void __user *to, const void *from, unsigned long n)
19082+void copy_from_user_overflow(void)
19083 {
19084- if (access_ok(VERIFY_WRITE, to, n))
19085- n = __copy_to_user(to, from, n);
19086- return n;
19087+ WARN(1, "Buffer overflow detected!\n");
19088 }
19089-EXPORT_SYMBOL(copy_to_user);
19090+EXPORT_SYMBOL(copy_from_user_overflow);
19091
19092-/**
19093- * copy_from_user: - Copy a block of data from user space.
19094- * @to: Destination address, in kernel space.
19095- * @from: Source address, in user space.
19096- * @n: Number of bytes to copy.
19097- *
19098- * Context: User context only. This function may sleep.
19099- *
19100- * Copy data from user space to kernel space.
19101- *
19102- * Returns number of bytes that could not be copied.
19103- * On success, this will be zero.
19104- *
19105- * If some data could not be copied, this function will pad the copied
19106- * data to the requested size using zero bytes.
19107- */
19108-unsigned long
19109-_copy_from_user(void *to, const void __user *from, unsigned long n)
19110+void copy_to_user_overflow(void)
19111 {
19112- if (access_ok(VERIFY_READ, from, n))
19113- n = __copy_from_user(to, from, n);
19114- else
19115- memset(to, 0, n);
19116- return n;
19117+ WARN(1, "Buffer overflow detected!\n");
19118 }
19119-EXPORT_SYMBOL(_copy_from_user);
19120+EXPORT_SYMBOL(copy_to_user_overflow);
19121
19122-void copy_from_user_overflow(void)
19123+#ifdef CONFIG_PAX_MEMORY_UDEREF
19124+void __set_fs(mm_segment_t x)
19125 {
19126- WARN(1, "Buffer overflow detected!\n");
19127+ switch (x.seg) {
19128+ case 0:
19129+ loadsegment(gs, 0);
19130+ break;
19131+ case TASK_SIZE_MAX:
19132+ loadsegment(gs, __USER_DS);
19133+ break;
19134+ case -1UL:
19135+ loadsegment(gs, __KERNEL_DS);
19136+ break;
19137+ default:
19138+ BUG();
19139+ }
19140+ return;
19141 }
19142-EXPORT_SYMBOL(copy_from_user_overflow);
19143+EXPORT_SYMBOL(__set_fs);
19144+
19145+void set_fs(mm_segment_t x)
19146+{
19147+ current_thread_info()->addr_limit = x;
19148+ __set_fs(x);
19149+}
19150+EXPORT_SYMBOL(set_fs);
19151+#endif
19152diff -urNp linux-3.0.7/arch/x86/lib/usercopy_64.c linux-3.0.7/arch/x86/lib/usercopy_64.c
19153--- linux-3.0.7/arch/x86/lib/usercopy_64.c 2011-07-21 22:17:23.000000000 -0400
19154+++ linux-3.0.7/arch/x86/lib/usercopy_64.c 2011-10-06 04:17:55.000000000 -0400
19155@@ -42,6 +42,12 @@ long
19156 __strncpy_from_user(char *dst, const char __user *src, long count)
19157 {
19158 long res;
19159+
19160+#ifdef CONFIG_PAX_MEMORY_UDEREF
19161+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19162+ src += PAX_USER_SHADOW_BASE;
19163+#endif
19164+
19165 __do_strncpy_from_user(dst, src, count, res);
19166 return res;
19167 }
19168@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
19169 {
19170 long __d0;
19171 might_fault();
19172+
19173+#ifdef CONFIG_PAX_MEMORY_UDEREF
19174+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
19175+ addr += PAX_USER_SHADOW_BASE;
19176+#endif
19177+
19178 /* no memory constraint because it doesn't change any memory gcc knows
19179 about */
19180 asm volatile(
19181@@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
19182
19183 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
19184 {
19185- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19186- return copy_user_generic((__force void *)to, (__force void *)from, len);
19187- }
19188- return len;
19189+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19190+
19191+#ifdef CONFIG_PAX_MEMORY_UDEREF
19192+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
19193+ to += PAX_USER_SHADOW_BASE;
19194+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
19195+ from += PAX_USER_SHADOW_BASE;
19196+#endif
19197+
19198+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
19199+ }
19200+ return len;
19201 }
19202 EXPORT_SYMBOL(copy_in_user);
19203
19204@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
19205 * it is not necessary to optimize tail handling.
19206 */
19207 unsigned long
19208-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
19209+copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest)
19210 {
19211 char c;
19212 unsigned zero_len;
19213diff -urNp linux-3.0.7/arch/x86/Makefile linux-3.0.7/arch/x86/Makefile
19214--- linux-3.0.7/arch/x86/Makefile 2011-07-21 22:17:23.000000000 -0400
19215+++ linux-3.0.7/arch/x86/Makefile 2011-08-23 21:48:14.000000000 -0400
19216@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
19217 else
19218 BITS := 64
19219 UTS_MACHINE := x86_64
19220+ biarch := $(call cc-option,-m64)
19221 CHECKFLAGS += -D__x86_64__ -m64
19222
19223 KBUILD_AFLAGS += -m64
19224@@ -195,3 +196,12 @@ define archhelp
19225 echo ' FDARGS="..." arguments for the booted kernel'
19226 echo ' FDINITRD=file initrd for the booted kernel'
19227 endef
19228+
19229+define OLD_LD
19230+
19231+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
19232+*** Please upgrade your binutils to 2.18 or newer
19233+endef
19234+
19235+archprepare:
19236+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
19237diff -urNp linux-3.0.7/arch/x86/mm/extable.c linux-3.0.7/arch/x86/mm/extable.c
19238--- linux-3.0.7/arch/x86/mm/extable.c 2011-07-21 22:17:23.000000000 -0400
19239+++ linux-3.0.7/arch/x86/mm/extable.c 2011-08-23 21:47:55.000000000 -0400
19240@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs
19241 const struct exception_table_entry *fixup;
19242
19243 #ifdef CONFIG_PNPBIOS
19244- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
19245+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
19246 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
19247 extern u32 pnp_bios_is_utter_crap;
19248 pnp_bios_is_utter_crap = 1;
19249diff -urNp linux-3.0.7/arch/x86/mm/fault.c linux-3.0.7/arch/x86/mm/fault.c
19250--- linux-3.0.7/arch/x86/mm/fault.c 2011-07-21 22:17:23.000000000 -0400
19251+++ linux-3.0.7/arch/x86/mm/fault.c 2011-10-06 04:17:55.000000000 -0400
19252@@ -13,10 +13,18 @@
19253 #include <linux/perf_event.h> /* perf_sw_event */
19254 #include <linux/hugetlb.h> /* hstate_index_to_shift */
19255 #include <linux/prefetch.h> /* prefetchw */
19256+#include <linux/unistd.h>
19257+#include <linux/compiler.h>
19258
19259 #include <asm/traps.h> /* dotraplinkage, ... */
19260 #include <asm/pgalloc.h> /* pgd_*(), ... */
19261 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
19262+#include <asm/vsyscall.h>
19263+#include <asm/tlbflush.h>
19264+
19265+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19266+#include <asm/stacktrace.h>
19267+#endif
19268
19269 /*
19270 * Page fault error code bits:
19271@@ -54,7 +62,7 @@ static inline int __kprobes notify_page_
19272 int ret = 0;
19273
19274 /* kprobe_running() needs smp_processor_id() */
19275- if (kprobes_built_in() && !user_mode_vm(regs)) {
19276+ if (kprobes_built_in() && !user_mode(regs)) {
19277 preempt_disable();
19278 if (kprobe_running() && kprobe_fault_handler(regs, 14))
19279 ret = 1;
19280@@ -115,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *re
19281 return !instr_lo || (instr_lo>>1) == 1;
19282 case 0x00:
19283 /* Prefetch instruction is 0x0F0D or 0x0F18 */
19284- if (probe_kernel_address(instr, opcode))
19285+ if (user_mode(regs)) {
19286+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
19287+ return 0;
19288+ } else if (probe_kernel_address(instr, opcode))
19289 return 0;
19290
19291 *prefetch = (instr_lo == 0xF) &&
19292@@ -149,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsign
19293 while (instr < max_instr) {
19294 unsigned char opcode;
19295
19296- if (probe_kernel_address(instr, opcode))
19297+ if (user_mode(regs)) {
19298+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
19299+ break;
19300+ } else if (probe_kernel_address(instr, opcode))
19301 break;
19302
19303 instr++;
19304@@ -180,6 +194,30 @@ force_sig_info_fault(int si_signo, int s
19305 force_sig_info(si_signo, &info, tsk);
19306 }
19307
19308+#ifdef CONFIG_PAX_EMUTRAMP
19309+static int pax_handle_fetch_fault(struct pt_regs *regs);
19310+#endif
19311+
19312+#ifdef CONFIG_PAX_PAGEEXEC
19313+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
19314+{
19315+ pgd_t *pgd;
19316+ pud_t *pud;
19317+ pmd_t *pmd;
19318+
19319+ pgd = pgd_offset(mm, address);
19320+ if (!pgd_present(*pgd))
19321+ return NULL;
19322+ pud = pud_offset(pgd, address);
19323+ if (!pud_present(*pud))
19324+ return NULL;
19325+ pmd = pmd_offset(pud, address);
19326+ if (!pmd_present(*pmd))
19327+ return NULL;
19328+ return pmd;
19329+}
19330+#endif
19331+
19332 DEFINE_SPINLOCK(pgd_lock);
19333 LIST_HEAD(pgd_list);
19334
19335@@ -230,10 +268,22 @@ void vmalloc_sync_all(void)
19336 for (address = VMALLOC_START & PMD_MASK;
19337 address >= TASK_SIZE && address < FIXADDR_TOP;
19338 address += PMD_SIZE) {
19339+
19340+#ifdef CONFIG_PAX_PER_CPU_PGD
19341+ unsigned long cpu;
19342+#else
19343 struct page *page;
19344+#endif
19345
19346 spin_lock(&pgd_lock);
19347+
19348+#ifdef CONFIG_PAX_PER_CPU_PGD
19349+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19350+ pgd_t *pgd = get_cpu_pgd(cpu);
19351+ pmd_t *ret;
19352+#else
19353 list_for_each_entry(page, &pgd_list, lru) {
19354+ pgd_t *pgd = page_address(page);
19355 spinlock_t *pgt_lock;
19356 pmd_t *ret;
19357
19358@@ -241,8 +291,13 @@ void vmalloc_sync_all(void)
19359 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
19360
19361 spin_lock(pgt_lock);
19362- ret = vmalloc_sync_one(page_address(page), address);
19363+#endif
19364+
19365+ ret = vmalloc_sync_one(pgd, address);
19366+
19367+#ifndef CONFIG_PAX_PER_CPU_PGD
19368 spin_unlock(pgt_lock);
19369+#endif
19370
19371 if (!ret)
19372 break;
19373@@ -276,6 +331,11 @@ static noinline __kprobes int vmalloc_fa
19374 * an interrupt in the middle of a task switch..
19375 */
19376 pgd_paddr = read_cr3();
19377+
19378+#ifdef CONFIG_PAX_PER_CPU_PGD
19379+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
19380+#endif
19381+
19382 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
19383 if (!pmd_k)
19384 return -1;
19385@@ -371,7 +431,14 @@ static noinline __kprobes int vmalloc_fa
19386 * happen within a race in page table update. In the later
19387 * case just flush:
19388 */
19389+
19390+#ifdef CONFIG_PAX_PER_CPU_PGD
19391+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
19392+ pgd = pgd_offset_cpu(smp_processor_id(), address);
19393+#else
19394 pgd = pgd_offset(current->active_mm, address);
19395+#endif
19396+
19397 pgd_ref = pgd_offset_k(address);
19398 if (pgd_none(*pgd_ref))
19399 return -1;
19400@@ -533,7 +600,7 @@ static int is_errata93(struct pt_regs *r
19401 static int is_errata100(struct pt_regs *regs, unsigned long address)
19402 {
19403 #ifdef CONFIG_X86_64
19404- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
19405+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
19406 return 1;
19407 #endif
19408 return 0;
19409@@ -560,7 +627,7 @@ static int is_f00f_bug(struct pt_regs *r
19410 }
19411
19412 static const char nx_warning[] = KERN_CRIT
19413-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
19414+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
19415
19416 static void
19417 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
19418@@ -569,14 +636,25 @@ show_fault_oops(struct pt_regs *regs, un
19419 if (!oops_may_print())
19420 return;
19421
19422- if (error_code & PF_INSTR) {
19423+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
19424 unsigned int level;
19425
19426 pte_t *pte = lookup_address(address, &level);
19427
19428 if (pte && pte_present(*pte) && !pte_exec(*pte))
19429- printk(nx_warning, current_uid());
19430+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
19431+ }
19432+
19433+#ifdef CONFIG_PAX_KERNEXEC
19434+ if (init_mm.start_code <= address && address < init_mm.end_code) {
19435+ if (current->signal->curr_ip)
19436+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
19437+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
19438+ else
19439+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
19440+ current->comm, task_pid_nr(current), current_uid(), current_euid());
19441 }
19442+#endif
19443
19444 printk(KERN_ALERT "BUG: unable to handle kernel ");
19445 if (address < PAGE_SIZE)
19446@@ -702,6 +780,66 @@ __bad_area_nosemaphore(struct pt_regs *r
19447 unsigned long address, int si_code)
19448 {
19449 struct task_struct *tsk = current;
19450+#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19451+ struct mm_struct *mm = tsk->mm;
19452+#endif
19453+
19454+#ifdef CONFIG_X86_64
19455+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
19456+ if (regs->ip == VSYSCALL_ADDR(__NR_vgettimeofday) ||
19457+ regs->ip == VSYSCALL_ADDR(__NR_vtime) ||
19458+ regs->ip == VSYSCALL_ADDR(__NR_vgetcpu)) {
19459+ regs->ip += mm->context.vdso - PAGE_SIZE - VSYSCALL_START;
19460+ return;
19461+ }
19462+ }
19463+#endif
19464+
19465+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19466+ if (mm && (error_code & PF_USER)) {
19467+ unsigned long ip = regs->ip;
19468+
19469+ if (v8086_mode(regs))
19470+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
19471+
19472+ /*
19473+ * It's possible to have interrupts off here:
19474+ */
19475+ local_irq_enable();
19476+
19477+#ifdef CONFIG_PAX_PAGEEXEC
19478+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
19479+ (((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
19480+
19481+#ifdef CONFIG_PAX_EMUTRAMP
19482+ switch (pax_handle_fetch_fault(regs)) {
19483+ case 2:
19484+ return;
19485+ }
19486+#endif
19487+
19488+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
19489+ do_group_exit(SIGKILL);
19490+ }
19491+#endif
19492+
19493+#ifdef CONFIG_PAX_SEGMEXEC
19494+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
19495+
19496+#ifdef CONFIG_PAX_EMUTRAMP
19497+ switch (pax_handle_fetch_fault(regs)) {
19498+ case 2:
19499+ return;
19500+ }
19501+#endif
19502+
19503+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
19504+ do_group_exit(SIGKILL);
19505+ }
19506+#endif
19507+
19508+ }
19509+#endif
19510
19511 /* User mode accesses just cause a SIGSEGV */
19512 if (error_code & PF_USER) {
19513@@ -871,6 +1009,99 @@ static int spurious_fault_check(unsigned
19514 return 1;
19515 }
19516
19517+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19518+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
19519+{
19520+ pte_t *pte;
19521+ pmd_t *pmd;
19522+ spinlock_t *ptl;
19523+ unsigned char pte_mask;
19524+
19525+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
19526+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
19527+ return 0;
19528+
19529+ /* PaX: it's our fault, let's handle it if we can */
19530+
19531+ /* PaX: take a look at read faults before acquiring any locks */
19532+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
19533+ /* instruction fetch attempt from a protected page in user mode */
19534+ up_read(&mm->mmap_sem);
19535+
19536+#ifdef CONFIG_PAX_EMUTRAMP
19537+ switch (pax_handle_fetch_fault(regs)) {
19538+ case 2:
19539+ return 1;
19540+ }
19541+#endif
19542+
19543+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
19544+ do_group_exit(SIGKILL);
19545+ }
19546+
19547+ pmd = pax_get_pmd(mm, address);
19548+ if (unlikely(!pmd))
19549+ return 0;
19550+
19551+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
19552+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
19553+ pte_unmap_unlock(pte, ptl);
19554+ return 0;
19555+ }
19556+
19557+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
19558+ /* write attempt to a protected page in user mode */
19559+ pte_unmap_unlock(pte, ptl);
19560+ return 0;
19561+ }
19562+
19563+#ifdef CONFIG_SMP
19564+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
19565+#else
19566+ if (likely(address > get_limit(regs->cs)))
19567+#endif
19568+ {
19569+ set_pte(pte, pte_mkread(*pte));
19570+ __flush_tlb_one(address);
19571+ pte_unmap_unlock(pte, ptl);
19572+ up_read(&mm->mmap_sem);
19573+ return 1;
19574+ }
19575+
19576+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
19577+
19578+ /*
19579+ * PaX: fill DTLB with user rights and retry
19580+ */
19581+ __asm__ __volatile__ (
19582+ "orb %2,(%1)\n"
19583+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
19584+/*
19585+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
19586+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
19587+ * page fault when examined during a TLB load attempt. this is true not only
19588+ * for PTEs holding a non-present entry but also present entries that will
19589+ * raise a page fault (such as those set up by PaX, or the copy-on-write
19590+ * mechanism). in effect it means that we do *not* need to flush the TLBs
19591+ * for our target pages since their PTEs are simply not in the TLBs at all.
19592+
19593+ * the best thing in omitting it is that we gain around 15-20% speed in the
19594+ * fast path of the page fault handler and can get rid of tracing since we
19595+ * can no longer flush unintended entries.
19596+ */
19597+ "invlpg (%0)\n"
19598+#endif
19599+ __copyuser_seg"testb $0,(%0)\n"
19600+ "xorb %3,(%1)\n"
19601+ :
19602+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
19603+ : "memory", "cc");
19604+ pte_unmap_unlock(pte, ptl);
19605+ up_read(&mm->mmap_sem);
19606+ return 1;
19607+}
19608+#endif
19609+
19610 /*
19611 * Handle a spurious fault caused by a stale TLB entry.
19612 *
19613@@ -943,6 +1174,9 @@ int show_unhandled_signals = 1;
19614 static inline int
19615 access_error(unsigned long error_code, struct vm_area_struct *vma)
19616 {
19617+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
19618+ return 1;
19619+
19620 if (error_code & PF_WRITE) {
19621 /* write, present and write, not present: */
19622 if (unlikely(!(vma->vm_flags & VM_WRITE)))
19623@@ -976,19 +1210,33 @@ do_page_fault(struct pt_regs *regs, unsi
19624 {
19625 struct vm_area_struct *vma;
19626 struct task_struct *tsk;
19627- unsigned long address;
19628 struct mm_struct *mm;
19629 int fault;
19630 int write = error_code & PF_WRITE;
19631 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
19632 (write ? FAULT_FLAG_WRITE : 0);
19633
19634+ /* Get the faulting address: */
19635+ unsigned long address = read_cr2();
19636+
19637+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19638+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
19639+ if (!search_exception_tables(regs->ip)) {
19640+ bad_area_nosemaphore(regs, error_code, address);
19641+ return;
19642+ }
19643+ if (address < PAX_USER_SHADOW_BASE) {
19644+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
19645+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
19646+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
19647+ } else
19648+ address -= PAX_USER_SHADOW_BASE;
19649+ }
19650+#endif
19651+
19652 tsk = current;
19653 mm = tsk->mm;
19654
19655- /* Get the faulting address: */
19656- address = read_cr2();
19657-
19658 /*
19659 * Detect and handle instructions that would cause a page fault for
19660 * both a tracked kernel page and a userspace page.
19661@@ -1048,7 +1296,7 @@ do_page_fault(struct pt_regs *regs, unsi
19662 * User-mode registers count as a user access even for any
19663 * potential system fault or CPU buglet:
19664 */
19665- if (user_mode_vm(regs)) {
19666+ if (user_mode(regs)) {
19667 local_irq_enable();
19668 error_code |= PF_USER;
19669 } else {
19670@@ -1103,6 +1351,11 @@ retry:
19671 might_sleep();
19672 }
19673
19674+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19675+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
19676+ return;
19677+#endif
19678+
19679 vma = find_vma(mm, address);
19680 if (unlikely(!vma)) {
19681 bad_area(regs, error_code, address);
19682@@ -1114,18 +1367,24 @@ retry:
19683 bad_area(regs, error_code, address);
19684 return;
19685 }
19686- if (error_code & PF_USER) {
19687- /*
19688- * Accessing the stack below %sp is always a bug.
19689- * The large cushion allows instructions like enter
19690- * and pusha to work. ("enter $65535, $31" pushes
19691- * 32 pointers and then decrements %sp by 65535.)
19692- */
19693- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
19694- bad_area(regs, error_code, address);
19695- return;
19696- }
19697+ /*
19698+ * Accessing the stack below %sp is always a bug.
19699+ * The large cushion allows instructions like enter
19700+ * and pusha to work. ("enter $65535, $31" pushes
19701+ * 32 pointers and then decrements %sp by 65535.)
19702+ */
19703+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
19704+ bad_area(regs, error_code, address);
19705+ return;
19706 }
19707+
19708+#ifdef CONFIG_PAX_SEGMEXEC
19709+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
19710+ bad_area(regs, error_code, address);
19711+ return;
19712+ }
19713+#endif
19714+
19715 if (unlikely(expand_stack(vma, address))) {
19716 bad_area(regs, error_code, address);
19717 return;
19718@@ -1180,3 +1439,199 @@ good_area:
19719
19720 up_read(&mm->mmap_sem);
19721 }
19722+
19723+#ifdef CONFIG_PAX_EMUTRAMP
19724+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
19725+{
19726+ int err;
19727+
19728+ do { /* PaX: gcc trampoline emulation #1 */
19729+ unsigned char mov1, mov2;
19730+ unsigned short jmp;
19731+ unsigned int addr1, addr2;
19732+
19733+#ifdef CONFIG_X86_64
19734+ if ((regs->ip + 11) >> 32)
19735+ break;
19736+#endif
19737+
19738+ err = get_user(mov1, (unsigned char __user *)regs->ip);
19739+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
19740+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
19741+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
19742+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
19743+
19744+ if (err)
19745+ break;
19746+
19747+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
19748+ regs->cx = addr1;
19749+ regs->ax = addr2;
19750+ regs->ip = addr2;
19751+ return 2;
19752+ }
19753+ } while (0);
19754+
19755+ do { /* PaX: gcc trampoline emulation #2 */
19756+ unsigned char mov, jmp;
19757+ unsigned int addr1, addr2;
19758+
19759+#ifdef CONFIG_X86_64
19760+ if ((regs->ip + 9) >> 32)
19761+ break;
19762+#endif
19763+
19764+ err = get_user(mov, (unsigned char __user *)regs->ip);
19765+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
19766+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
19767+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
19768+
19769+ if (err)
19770+ break;
19771+
19772+ if (mov == 0xB9 && jmp == 0xE9) {
19773+ regs->cx = addr1;
19774+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
19775+ return 2;
19776+ }
19777+ } while (0);
19778+
19779+ return 1; /* PaX in action */
19780+}
19781+
19782+#ifdef CONFIG_X86_64
19783+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
19784+{
19785+ int err;
19786+
19787+ do { /* PaX: gcc trampoline emulation #1 */
19788+ unsigned short mov1, mov2, jmp1;
19789+ unsigned char jmp2;
19790+ unsigned int addr1;
19791+ unsigned long addr2;
19792+
19793+ err = get_user(mov1, (unsigned short __user *)regs->ip);
19794+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
19795+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
19796+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
19797+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
19798+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
19799+
19800+ if (err)
19801+ break;
19802+
19803+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
19804+ regs->r11 = addr1;
19805+ regs->r10 = addr2;
19806+ regs->ip = addr1;
19807+ return 2;
19808+ }
19809+ } while (0);
19810+
19811+ do { /* PaX: gcc trampoline emulation #2 */
19812+ unsigned short mov1, mov2, jmp1;
19813+ unsigned char jmp2;
19814+ unsigned long addr1, addr2;
19815+
19816+ err = get_user(mov1, (unsigned short __user *)regs->ip);
19817+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
19818+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
19819+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
19820+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
19821+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
19822+
19823+ if (err)
19824+ break;
19825+
19826+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
19827+ regs->r11 = addr1;
19828+ regs->r10 = addr2;
19829+ regs->ip = addr1;
19830+ return 2;
19831+ }
19832+ } while (0);
19833+
19834+ return 1; /* PaX in action */
19835+}
19836+#endif
19837+
19838+/*
19839+ * PaX: decide what to do with offenders (regs->ip = fault address)
19840+ *
19841+ * returns 1 when task should be killed
19842+ * 2 when gcc trampoline was detected
19843+ */
19844+static int pax_handle_fetch_fault(struct pt_regs *regs)
19845+{
19846+ if (v8086_mode(regs))
19847+ return 1;
19848+
19849+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
19850+ return 1;
19851+
19852+#ifdef CONFIG_X86_32
19853+ return pax_handle_fetch_fault_32(regs);
19854+#else
19855+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
19856+ return pax_handle_fetch_fault_32(regs);
19857+ else
19858+ return pax_handle_fetch_fault_64(regs);
19859+#endif
19860+}
19861+#endif
19862+
19863+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19864+void pax_report_insns(void *pc, void *sp)
19865+{
19866+ long i;
19867+
19868+ printk(KERN_ERR "PAX: bytes at PC: ");
19869+ for (i = 0; i < 20; i++) {
19870+ unsigned char c;
19871+ if (get_user(c, (unsigned char __force_user *)pc+i))
19872+ printk(KERN_CONT "?? ");
19873+ else
19874+ printk(KERN_CONT "%02x ", c);
19875+ }
19876+ printk("\n");
19877+
19878+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
19879+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
19880+ unsigned long c;
19881+ if (get_user(c, (unsigned long __force_user *)sp+i))
19882+#ifdef CONFIG_X86_32
19883+ printk(KERN_CONT "???????? ");
19884+#else
19885+ printk(KERN_CONT "???????????????? ");
19886+#endif
19887+ else
19888+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
19889+ }
19890+ printk("\n");
19891+}
19892+#endif
19893+
19894+/**
19895+ * probe_kernel_write(): safely attempt to write to a location
19896+ * @dst: address to write to
19897+ * @src: pointer to the data that shall be written
19898+ * @size: size of the data chunk
19899+ *
19900+ * Safely write to address @dst from the buffer at @src. If a kernel fault
19901+ * happens, handle that and return -EFAULT.
19902+ */
19903+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
19904+{
19905+ long ret;
19906+ mm_segment_t old_fs = get_fs();
19907+
19908+ set_fs(KERNEL_DS);
19909+ pagefault_disable();
19910+ pax_open_kernel();
19911+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
19912+ pax_close_kernel();
19913+ pagefault_enable();
19914+ set_fs(old_fs);
19915+
19916+ return ret ? -EFAULT : 0;
19917+}
19918diff -urNp linux-3.0.7/arch/x86/mm/gup.c linux-3.0.7/arch/x86/mm/gup.c
19919--- linux-3.0.7/arch/x86/mm/gup.c 2011-07-21 22:17:23.000000000 -0400
19920+++ linux-3.0.7/arch/x86/mm/gup.c 2011-08-23 21:47:55.000000000 -0400
19921@@ -263,7 +263,7 @@ int __get_user_pages_fast(unsigned long
19922 addr = start;
19923 len = (unsigned long) nr_pages << PAGE_SHIFT;
19924 end = start + len;
19925- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19926+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19927 (void __user *)start, len)))
19928 return 0;
19929
19930diff -urNp linux-3.0.7/arch/x86/mm/highmem_32.c linux-3.0.7/arch/x86/mm/highmem_32.c
19931--- linux-3.0.7/arch/x86/mm/highmem_32.c 2011-07-21 22:17:23.000000000 -0400
19932+++ linux-3.0.7/arch/x86/mm/highmem_32.c 2011-08-23 21:47:55.000000000 -0400
19933@@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
19934 idx = type + KM_TYPE_NR*smp_processor_id();
19935 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19936 BUG_ON(!pte_none(*(kmap_pte-idx)));
19937+
19938+ pax_open_kernel();
19939 set_pte(kmap_pte-idx, mk_pte(page, prot));
19940+ pax_close_kernel();
19941
19942 return (void *)vaddr;
19943 }
19944diff -urNp linux-3.0.7/arch/x86/mm/hugetlbpage.c linux-3.0.7/arch/x86/mm/hugetlbpage.c
19945--- linux-3.0.7/arch/x86/mm/hugetlbpage.c 2011-07-21 22:17:23.000000000 -0400
19946+++ linux-3.0.7/arch/x86/mm/hugetlbpage.c 2011-08-23 21:47:55.000000000 -0400
19947@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
19948 struct hstate *h = hstate_file(file);
19949 struct mm_struct *mm = current->mm;
19950 struct vm_area_struct *vma;
19951- unsigned long start_addr;
19952+ unsigned long start_addr, pax_task_size = TASK_SIZE;
19953+
19954+#ifdef CONFIG_PAX_SEGMEXEC
19955+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19956+ pax_task_size = SEGMEXEC_TASK_SIZE;
19957+#endif
19958+
19959+ pax_task_size -= PAGE_SIZE;
19960
19961 if (len > mm->cached_hole_size) {
19962- start_addr = mm->free_area_cache;
19963+ start_addr = mm->free_area_cache;
19964 } else {
19965- start_addr = TASK_UNMAPPED_BASE;
19966- mm->cached_hole_size = 0;
19967+ start_addr = mm->mmap_base;
19968+ mm->cached_hole_size = 0;
19969 }
19970
19971 full_search:
19972@@ -280,26 +287,27 @@ full_search:
19973
19974 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19975 /* At this point: (!vma || addr < vma->vm_end). */
19976- if (TASK_SIZE - len < addr) {
19977+ if (pax_task_size - len < addr) {
19978 /*
19979 * Start a new search - just in case we missed
19980 * some holes.
19981 */
19982- if (start_addr != TASK_UNMAPPED_BASE) {
19983- start_addr = TASK_UNMAPPED_BASE;
19984+ if (start_addr != mm->mmap_base) {
19985+ start_addr = mm->mmap_base;
19986 mm->cached_hole_size = 0;
19987 goto full_search;
19988 }
19989 return -ENOMEM;
19990 }
19991- if (!vma || addr + len <= vma->vm_start) {
19992- mm->free_area_cache = addr + len;
19993- return addr;
19994- }
19995+ if (check_heap_stack_gap(vma, addr, len))
19996+ break;
19997 if (addr + mm->cached_hole_size < vma->vm_start)
19998 mm->cached_hole_size = vma->vm_start - addr;
19999 addr = ALIGN(vma->vm_end, huge_page_size(h));
20000 }
20001+
20002+ mm->free_area_cache = addr + len;
20003+ return addr;
20004 }
20005
20006 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
20007@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
20008 {
20009 struct hstate *h = hstate_file(file);
20010 struct mm_struct *mm = current->mm;
20011- struct vm_area_struct *vma, *prev_vma;
20012- unsigned long base = mm->mmap_base, addr = addr0;
20013+ struct vm_area_struct *vma;
20014+ unsigned long base = mm->mmap_base, addr;
20015 unsigned long largest_hole = mm->cached_hole_size;
20016- int first_time = 1;
20017
20018 /* don't allow allocations above current base */
20019 if (mm->free_area_cache > base)
20020@@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
20021 largest_hole = 0;
20022 mm->free_area_cache = base;
20023 }
20024-try_again:
20025+
20026 /* make sure it can fit in the remaining address space */
20027 if (mm->free_area_cache < len)
20028 goto fail;
20029
20030 /* either no address requested or can't fit in requested address hole */
20031- addr = (mm->free_area_cache - len) & huge_page_mask(h);
20032+ addr = (mm->free_area_cache - len);
20033 do {
20034+ addr &= huge_page_mask(h);
20035+ vma = find_vma(mm, addr);
20036 /*
20037 * Lookup failure means no vma is above this address,
20038 * i.e. return with success:
20039- */
20040- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
20041- return addr;
20042-
20043- /*
20044 * new region fits between prev_vma->vm_end and
20045 * vma->vm_start, use it:
20046 */
20047- if (addr + len <= vma->vm_start &&
20048- (!prev_vma || (addr >= prev_vma->vm_end))) {
20049+ if (check_heap_stack_gap(vma, addr, len)) {
20050 /* remember the address as a hint for next time */
20051- mm->cached_hole_size = largest_hole;
20052- return (mm->free_area_cache = addr);
20053- } else {
20054- /* pull free_area_cache down to the first hole */
20055- if (mm->free_area_cache == vma->vm_end) {
20056- mm->free_area_cache = vma->vm_start;
20057- mm->cached_hole_size = largest_hole;
20058- }
20059+ mm->cached_hole_size = largest_hole;
20060+ return (mm->free_area_cache = addr);
20061+ }
20062+ /* pull free_area_cache down to the first hole */
20063+ if (mm->free_area_cache == vma->vm_end) {
20064+ mm->free_area_cache = vma->vm_start;
20065+ mm->cached_hole_size = largest_hole;
20066 }
20067
20068 /* remember the largest hole we saw so far */
20069 if (addr + largest_hole < vma->vm_start)
20070- largest_hole = vma->vm_start - addr;
20071+ largest_hole = vma->vm_start - addr;
20072
20073 /* try just below the current vma->vm_start */
20074- addr = (vma->vm_start - len) & huge_page_mask(h);
20075- } while (len <= vma->vm_start);
20076+ addr = skip_heap_stack_gap(vma, len);
20077+ } while (!IS_ERR_VALUE(addr));
20078
20079 fail:
20080 /*
20081- * if hint left us with no space for the requested
20082- * mapping then try again:
20083- */
20084- if (first_time) {
20085- mm->free_area_cache = base;
20086- largest_hole = 0;
20087- first_time = 0;
20088- goto try_again;
20089- }
20090- /*
20091 * A failed mmap() very likely causes application failure,
20092 * so fall back to the bottom-up function here. This scenario
20093 * can happen with large stack limits and large mmap()
20094 * allocations.
20095 */
20096- mm->free_area_cache = TASK_UNMAPPED_BASE;
20097+
20098+#ifdef CONFIG_PAX_SEGMEXEC
20099+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20100+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
20101+ else
20102+#endif
20103+
20104+ mm->mmap_base = TASK_UNMAPPED_BASE;
20105+
20106+#ifdef CONFIG_PAX_RANDMMAP
20107+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20108+ mm->mmap_base += mm->delta_mmap;
20109+#endif
20110+
20111+ mm->free_area_cache = mm->mmap_base;
20112 mm->cached_hole_size = ~0UL;
20113 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
20114 len, pgoff, flags);
20115@@ -386,6 +392,7 @@ fail:
20116 /*
20117 * Restore the topdown base:
20118 */
20119+ mm->mmap_base = base;
20120 mm->free_area_cache = base;
20121 mm->cached_hole_size = ~0UL;
20122
20123@@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
20124 struct hstate *h = hstate_file(file);
20125 struct mm_struct *mm = current->mm;
20126 struct vm_area_struct *vma;
20127+ unsigned long pax_task_size = TASK_SIZE;
20128
20129 if (len & ~huge_page_mask(h))
20130 return -EINVAL;
20131- if (len > TASK_SIZE)
20132+
20133+#ifdef CONFIG_PAX_SEGMEXEC
20134+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20135+ pax_task_size = SEGMEXEC_TASK_SIZE;
20136+#endif
20137+
20138+ pax_task_size -= PAGE_SIZE;
20139+
20140+ if (len > pax_task_size)
20141 return -ENOMEM;
20142
20143 if (flags & MAP_FIXED) {
20144@@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
20145 if (addr) {
20146 addr = ALIGN(addr, huge_page_size(h));
20147 vma = find_vma(mm, addr);
20148- if (TASK_SIZE - len >= addr &&
20149- (!vma || addr + len <= vma->vm_start))
20150+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
20151 return addr;
20152 }
20153 if (mm->get_unmapped_area == arch_get_unmapped_area)
20154diff -urNp linux-3.0.7/arch/x86/mm/init_32.c linux-3.0.7/arch/x86/mm/init_32.c
20155--- linux-3.0.7/arch/x86/mm/init_32.c 2011-07-21 22:17:23.000000000 -0400
20156+++ linux-3.0.7/arch/x86/mm/init_32.c 2011-08-23 21:47:55.000000000 -0400
20157@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
20158 }
20159
20160 /*
20161- * Creates a middle page table and puts a pointer to it in the
20162- * given global directory entry. This only returns the gd entry
20163- * in non-PAE compilation mode, since the middle layer is folded.
20164- */
20165-static pmd_t * __init one_md_table_init(pgd_t *pgd)
20166-{
20167- pud_t *pud;
20168- pmd_t *pmd_table;
20169-
20170-#ifdef CONFIG_X86_PAE
20171- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
20172- if (after_bootmem)
20173- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
20174- else
20175- pmd_table = (pmd_t *)alloc_low_page();
20176- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
20177- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
20178- pud = pud_offset(pgd, 0);
20179- BUG_ON(pmd_table != pmd_offset(pud, 0));
20180-
20181- return pmd_table;
20182- }
20183-#endif
20184- pud = pud_offset(pgd, 0);
20185- pmd_table = pmd_offset(pud, 0);
20186-
20187- return pmd_table;
20188-}
20189-
20190-/*
20191 * Create a page table and place a pointer to it in a middle page
20192 * directory entry:
20193 */
20194@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
20195 page_table = (pte_t *)alloc_low_page();
20196
20197 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
20198+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20199+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
20200+#else
20201 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
20202+#endif
20203 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
20204 }
20205
20206 return pte_offset_kernel(pmd, 0);
20207 }
20208
20209+static pmd_t * __init one_md_table_init(pgd_t *pgd)
20210+{
20211+ pud_t *pud;
20212+ pmd_t *pmd_table;
20213+
20214+ pud = pud_offset(pgd, 0);
20215+ pmd_table = pmd_offset(pud, 0);
20216+
20217+ return pmd_table;
20218+}
20219+
20220 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
20221 {
20222 int pgd_idx = pgd_index(vaddr);
20223@@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
20224 int pgd_idx, pmd_idx;
20225 unsigned long vaddr;
20226 pgd_t *pgd;
20227+ pud_t *pud;
20228 pmd_t *pmd;
20229 pte_t *pte = NULL;
20230
20231@@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
20232 pgd = pgd_base + pgd_idx;
20233
20234 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
20235- pmd = one_md_table_init(pgd);
20236- pmd = pmd + pmd_index(vaddr);
20237+ pud = pud_offset(pgd, vaddr);
20238+ pmd = pmd_offset(pud, vaddr);
20239+
20240+#ifdef CONFIG_X86_PAE
20241+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20242+#endif
20243+
20244 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
20245 pmd++, pmd_idx++) {
20246 pte = page_table_kmap_check(one_page_table_init(pmd),
20247@@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
20248 }
20249 }
20250
20251-static inline int is_kernel_text(unsigned long addr)
20252+static inline int is_kernel_text(unsigned long start, unsigned long end)
20253 {
20254- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
20255- return 1;
20256- return 0;
20257+ if ((start > ktla_ktva((unsigned long)_etext) ||
20258+ end <= ktla_ktva((unsigned long)_stext)) &&
20259+ (start > ktla_ktva((unsigned long)_einittext) ||
20260+ end <= ktla_ktva((unsigned long)_sinittext)) &&
20261+
20262+#ifdef CONFIG_ACPI_SLEEP
20263+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
20264+#endif
20265+
20266+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
20267+ return 0;
20268+ return 1;
20269 }
20270
20271 /*
20272@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
20273 unsigned long last_map_addr = end;
20274 unsigned long start_pfn, end_pfn;
20275 pgd_t *pgd_base = swapper_pg_dir;
20276- int pgd_idx, pmd_idx, pte_ofs;
20277+ unsigned int pgd_idx, pmd_idx, pte_ofs;
20278 unsigned long pfn;
20279 pgd_t *pgd;
20280+ pud_t *pud;
20281 pmd_t *pmd;
20282 pte_t *pte;
20283 unsigned pages_2m, pages_4k;
20284@@ -281,8 +282,13 @@ repeat:
20285 pfn = start_pfn;
20286 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20287 pgd = pgd_base + pgd_idx;
20288- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
20289- pmd = one_md_table_init(pgd);
20290+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
20291+ pud = pud_offset(pgd, 0);
20292+ pmd = pmd_offset(pud, 0);
20293+
20294+#ifdef CONFIG_X86_PAE
20295+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20296+#endif
20297
20298 if (pfn >= end_pfn)
20299 continue;
20300@@ -294,14 +300,13 @@ repeat:
20301 #endif
20302 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
20303 pmd++, pmd_idx++) {
20304- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
20305+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
20306
20307 /*
20308 * Map with big pages if possible, otherwise
20309 * create normal page tables:
20310 */
20311 if (use_pse) {
20312- unsigned int addr2;
20313 pgprot_t prot = PAGE_KERNEL_LARGE;
20314 /*
20315 * first pass will use the same initial
20316@@ -311,11 +316,7 @@ repeat:
20317 __pgprot(PTE_IDENT_ATTR |
20318 _PAGE_PSE);
20319
20320- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
20321- PAGE_OFFSET + PAGE_SIZE-1;
20322-
20323- if (is_kernel_text(addr) ||
20324- is_kernel_text(addr2))
20325+ if (is_kernel_text(address, address + PMD_SIZE))
20326 prot = PAGE_KERNEL_LARGE_EXEC;
20327
20328 pages_2m++;
20329@@ -332,7 +333,7 @@ repeat:
20330 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20331 pte += pte_ofs;
20332 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
20333- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
20334+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
20335 pgprot_t prot = PAGE_KERNEL;
20336 /*
20337 * first pass will use the same initial
20338@@ -340,7 +341,7 @@ repeat:
20339 */
20340 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
20341
20342- if (is_kernel_text(addr))
20343+ if (is_kernel_text(address, address + PAGE_SIZE))
20344 prot = PAGE_KERNEL_EXEC;
20345
20346 pages_4k++;
20347@@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
20348
20349 pud = pud_offset(pgd, va);
20350 pmd = pmd_offset(pud, va);
20351- if (!pmd_present(*pmd))
20352+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
20353 break;
20354
20355 pte = pte_offset_kernel(pmd, va);
20356@@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
20357
20358 static void __init pagetable_init(void)
20359 {
20360- pgd_t *pgd_base = swapper_pg_dir;
20361-
20362- permanent_kmaps_init(pgd_base);
20363+ permanent_kmaps_init(swapper_pg_dir);
20364 }
20365
20366-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
20367+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
20368 EXPORT_SYMBOL_GPL(__supported_pte_mask);
20369
20370 /* user-defined highmem size */
20371@@ -757,6 +756,12 @@ void __init mem_init(void)
20372
20373 pci_iommu_alloc();
20374
20375+#ifdef CONFIG_PAX_PER_CPU_PGD
20376+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
20377+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20378+ KERNEL_PGD_PTRS);
20379+#endif
20380+
20381 #ifdef CONFIG_FLATMEM
20382 BUG_ON(!mem_map);
20383 #endif
20384@@ -774,7 +779,7 @@ void __init mem_init(void)
20385 set_highmem_pages_init();
20386
20387 codesize = (unsigned long) &_etext - (unsigned long) &_text;
20388- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
20389+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
20390 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
20391
20392 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
20393@@ -815,10 +820,10 @@ void __init mem_init(void)
20394 ((unsigned long)&__init_end -
20395 (unsigned long)&__init_begin) >> 10,
20396
20397- (unsigned long)&_etext, (unsigned long)&_edata,
20398- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
20399+ (unsigned long)&_sdata, (unsigned long)&_edata,
20400+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
20401
20402- (unsigned long)&_text, (unsigned long)&_etext,
20403+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
20404 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
20405
20406 /*
20407@@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
20408 if (!kernel_set_to_readonly)
20409 return;
20410
20411+ start = ktla_ktva(start);
20412 pr_debug("Set kernel text: %lx - %lx for read write\n",
20413 start, start+size);
20414
20415@@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
20416 if (!kernel_set_to_readonly)
20417 return;
20418
20419+ start = ktla_ktva(start);
20420 pr_debug("Set kernel text: %lx - %lx for read only\n",
20421 start, start+size);
20422
20423@@ -938,6 +945,7 @@ void mark_rodata_ro(void)
20424 unsigned long start = PFN_ALIGN(_text);
20425 unsigned long size = PFN_ALIGN(_etext) - start;
20426
20427+ start = ktla_ktva(start);
20428 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
20429 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
20430 size >> 10);
20431diff -urNp linux-3.0.7/arch/x86/mm/init_64.c linux-3.0.7/arch/x86/mm/init_64.c
20432--- linux-3.0.7/arch/x86/mm/init_64.c 2011-07-21 22:17:23.000000000 -0400
20433+++ linux-3.0.7/arch/x86/mm/init_64.c 2011-10-06 04:17:55.000000000 -0400
20434@@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpa
20435 * around without checking the pgd every time.
20436 */
20437
20438-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
20439+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
20440 EXPORT_SYMBOL_GPL(__supported_pte_mask);
20441
20442 int force_personality32;
20443@@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long star
20444
20445 for (address = start; address <= end; address += PGDIR_SIZE) {
20446 const pgd_t *pgd_ref = pgd_offset_k(address);
20447+
20448+#ifdef CONFIG_PAX_PER_CPU_PGD
20449+ unsigned long cpu;
20450+#else
20451 struct page *page;
20452+#endif
20453
20454 if (pgd_none(*pgd_ref))
20455 continue;
20456
20457 spin_lock(&pgd_lock);
20458+
20459+#ifdef CONFIG_PAX_PER_CPU_PGD
20460+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20461+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
20462+#else
20463 list_for_each_entry(page, &pgd_list, lru) {
20464 pgd_t *pgd;
20465 spinlock_t *pgt_lock;
20466@@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long star
20467 /* the pgt_lock only for Xen */
20468 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
20469 spin_lock(pgt_lock);
20470+#endif
20471
20472 if (pgd_none(*pgd))
20473 set_pgd(pgd, *pgd_ref);
20474@@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long star
20475 BUG_ON(pgd_page_vaddr(*pgd)
20476 != pgd_page_vaddr(*pgd_ref));
20477
20478+#ifndef CONFIG_PAX_PER_CPU_PGD
20479 spin_unlock(pgt_lock);
20480+#endif
20481+
20482 }
20483 spin_unlock(&pgd_lock);
20484 }
20485@@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
20486 pmd = fill_pmd(pud, vaddr);
20487 pte = fill_pte(pmd, vaddr);
20488
20489+ pax_open_kernel();
20490 set_pte(pte, new_pte);
20491+ pax_close_kernel();
20492
20493 /*
20494 * It's enough to flush this one mapping.
20495@@ -262,14 +278,12 @@ static void __init __init_extra_mapping(
20496 pgd = pgd_offset_k((unsigned long)__va(phys));
20497 if (pgd_none(*pgd)) {
20498 pud = (pud_t *) spp_getpage();
20499- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
20500- _PAGE_USER));
20501+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
20502 }
20503 pud = pud_offset(pgd, (unsigned long)__va(phys));
20504 if (pud_none(*pud)) {
20505 pmd = (pmd_t *) spp_getpage();
20506- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
20507- _PAGE_USER));
20508+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
20509 }
20510 pmd = pmd_offset(pud, phys);
20511 BUG_ON(!pmd_none(*pmd));
20512@@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsign
20513 if (pfn >= pgt_buf_top)
20514 panic("alloc_low_page: ran out of memory");
20515
20516- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
20517+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
20518 clear_page(adr);
20519 *phys = pfn * PAGE_SIZE;
20520 return adr;
20521@@ -346,7 +360,7 @@ static __ref void *map_low_page(void *vi
20522
20523 phys = __pa(virt);
20524 left = phys & (PAGE_SIZE - 1);
20525- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
20526+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
20527 adr = (void *)(((unsigned long)adr) | left);
20528
20529 return adr;
20530@@ -693,6 +707,12 @@ void __init mem_init(void)
20531
20532 pci_iommu_alloc();
20533
20534+#ifdef CONFIG_PAX_PER_CPU_PGD
20535+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
20536+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20537+ KERNEL_PGD_PTRS);
20538+#endif
20539+
20540 /* clear_bss() already clear the empty_zero_page */
20541
20542 reservedpages = 0;
20543@@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
20544 static struct vm_area_struct gate_vma = {
20545 .vm_start = VSYSCALL_START,
20546 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
20547- .vm_page_prot = PAGE_READONLY_EXEC,
20548- .vm_flags = VM_READ | VM_EXEC
20549+ .vm_page_prot = PAGE_READONLY,
20550+ .vm_flags = VM_READ
20551 };
20552
20553 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
20554@@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long add
20555
20556 const char *arch_vma_name(struct vm_area_struct *vma)
20557 {
20558- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
20559+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
20560 return "[vdso]";
20561 if (vma == &gate_vma)
20562 return "[vsyscall]";
20563diff -urNp linux-3.0.7/arch/x86/mm/init.c linux-3.0.7/arch/x86/mm/init.c
20564--- linux-3.0.7/arch/x86/mm/init.c 2011-07-21 22:17:23.000000000 -0400
20565+++ linux-3.0.7/arch/x86/mm/init.c 2011-08-23 21:48:14.000000000 -0400
20566@@ -31,7 +31,7 @@ int direct_gbpages
20567 static void __init find_early_table_space(unsigned long end, int use_pse,
20568 int use_gbpages)
20569 {
20570- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
20571+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
20572 phys_addr_t base;
20573
20574 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
20575@@ -313,12 +313,34 @@ unsigned long __init_refok init_memory_m
20576 */
20577 int devmem_is_allowed(unsigned long pagenr)
20578 {
20579- if (pagenr <= 256)
20580+#ifdef CONFIG_GRKERNSEC_KMEM
20581+ /* allow BDA */
20582+ if (!pagenr)
20583+ return 1;
20584+ /* allow EBDA */
20585+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
20586+ return 1;
20587+#else
20588+ if (!pagenr)
20589+ return 1;
20590+#ifdef CONFIG_VM86
20591+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
20592+ return 1;
20593+#endif
20594+#endif
20595+
20596+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
20597 return 1;
20598+#ifdef CONFIG_GRKERNSEC_KMEM
20599+ /* throw out everything else below 1MB */
20600+ if (pagenr <= 256)
20601+ return 0;
20602+#endif
20603 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
20604 return 0;
20605 if (!page_is_ram(pagenr))
20606 return 1;
20607+
20608 return 0;
20609 }
20610
20611@@ -373,6 +395,86 @@ void free_init_pages(char *what, unsigne
20612
20613 void free_initmem(void)
20614 {
20615+
20616+#ifdef CONFIG_PAX_KERNEXEC
20617+#ifdef CONFIG_X86_32
20618+ /* PaX: limit KERNEL_CS to actual size */
20619+ unsigned long addr, limit;
20620+ struct desc_struct d;
20621+ int cpu;
20622+
20623+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
20624+ limit = (limit - 1UL) >> PAGE_SHIFT;
20625+
20626+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
20627+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
20628+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
20629+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
20630+ }
20631+
20632+ /* PaX: make KERNEL_CS read-only */
20633+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
20634+ if (!paravirt_enabled())
20635+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
20636+/*
20637+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
20638+ pgd = pgd_offset_k(addr);
20639+ pud = pud_offset(pgd, addr);
20640+ pmd = pmd_offset(pud, addr);
20641+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
20642+ }
20643+*/
20644+#ifdef CONFIG_X86_PAE
20645+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
20646+/*
20647+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
20648+ pgd = pgd_offset_k(addr);
20649+ pud = pud_offset(pgd, addr);
20650+ pmd = pmd_offset(pud, addr);
20651+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
20652+ }
20653+*/
20654+#endif
20655+
20656+#ifdef CONFIG_MODULES
20657+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
20658+#endif
20659+
20660+#else
20661+ pgd_t *pgd;
20662+ pud_t *pud;
20663+ pmd_t *pmd;
20664+ unsigned long addr, end;
20665+
20666+ /* PaX: make kernel code/rodata read-only, rest non-executable */
20667+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
20668+ pgd = pgd_offset_k(addr);
20669+ pud = pud_offset(pgd, addr);
20670+ pmd = pmd_offset(pud, addr);
20671+ if (!pmd_present(*pmd))
20672+ continue;
20673+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
20674+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
20675+ else
20676+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
20677+ }
20678+
20679+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
20680+ end = addr + KERNEL_IMAGE_SIZE;
20681+ for (; addr < end; addr += PMD_SIZE) {
20682+ pgd = pgd_offset_k(addr);
20683+ pud = pud_offset(pgd, addr);
20684+ pmd = pmd_offset(pud, addr);
20685+ if (!pmd_present(*pmd))
20686+ continue;
20687+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
20688+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
20689+ }
20690+#endif
20691+
20692+ flush_tlb_all();
20693+#endif
20694+
20695 free_init_pages("unused kernel memory",
20696 (unsigned long)(&__init_begin),
20697 (unsigned long)(&__init_end));
20698diff -urNp linux-3.0.7/arch/x86/mm/iomap_32.c linux-3.0.7/arch/x86/mm/iomap_32.c
20699--- linux-3.0.7/arch/x86/mm/iomap_32.c 2011-07-21 22:17:23.000000000 -0400
20700+++ linux-3.0.7/arch/x86/mm/iomap_32.c 2011-08-23 21:47:55.000000000 -0400
20701@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
20702 type = kmap_atomic_idx_push();
20703 idx = type + KM_TYPE_NR * smp_processor_id();
20704 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
20705+
20706+ pax_open_kernel();
20707 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
20708+ pax_close_kernel();
20709+
20710 arch_flush_lazy_mmu_mode();
20711
20712 return (void *)vaddr;
20713diff -urNp linux-3.0.7/arch/x86/mm/ioremap.c linux-3.0.7/arch/x86/mm/ioremap.c
20714--- linux-3.0.7/arch/x86/mm/ioremap.c 2011-07-21 22:17:23.000000000 -0400
20715+++ linux-3.0.7/arch/x86/mm/ioremap.c 2011-08-23 21:47:55.000000000 -0400
20716@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(re
20717 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
20718 int is_ram = page_is_ram(pfn);
20719
20720- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
20721+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
20722 return NULL;
20723 WARN_ON_ONCE(is_ram);
20724 }
20725@@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
20726 early_param("early_ioremap_debug", early_ioremap_debug_setup);
20727
20728 static __initdata int after_paging_init;
20729-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
20730+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
20731
20732 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
20733 {
20734@@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
20735 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
20736
20737 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
20738- memset(bm_pte, 0, sizeof(bm_pte));
20739- pmd_populate_kernel(&init_mm, pmd, bm_pte);
20740+ pmd_populate_user(&init_mm, pmd, bm_pte);
20741
20742 /*
20743 * The boot-ioremap range spans multiple pmds, for which
20744diff -urNp linux-3.0.7/arch/x86/mm/kmemcheck/kmemcheck.c linux-3.0.7/arch/x86/mm/kmemcheck/kmemcheck.c
20745--- linux-3.0.7/arch/x86/mm/kmemcheck/kmemcheck.c 2011-07-21 22:17:23.000000000 -0400
20746+++ linux-3.0.7/arch/x86/mm/kmemcheck/kmemcheck.c 2011-08-23 21:47:55.000000000 -0400
20747@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
20748 * memory (e.g. tracked pages)? For now, we need this to avoid
20749 * invoking kmemcheck for PnP BIOS calls.
20750 */
20751- if (regs->flags & X86_VM_MASK)
20752+ if (v8086_mode(regs))
20753 return false;
20754- if (regs->cs != __KERNEL_CS)
20755+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
20756 return false;
20757
20758 pte = kmemcheck_pte_lookup(address);
20759diff -urNp linux-3.0.7/arch/x86/mm/mmap.c linux-3.0.7/arch/x86/mm/mmap.c
20760--- linux-3.0.7/arch/x86/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
20761+++ linux-3.0.7/arch/x86/mm/mmap.c 2011-08-23 21:47:55.000000000 -0400
20762@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
20763 * Leave an at least ~128 MB hole with possible stack randomization.
20764 */
20765 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
20766-#define MAX_GAP (TASK_SIZE/6*5)
20767+#define MAX_GAP (pax_task_size/6*5)
20768
20769 /*
20770 * True on X86_32 or when emulating IA32 on X86_64
20771@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
20772 return rnd << PAGE_SHIFT;
20773 }
20774
20775-static unsigned long mmap_base(void)
20776+static unsigned long mmap_base(struct mm_struct *mm)
20777 {
20778 unsigned long gap = rlimit(RLIMIT_STACK);
20779+ unsigned long pax_task_size = TASK_SIZE;
20780+
20781+#ifdef CONFIG_PAX_SEGMEXEC
20782+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20783+ pax_task_size = SEGMEXEC_TASK_SIZE;
20784+#endif
20785
20786 if (gap < MIN_GAP)
20787 gap = MIN_GAP;
20788 else if (gap > MAX_GAP)
20789 gap = MAX_GAP;
20790
20791- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
20792+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
20793 }
20794
20795 /*
20796 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
20797 * does, but not when emulating X86_32
20798 */
20799-static unsigned long mmap_legacy_base(void)
20800+static unsigned long mmap_legacy_base(struct mm_struct *mm)
20801 {
20802- if (mmap_is_ia32())
20803+ if (mmap_is_ia32()) {
20804+
20805+#ifdef CONFIG_PAX_SEGMEXEC
20806+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20807+ return SEGMEXEC_TASK_UNMAPPED_BASE;
20808+ else
20809+#endif
20810+
20811 return TASK_UNMAPPED_BASE;
20812- else
20813+ } else
20814 return TASK_UNMAPPED_BASE + mmap_rnd();
20815 }
20816
20817@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
20818 void arch_pick_mmap_layout(struct mm_struct *mm)
20819 {
20820 if (mmap_is_legacy()) {
20821- mm->mmap_base = mmap_legacy_base();
20822+ mm->mmap_base = mmap_legacy_base(mm);
20823+
20824+#ifdef CONFIG_PAX_RANDMMAP
20825+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20826+ mm->mmap_base += mm->delta_mmap;
20827+#endif
20828+
20829 mm->get_unmapped_area = arch_get_unmapped_area;
20830 mm->unmap_area = arch_unmap_area;
20831 } else {
20832- mm->mmap_base = mmap_base();
20833+ mm->mmap_base = mmap_base(mm);
20834+
20835+#ifdef CONFIG_PAX_RANDMMAP
20836+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20837+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
20838+#endif
20839+
20840 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
20841 mm->unmap_area = arch_unmap_area_topdown;
20842 }
20843diff -urNp linux-3.0.7/arch/x86/mm/mmio-mod.c linux-3.0.7/arch/x86/mm/mmio-mod.c
20844--- linux-3.0.7/arch/x86/mm/mmio-mod.c 2011-07-21 22:17:23.000000000 -0400
20845+++ linux-3.0.7/arch/x86/mm/mmio-mod.c 2011-08-23 21:47:55.000000000 -0400
20846@@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
20847 break;
20848 default:
20849 {
20850- unsigned char *ip = (unsigned char *)instptr;
20851+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
20852 my_trace->opcode = MMIO_UNKNOWN_OP;
20853 my_trace->width = 0;
20854 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
20855@@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p,
20856 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
20857 void __iomem *addr)
20858 {
20859- static atomic_t next_id;
20860+ static atomic_unchecked_t next_id;
20861 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
20862 /* These are page-unaligned. */
20863 struct mmiotrace_map map = {
20864@@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
20865 .private = trace
20866 },
20867 .phys = offset,
20868- .id = atomic_inc_return(&next_id)
20869+ .id = atomic_inc_return_unchecked(&next_id)
20870 };
20871 map.map_id = trace->id;
20872
20873diff -urNp linux-3.0.7/arch/x86/mm/pageattr.c linux-3.0.7/arch/x86/mm/pageattr.c
20874--- linux-3.0.7/arch/x86/mm/pageattr.c 2011-07-21 22:17:23.000000000 -0400
20875+++ linux-3.0.7/arch/x86/mm/pageattr.c 2011-08-23 21:47:55.000000000 -0400
20876@@ -261,7 +261,7 @@ static inline pgprot_t static_protection
20877 */
20878 #ifdef CONFIG_PCI_BIOS
20879 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
20880- pgprot_val(forbidden) |= _PAGE_NX;
20881+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20882 #endif
20883
20884 /*
20885@@ -269,9 +269,10 @@ static inline pgprot_t static_protection
20886 * Does not cover __inittext since that is gone later on. On
20887 * 64bit we do not enforce !NX on the low mapping
20888 */
20889- if (within(address, (unsigned long)_text, (unsigned long)_etext))
20890- pgprot_val(forbidden) |= _PAGE_NX;
20891+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
20892+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20893
20894+#ifdef CONFIG_DEBUG_RODATA
20895 /*
20896 * The .rodata section needs to be read-only. Using the pfn
20897 * catches all aliases.
20898@@ -279,6 +280,7 @@ static inline pgprot_t static_protection
20899 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
20900 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
20901 pgprot_val(forbidden) |= _PAGE_RW;
20902+#endif
20903
20904 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
20905 /*
20906@@ -317,6 +319,13 @@ static inline pgprot_t static_protection
20907 }
20908 #endif
20909
20910+#ifdef CONFIG_PAX_KERNEXEC
20911+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
20912+ pgprot_val(forbidden) |= _PAGE_RW;
20913+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20914+ }
20915+#endif
20916+
20917 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
20918
20919 return prot;
20920@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
20921 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
20922 {
20923 /* change init_mm */
20924+ pax_open_kernel();
20925 set_pte_atomic(kpte, pte);
20926+
20927 #ifdef CONFIG_X86_32
20928 if (!SHARED_KERNEL_PMD) {
20929+
20930+#ifdef CONFIG_PAX_PER_CPU_PGD
20931+ unsigned long cpu;
20932+#else
20933 struct page *page;
20934+#endif
20935
20936+#ifdef CONFIG_PAX_PER_CPU_PGD
20937+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20938+ pgd_t *pgd = get_cpu_pgd(cpu);
20939+#else
20940 list_for_each_entry(page, &pgd_list, lru) {
20941- pgd_t *pgd;
20942+ pgd_t *pgd = (pgd_t *)page_address(page);
20943+#endif
20944+
20945 pud_t *pud;
20946 pmd_t *pmd;
20947
20948- pgd = (pgd_t *)page_address(page) + pgd_index(address);
20949+ pgd += pgd_index(address);
20950 pud = pud_offset(pgd, address);
20951 pmd = pmd_offset(pud, address);
20952 set_pte_atomic((pte_t *)pmd, pte);
20953 }
20954 }
20955 #endif
20956+ pax_close_kernel();
20957 }
20958
20959 static int
20960diff -urNp linux-3.0.7/arch/x86/mm/pageattr-test.c linux-3.0.7/arch/x86/mm/pageattr-test.c
20961--- linux-3.0.7/arch/x86/mm/pageattr-test.c 2011-07-21 22:17:23.000000000 -0400
20962+++ linux-3.0.7/arch/x86/mm/pageattr-test.c 2011-08-23 21:47:55.000000000 -0400
20963@@ -36,7 +36,7 @@ enum {
20964
20965 static int pte_testbit(pte_t pte)
20966 {
20967- return pte_flags(pte) & _PAGE_UNUSED1;
20968+ return pte_flags(pte) & _PAGE_CPA_TEST;
20969 }
20970
20971 struct split_state {
20972diff -urNp linux-3.0.7/arch/x86/mm/pat.c linux-3.0.7/arch/x86/mm/pat.c
20973--- linux-3.0.7/arch/x86/mm/pat.c 2011-07-21 22:17:23.000000000 -0400
20974+++ linux-3.0.7/arch/x86/mm/pat.c 2011-08-23 21:47:55.000000000 -0400
20975@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
20976
20977 if (!entry) {
20978 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
20979- current->comm, current->pid, start, end);
20980+ current->comm, task_pid_nr(current), start, end);
20981 return -EINVAL;
20982 }
20983
20984@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
20985 while (cursor < to) {
20986 if (!devmem_is_allowed(pfn)) {
20987 printk(KERN_INFO
20988- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
20989- current->comm, from, to);
20990+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
20991+ current->comm, from, to, cursor);
20992 return 0;
20993 }
20994 cursor += PAGE_SIZE;
20995@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
20996 printk(KERN_INFO
20997 "%s:%d ioremap_change_attr failed %s "
20998 "for %Lx-%Lx\n",
20999- current->comm, current->pid,
21000+ current->comm, task_pid_nr(current),
21001 cattr_name(flags),
21002 base, (unsigned long long)(base + size));
21003 return -EINVAL;
21004@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
21005 if (want_flags != flags) {
21006 printk(KERN_WARNING
21007 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
21008- current->comm, current->pid,
21009+ current->comm, task_pid_nr(current),
21010 cattr_name(want_flags),
21011 (unsigned long long)paddr,
21012 (unsigned long long)(paddr + size),
21013@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
21014 free_memtype(paddr, paddr + size);
21015 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
21016 " for %Lx-%Lx, got %s\n",
21017- current->comm, current->pid,
21018+ current->comm, task_pid_nr(current),
21019 cattr_name(want_flags),
21020 (unsigned long long)paddr,
21021 (unsigned long long)(paddr + size),
21022diff -urNp linux-3.0.7/arch/x86/mm/pf_in.c linux-3.0.7/arch/x86/mm/pf_in.c
21023--- linux-3.0.7/arch/x86/mm/pf_in.c 2011-07-21 22:17:23.000000000 -0400
21024+++ linux-3.0.7/arch/x86/mm/pf_in.c 2011-08-23 21:47:55.000000000 -0400
21025@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
21026 int i;
21027 enum reason_type rv = OTHERS;
21028
21029- p = (unsigned char *)ins_addr;
21030+ p = (unsigned char *)ktla_ktva(ins_addr);
21031 p += skip_prefix(p, &prf);
21032 p += get_opcode(p, &opcode);
21033
21034@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
21035 struct prefix_bits prf;
21036 int i;
21037
21038- p = (unsigned char *)ins_addr;
21039+ p = (unsigned char *)ktla_ktva(ins_addr);
21040 p += skip_prefix(p, &prf);
21041 p += get_opcode(p, &opcode);
21042
21043@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
21044 struct prefix_bits prf;
21045 int i;
21046
21047- p = (unsigned char *)ins_addr;
21048+ p = (unsigned char *)ktla_ktva(ins_addr);
21049 p += skip_prefix(p, &prf);
21050 p += get_opcode(p, &opcode);
21051
21052@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned l
21053 struct prefix_bits prf;
21054 int i;
21055
21056- p = (unsigned char *)ins_addr;
21057+ p = (unsigned char *)ktla_ktva(ins_addr);
21058 p += skip_prefix(p, &prf);
21059 p += get_opcode(p, &opcode);
21060 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
21061@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned l
21062 struct prefix_bits prf;
21063 int i;
21064
21065- p = (unsigned char *)ins_addr;
21066+ p = (unsigned char *)ktla_ktva(ins_addr);
21067 p += skip_prefix(p, &prf);
21068 p += get_opcode(p, &opcode);
21069 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
21070diff -urNp linux-3.0.7/arch/x86/mm/pgtable_32.c linux-3.0.7/arch/x86/mm/pgtable_32.c
21071--- linux-3.0.7/arch/x86/mm/pgtable_32.c 2011-07-21 22:17:23.000000000 -0400
21072+++ linux-3.0.7/arch/x86/mm/pgtable_32.c 2011-08-23 21:47:55.000000000 -0400
21073@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
21074 return;
21075 }
21076 pte = pte_offset_kernel(pmd, vaddr);
21077+
21078+ pax_open_kernel();
21079 if (pte_val(pteval))
21080 set_pte_at(&init_mm, vaddr, pte, pteval);
21081 else
21082 pte_clear(&init_mm, vaddr, pte);
21083+ pax_close_kernel();
21084
21085 /*
21086 * It's enough to flush this one mapping.
21087diff -urNp linux-3.0.7/arch/x86/mm/pgtable.c linux-3.0.7/arch/x86/mm/pgtable.c
21088--- linux-3.0.7/arch/x86/mm/pgtable.c 2011-07-21 22:17:23.000000000 -0400
21089+++ linux-3.0.7/arch/x86/mm/pgtable.c 2011-08-23 21:47:55.000000000 -0400
21090@@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
21091 list_del(&page->lru);
21092 }
21093
21094-#define UNSHARED_PTRS_PER_PGD \
21095- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21096+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21097+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
21098
21099+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21100+{
21101+ while (count--)
21102+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
21103+}
21104+#endif
21105+
21106+#ifdef CONFIG_PAX_PER_CPU_PGD
21107+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21108+{
21109+ while (count--)
21110+
21111+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21112+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
21113+#else
21114+ *dst++ = *src++;
21115+#endif
21116
21117+}
21118+#endif
21119+
21120+#ifdef CONFIG_X86_64
21121+#define pxd_t pud_t
21122+#define pyd_t pgd_t
21123+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
21124+#define pxd_free(mm, pud) pud_free((mm), (pud))
21125+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
21126+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
21127+#define PYD_SIZE PGDIR_SIZE
21128+#else
21129+#define pxd_t pmd_t
21130+#define pyd_t pud_t
21131+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
21132+#define pxd_free(mm, pud) pmd_free((mm), (pud))
21133+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
21134+#define pyd_offset(mm ,address) pud_offset((mm), (address))
21135+#define PYD_SIZE PUD_SIZE
21136+#endif
21137+
21138+#ifdef CONFIG_PAX_PER_CPU_PGD
21139+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
21140+static inline void pgd_dtor(pgd_t *pgd) {}
21141+#else
21142 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
21143 {
21144 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
21145@@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
21146 pgd_list_del(pgd);
21147 spin_unlock(&pgd_lock);
21148 }
21149+#endif
21150
21151 /*
21152 * List of all pgd's needed for non-PAE so it can invalidate entries
21153@@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
21154 * -- wli
21155 */
21156
21157-#ifdef CONFIG_X86_PAE
21158+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21159 /*
21160 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
21161 * updating the top-level pagetable entries to guarantee the
21162@@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
21163 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
21164 * and initialize the kernel pmds here.
21165 */
21166-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
21167+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21168
21169 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
21170 {
21171@@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm,
21172 */
21173 flush_tlb_mm(mm);
21174 }
21175+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
21176+#define PREALLOCATED_PXDS USER_PGD_PTRS
21177 #else /* !CONFIG_X86_PAE */
21178
21179 /* No need to prepopulate any pagetable entries in non-PAE modes. */
21180-#define PREALLOCATED_PMDS 0
21181+#define PREALLOCATED_PXDS 0
21182
21183 #endif /* CONFIG_X86_PAE */
21184
21185-static void free_pmds(pmd_t *pmds[])
21186+static void free_pxds(pxd_t *pxds[])
21187 {
21188 int i;
21189
21190- for(i = 0; i < PREALLOCATED_PMDS; i++)
21191- if (pmds[i])
21192- free_page((unsigned long)pmds[i]);
21193+ for(i = 0; i < PREALLOCATED_PXDS; i++)
21194+ if (pxds[i])
21195+ free_page((unsigned long)pxds[i]);
21196 }
21197
21198-static int preallocate_pmds(pmd_t *pmds[])
21199+static int preallocate_pxds(pxd_t *pxds[])
21200 {
21201 int i;
21202 bool failed = false;
21203
21204- for(i = 0; i < PREALLOCATED_PMDS; i++) {
21205- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
21206- if (pmd == NULL)
21207+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
21208+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
21209+ if (pxd == NULL)
21210 failed = true;
21211- pmds[i] = pmd;
21212+ pxds[i] = pxd;
21213 }
21214
21215 if (failed) {
21216- free_pmds(pmds);
21217+ free_pxds(pxds);
21218 return -ENOMEM;
21219 }
21220
21221@@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
21222 * preallocate which never got a corresponding vma will need to be
21223 * freed manually.
21224 */
21225-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
21226+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
21227 {
21228 int i;
21229
21230- for(i = 0; i < PREALLOCATED_PMDS; i++) {
21231+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
21232 pgd_t pgd = pgdp[i];
21233
21234 if (pgd_val(pgd) != 0) {
21235- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
21236+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
21237
21238- pgdp[i] = native_make_pgd(0);
21239+ set_pgd(pgdp + i, native_make_pgd(0));
21240
21241- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
21242- pmd_free(mm, pmd);
21243+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
21244+ pxd_free(mm, pxd);
21245 }
21246 }
21247 }
21248
21249-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
21250+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
21251 {
21252- pud_t *pud;
21253+ pyd_t *pyd;
21254 unsigned long addr;
21255 int i;
21256
21257- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
21258+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
21259 return;
21260
21261- pud = pud_offset(pgd, 0);
21262+#ifdef CONFIG_X86_64
21263+ pyd = pyd_offset(mm, 0L);
21264+#else
21265+ pyd = pyd_offset(pgd, 0L);
21266+#endif
21267
21268- for (addr = i = 0; i < PREALLOCATED_PMDS;
21269- i++, pud++, addr += PUD_SIZE) {
21270- pmd_t *pmd = pmds[i];
21271+ for (addr = i = 0; i < PREALLOCATED_PXDS;
21272+ i++, pyd++, addr += PYD_SIZE) {
21273+ pxd_t *pxd = pxds[i];
21274
21275 if (i >= KERNEL_PGD_BOUNDARY)
21276- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21277- sizeof(pmd_t) * PTRS_PER_PMD);
21278+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21279+ sizeof(pxd_t) * PTRS_PER_PMD);
21280
21281- pud_populate(mm, pud, pmd);
21282+ pyd_populate(mm, pyd, pxd);
21283 }
21284 }
21285
21286 pgd_t *pgd_alloc(struct mm_struct *mm)
21287 {
21288 pgd_t *pgd;
21289- pmd_t *pmds[PREALLOCATED_PMDS];
21290+ pxd_t *pxds[PREALLOCATED_PXDS];
21291
21292 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
21293
21294@@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21295
21296 mm->pgd = pgd;
21297
21298- if (preallocate_pmds(pmds) != 0)
21299+ if (preallocate_pxds(pxds) != 0)
21300 goto out_free_pgd;
21301
21302 if (paravirt_pgd_alloc(mm) != 0)
21303- goto out_free_pmds;
21304+ goto out_free_pxds;
21305
21306 /*
21307 * Make sure that pre-populating the pmds is atomic with
21308@@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21309 spin_lock(&pgd_lock);
21310
21311 pgd_ctor(mm, pgd);
21312- pgd_prepopulate_pmd(mm, pgd, pmds);
21313+ pgd_prepopulate_pxd(mm, pgd, pxds);
21314
21315 spin_unlock(&pgd_lock);
21316
21317 return pgd;
21318
21319-out_free_pmds:
21320- free_pmds(pmds);
21321+out_free_pxds:
21322+ free_pxds(pxds);
21323 out_free_pgd:
21324 free_page((unsigned long)pgd);
21325 out:
21326@@ -295,7 +344,7 @@ out:
21327
21328 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
21329 {
21330- pgd_mop_up_pmds(mm, pgd);
21331+ pgd_mop_up_pxds(mm, pgd);
21332 pgd_dtor(pgd);
21333 paravirt_pgd_free(mm, pgd);
21334 free_page((unsigned long)pgd);
21335diff -urNp linux-3.0.7/arch/x86/mm/setup_nx.c linux-3.0.7/arch/x86/mm/setup_nx.c
21336--- linux-3.0.7/arch/x86/mm/setup_nx.c 2011-07-21 22:17:23.000000000 -0400
21337+++ linux-3.0.7/arch/x86/mm/setup_nx.c 2011-08-23 21:47:55.000000000 -0400
21338@@ -5,8 +5,10 @@
21339 #include <asm/pgtable.h>
21340 #include <asm/proto.h>
21341
21342+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21343 static int disable_nx __cpuinitdata;
21344
21345+#ifndef CONFIG_PAX_PAGEEXEC
21346 /*
21347 * noexec = on|off
21348 *
21349@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
21350 return 0;
21351 }
21352 early_param("noexec", noexec_setup);
21353+#endif
21354+
21355+#endif
21356
21357 void __cpuinit x86_configure_nx(void)
21358 {
21359+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21360 if (cpu_has_nx && !disable_nx)
21361 __supported_pte_mask |= _PAGE_NX;
21362 else
21363+#endif
21364 __supported_pte_mask &= ~_PAGE_NX;
21365 }
21366
21367diff -urNp linux-3.0.7/arch/x86/mm/tlb.c linux-3.0.7/arch/x86/mm/tlb.c
21368--- linux-3.0.7/arch/x86/mm/tlb.c 2011-07-21 22:17:23.000000000 -0400
21369+++ linux-3.0.7/arch/x86/mm/tlb.c 2011-08-23 21:47:55.000000000 -0400
21370@@ -65,7 +65,11 @@ void leave_mm(int cpu)
21371 BUG();
21372 cpumask_clear_cpu(cpu,
21373 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
21374+
21375+#ifndef CONFIG_PAX_PER_CPU_PGD
21376 load_cr3(swapper_pg_dir);
21377+#endif
21378+
21379 }
21380 EXPORT_SYMBOL_GPL(leave_mm);
21381
21382diff -urNp linux-3.0.7/arch/x86/net/bpf_jit_comp.c linux-3.0.7/arch/x86/net/bpf_jit_comp.c
21383--- linux-3.0.7/arch/x86/net/bpf_jit_comp.c 2011-07-21 22:17:23.000000000 -0400
21384+++ linux-3.0.7/arch/x86/net/bpf_jit_comp.c 2011-08-23 21:47:55.000000000 -0400
21385@@ -589,7 +589,9 @@ cond_branch: f_offset = addrs[i + filt
21386 module_free(NULL, image);
21387 return;
21388 }
21389+ pax_open_kernel();
21390 memcpy(image + proglen, temp, ilen);
21391+ pax_close_kernel();
21392 }
21393 proglen += ilen;
21394 addrs[i] = proglen;
21395@@ -609,7 +611,7 @@ cond_branch: f_offset = addrs[i + filt
21396 break;
21397 }
21398 if (proglen == oldproglen) {
21399- image = module_alloc(max_t(unsigned int,
21400+ image = module_alloc_exec(max_t(unsigned int,
21401 proglen,
21402 sizeof(struct work_struct)));
21403 if (!image)
21404diff -urNp linux-3.0.7/arch/x86/net/bpf_jit.S linux-3.0.7/arch/x86/net/bpf_jit.S
21405--- linux-3.0.7/arch/x86/net/bpf_jit.S 2011-07-21 22:17:23.000000000 -0400
21406+++ linux-3.0.7/arch/x86/net/bpf_jit.S 2011-10-07 19:07:28.000000000 -0400
21407@@ -9,6 +9,7 @@
21408 */
21409 #include <linux/linkage.h>
21410 #include <asm/dwarf2.h>
21411+#include <asm/alternative-asm.h>
21412
21413 /*
21414 * Calling convention :
21415@@ -35,6 +36,7 @@ sk_load_word:
21416 jle bpf_slow_path_word
21417 mov (SKBDATA,%rsi),%eax
21418 bswap %eax /* ntohl() */
21419+ pax_force_retaddr
21420 ret
21421
21422
21423@@ -53,6 +55,7 @@ sk_load_half:
21424 jle bpf_slow_path_half
21425 movzwl (SKBDATA,%rsi),%eax
21426 rol $8,%ax # ntohs()
21427+ pax_force_retaddr
21428 ret
21429
21430 sk_load_byte_ind:
21431@@ -66,6 +69,7 @@ sk_load_byte:
21432 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
21433 jle bpf_slow_path_byte
21434 movzbl (SKBDATA,%rsi),%eax
21435+ pax_force_retaddr
21436 ret
21437
21438 /**
21439@@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
21440 movzbl (SKBDATA,%rsi),%ebx
21441 and $15,%bl
21442 shl $2,%bl
21443+ pax_force_retaddr
21444 ret
21445 CFI_ENDPROC
21446 ENDPROC(sk_load_byte_msh)
21447@@ -91,6 +96,7 @@ bpf_error:
21448 xor %eax,%eax
21449 mov -8(%rbp),%rbx
21450 leaveq
21451+ pax_force_retaddr
21452 ret
21453
21454 /* rsi contains offset and can be scratched */
21455@@ -113,6 +119,7 @@ bpf_slow_path_word:
21456 js bpf_error
21457 mov -12(%rbp),%eax
21458 bswap %eax
21459+ pax_force_retaddr
21460 ret
21461
21462 bpf_slow_path_half:
21463@@ -121,12 +128,14 @@ bpf_slow_path_half:
21464 mov -12(%rbp),%ax
21465 rol $8,%ax
21466 movzwl %ax,%eax
21467+ pax_force_retaddr
21468 ret
21469
21470 bpf_slow_path_byte:
21471 bpf_slow_path_common(1)
21472 js bpf_error
21473 movzbl -12(%rbp),%eax
21474+ pax_force_retaddr
21475 ret
21476
21477 bpf_slow_path_byte_msh:
21478@@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
21479 and $15,%al
21480 shl $2,%al
21481 xchg %eax,%ebx
21482+ pax_force_retaddr
21483 ret
21484diff -urNp linux-3.0.7/arch/x86/oprofile/backtrace.c linux-3.0.7/arch/x86/oprofile/backtrace.c
21485--- linux-3.0.7/arch/x86/oprofile/backtrace.c 2011-09-02 18:11:21.000000000 -0400
21486+++ linux-3.0.7/arch/x86/oprofile/backtrace.c 2011-10-06 04:17:55.000000000 -0400
21487@@ -83,11 +83,11 @@ dump_user_backtrace_32(struct stack_fram
21488 struct stack_frame_ia32 *fp;
21489 unsigned long bytes;
21490
21491- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
21492+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
21493 if (bytes != sizeof(bufhead))
21494 return NULL;
21495
21496- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
21497+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
21498
21499 oprofile_add_trace(bufhead[0].return_address);
21500
21501@@ -129,7 +129,7 @@ static struct stack_frame *dump_user_bac
21502 struct stack_frame bufhead[2];
21503 unsigned long bytes;
21504
21505- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
21506+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
21507 if (bytes != sizeof(bufhead))
21508 return NULL;
21509
21510@@ -148,7 +148,7 @@ x86_backtrace(struct pt_regs * const reg
21511 {
21512 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
21513
21514- if (!user_mode_vm(regs)) {
21515+ if (!user_mode(regs)) {
21516 unsigned long stack = kernel_stack_pointer(regs);
21517 if (depth)
21518 dump_trace(NULL, regs, (unsigned long *)stack, 0,
21519diff -urNp linux-3.0.7/arch/x86/pci/mrst.c linux-3.0.7/arch/x86/pci/mrst.c
21520--- linux-3.0.7/arch/x86/pci/mrst.c 2011-07-21 22:17:23.000000000 -0400
21521+++ linux-3.0.7/arch/x86/pci/mrst.c 2011-08-23 21:47:55.000000000 -0400
21522@@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
21523 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
21524 pci_mmcfg_late_init();
21525 pcibios_enable_irq = mrst_pci_irq_enable;
21526- pci_root_ops = pci_mrst_ops;
21527+ pax_open_kernel();
21528+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
21529+ pax_close_kernel();
21530 /* Continue with standard init */
21531 return 1;
21532 }
21533diff -urNp linux-3.0.7/arch/x86/pci/pcbios.c linux-3.0.7/arch/x86/pci/pcbios.c
21534--- linux-3.0.7/arch/x86/pci/pcbios.c 2011-07-21 22:17:23.000000000 -0400
21535+++ linux-3.0.7/arch/x86/pci/pcbios.c 2011-08-23 21:47:55.000000000 -0400
21536@@ -79,50 +79,93 @@ union bios32 {
21537 static struct {
21538 unsigned long address;
21539 unsigned short segment;
21540-} bios32_indirect = { 0, __KERNEL_CS };
21541+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
21542
21543 /*
21544 * Returns the entry point for the given service, NULL on error
21545 */
21546
21547-static unsigned long bios32_service(unsigned long service)
21548+static unsigned long __devinit bios32_service(unsigned long service)
21549 {
21550 unsigned char return_code; /* %al */
21551 unsigned long address; /* %ebx */
21552 unsigned long length; /* %ecx */
21553 unsigned long entry; /* %edx */
21554 unsigned long flags;
21555+ struct desc_struct d, *gdt;
21556
21557 local_irq_save(flags);
21558- __asm__("lcall *(%%edi); cld"
21559+
21560+ gdt = get_cpu_gdt_table(smp_processor_id());
21561+
21562+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
21563+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
21564+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
21565+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
21566+
21567+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
21568 : "=a" (return_code),
21569 "=b" (address),
21570 "=c" (length),
21571 "=d" (entry)
21572 : "0" (service),
21573 "1" (0),
21574- "D" (&bios32_indirect));
21575+ "D" (&bios32_indirect),
21576+ "r"(__PCIBIOS_DS)
21577+ : "memory");
21578+
21579+ pax_open_kernel();
21580+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
21581+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
21582+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
21583+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
21584+ pax_close_kernel();
21585+
21586 local_irq_restore(flags);
21587
21588 switch (return_code) {
21589- case 0:
21590- return address + entry;
21591- case 0x80: /* Not present */
21592- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
21593- return 0;
21594- default: /* Shouldn't happen */
21595- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
21596- service, return_code);
21597+ case 0: {
21598+ int cpu;
21599+ unsigned char flags;
21600+
21601+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
21602+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
21603+ printk(KERN_WARNING "bios32_service: not valid\n");
21604 return 0;
21605+ }
21606+ address = address + PAGE_OFFSET;
21607+ length += 16UL; /* some BIOSs underreport this... */
21608+ flags = 4;
21609+ if (length >= 64*1024*1024) {
21610+ length >>= PAGE_SHIFT;
21611+ flags |= 8;
21612+ }
21613+
21614+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
21615+ gdt = get_cpu_gdt_table(cpu);
21616+ pack_descriptor(&d, address, length, 0x9b, flags);
21617+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
21618+ pack_descriptor(&d, address, length, 0x93, flags);
21619+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
21620+ }
21621+ return entry;
21622+ }
21623+ case 0x80: /* Not present */
21624+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
21625+ return 0;
21626+ default: /* Shouldn't happen */
21627+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
21628+ service, return_code);
21629+ return 0;
21630 }
21631 }
21632
21633 static struct {
21634 unsigned long address;
21635 unsigned short segment;
21636-} pci_indirect = { 0, __KERNEL_CS };
21637+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
21638
21639-static int pci_bios_present;
21640+static int pci_bios_present __read_only;
21641
21642 static int __devinit check_pcibios(void)
21643 {
21644@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
21645 unsigned long flags, pcibios_entry;
21646
21647 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
21648- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
21649+ pci_indirect.address = pcibios_entry;
21650
21651 local_irq_save(flags);
21652- __asm__(
21653- "lcall *(%%edi); cld\n\t"
21654+ __asm__("movw %w6, %%ds\n\t"
21655+ "lcall *%%ss:(%%edi); cld\n\t"
21656+ "push %%ss\n\t"
21657+ "pop %%ds\n\t"
21658 "jc 1f\n\t"
21659 "xor %%ah, %%ah\n"
21660 "1:"
21661@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
21662 "=b" (ebx),
21663 "=c" (ecx)
21664 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
21665- "D" (&pci_indirect)
21666+ "D" (&pci_indirect),
21667+ "r" (__PCIBIOS_DS)
21668 : "memory");
21669 local_irq_restore(flags);
21670
21671@@ -188,7 +234,10 @@ static int pci_bios_read(unsigned int se
21672
21673 switch (len) {
21674 case 1:
21675- __asm__("lcall *(%%esi); cld\n\t"
21676+ __asm__("movw %w6, %%ds\n\t"
21677+ "lcall *%%ss:(%%esi); cld\n\t"
21678+ "push %%ss\n\t"
21679+ "pop %%ds\n\t"
21680 "jc 1f\n\t"
21681 "xor %%ah, %%ah\n"
21682 "1:"
21683@@ -197,7 +246,8 @@ static int pci_bios_read(unsigned int se
21684 : "1" (PCIBIOS_READ_CONFIG_BYTE),
21685 "b" (bx),
21686 "D" ((long)reg),
21687- "S" (&pci_indirect));
21688+ "S" (&pci_indirect),
21689+ "r" (__PCIBIOS_DS));
21690 /*
21691 * Zero-extend the result beyond 8 bits, do not trust the
21692 * BIOS having done it:
21693@@ -205,7 +255,10 @@ static int pci_bios_read(unsigned int se
21694 *value &= 0xff;
21695 break;
21696 case 2:
21697- __asm__("lcall *(%%esi); cld\n\t"
21698+ __asm__("movw %w6, %%ds\n\t"
21699+ "lcall *%%ss:(%%esi); cld\n\t"
21700+ "push %%ss\n\t"
21701+ "pop %%ds\n\t"
21702 "jc 1f\n\t"
21703 "xor %%ah, %%ah\n"
21704 "1:"
21705@@ -214,7 +267,8 @@ static int pci_bios_read(unsigned int se
21706 : "1" (PCIBIOS_READ_CONFIG_WORD),
21707 "b" (bx),
21708 "D" ((long)reg),
21709- "S" (&pci_indirect));
21710+ "S" (&pci_indirect),
21711+ "r" (__PCIBIOS_DS));
21712 /*
21713 * Zero-extend the result beyond 16 bits, do not trust the
21714 * BIOS having done it:
21715@@ -222,7 +276,10 @@ static int pci_bios_read(unsigned int se
21716 *value &= 0xffff;
21717 break;
21718 case 4:
21719- __asm__("lcall *(%%esi); cld\n\t"
21720+ __asm__("movw %w6, %%ds\n\t"
21721+ "lcall *%%ss:(%%esi); cld\n\t"
21722+ "push %%ss\n\t"
21723+ "pop %%ds\n\t"
21724 "jc 1f\n\t"
21725 "xor %%ah, %%ah\n"
21726 "1:"
21727@@ -231,7 +288,8 @@ static int pci_bios_read(unsigned int se
21728 : "1" (PCIBIOS_READ_CONFIG_DWORD),
21729 "b" (bx),
21730 "D" ((long)reg),
21731- "S" (&pci_indirect));
21732+ "S" (&pci_indirect),
21733+ "r" (__PCIBIOS_DS));
21734 break;
21735 }
21736
21737@@ -254,7 +312,10 @@ static int pci_bios_write(unsigned int s
21738
21739 switch (len) {
21740 case 1:
21741- __asm__("lcall *(%%esi); cld\n\t"
21742+ __asm__("movw %w6, %%ds\n\t"
21743+ "lcall *%%ss:(%%esi); cld\n\t"
21744+ "push %%ss\n\t"
21745+ "pop %%ds\n\t"
21746 "jc 1f\n\t"
21747 "xor %%ah, %%ah\n"
21748 "1:"
21749@@ -263,10 +324,14 @@ static int pci_bios_write(unsigned int s
21750 "c" (value),
21751 "b" (bx),
21752 "D" ((long)reg),
21753- "S" (&pci_indirect));
21754+ "S" (&pci_indirect),
21755+ "r" (__PCIBIOS_DS));
21756 break;
21757 case 2:
21758- __asm__("lcall *(%%esi); cld\n\t"
21759+ __asm__("movw %w6, %%ds\n\t"
21760+ "lcall *%%ss:(%%esi); cld\n\t"
21761+ "push %%ss\n\t"
21762+ "pop %%ds\n\t"
21763 "jc 1f\n\t"
21764 "xor %%ah, %%ah\n"
21765 "1:"
21766@@ -275,10 +340,14 @@ static int pci_bios_write(unsigned int s
21767 "c" (value),
21768 "b" (bx),
21769 "D" ((long)reg),
21770- "S" (&pci_indirect));
21771+ "S" (&pci_indirect),
21772+ "r" (__PCIBIOS_DS));
21773 break;
21774 case 4:
21775- __asm__("lcall *(%%esi); cld\n\t"
21776+ __asm__("movw %w6, %%ds\n\t"
21777+ "lcall *%%ss:(%%esi); cld\n\t"
21778+ "push %%ss\n\t"
21779+ "pop %%ds\n\t"
21780 "jc 1f\n\t"
21781 "xor %%ah, %%ah\n"
21782 "1:"
21783@@ -287,7 +356,8 @@ static int pci_bios_write(unsigned int s
21784 "c" (value),
21785 "b" (bx),
21786 "D" ((long)reg),
21787- "S" (&pci_indirect));
21788+ "S" (&pci_indirect),
21789+ "r" (__PCIBIOS_DS));
21790 break;
21791 }
21792
21793@@ -392,10 +462,13 @@ struct irq_routing_table * pcibios_get_i
21794
21795 DBG("PCI: Fetching IRQ routing table... ");
21796 __asm__("push %%es\n\t"
21797+ "movw %w8, %%ds\n\t"
21798 "push %%ds\n\t"
21799 "pop %%es\n\t"
21800- "lcall *(%%esi); cld\n\t"
21801+ "lcall *%%ss:(%%esi); cld\n\t"
21802 "pop %%es\n\t"
21803+ "push %%ss\n\t"
21804+ "pop %%ds\n"
21805 "jc 1f\n\t"
21806 "xor %%ah, %%ah\n"
21807 "1:"
21808@@ -406,7 +479,8 @@ struct irq_routing_table * pcibios_get_i
21809 "1" (0),
21810 "D" ((long) &opt),
21811 "S" (&pci_indirect),
21812- "m" (opt)
21813+ "m" (opt),
21814+ "r" (__PCIBIOS_DS)
21815 : "memory");
21816 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
21817 if (ret & 0xff00)
21818@@ -430,7 +504,10 @@ int pcibios_set_irq_routing(struct pci_d
21819 {
21820 int ret;
21821
21822- __asm__("lcall *(%%esi); cld\n\t"
21823+ __asm__("movw %w5, %%ds\n\t"
21824+ "lcall *%%ss:(%%esi); cld\n\t"
21825+ "push %%ss\n\t"
21826+ "pop %%ds\n"
21827 "jc 1f\n\t"
21828 "xor %%ah, %%ah\n"
21829 "1:"
21830@@ -438,7 +515,8 @@ int pcibios_set_irq_routing(struct pci_d
21831 : "0" (PCIBIOS_SET_PCI_HW_INT),
21832 "b" ((dev->bus->number << 8) | dev->devfn),
21833 "c" ((irq << 8) | (pin + 10)),
21834- "S" (&pci_indirect));
21835+ "S" (&pci_indirect),
21836+ "r" (__PCIBIOS_DS));
21837 return !(ret & 0xff00);
21838 }
21839 EXPORT_SYMBOL(pcibios_set_irq_routing);
21840diff -urNp linux-3.0.7/arch/x86/platform/efi/efi_32.c linux-3.0.7/arch/x86/platform/efi/efi_32.c
21841--- linux-3.0.7/arch/x86/platform/efi/efi_32.c 2011-07-21 22:17:23.000000000 -0400
21842+++ linux-3.0.7/arch/x86/platform/efi/efi_32.c 2011-10-06 04:17:55.000000000 -0400
21843@@ -38,70 +38,56 @@
21844 */
21845
21846 static unsigned long efi_rt_eflags;
21847-static pgd_t efi_bak_pg_dir_pointer[2];
21848+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
21849
21850-void efi_call_phys_prelog(void)
21851+void __init efi_call_phys_prelog(void)
21852 {
21853- unsigned long cr4;
21854- unsigned long temp;
21855 struct desc_ptr gdt_descr;
21856
21857- local_irq_save(efi_rt_eflags);
21858+#ifdef CONFIG_PAX_KERNEXEC
21859+ struct desc_struct d;
21860+#endif
21861
21862- /*
21863- * If I don't have PAE, I should just duplicate two entries in page
21864- * directory. If I have PAE, I just need to duplicate one entry in
21865- * page directory.
21866- */
21867- cr4 = read_cr4_safe();
21868+ local_irq_save(efi_rt_eflags);
21869
21870- if (cr4 & X86_CR4_PAE) {
21871- efi_bak_pg_dir_pointer[0].pgd =
21872- swapper_pg_dir[pgd_index(0)].pgd;
21873- swapper_pg_dir[0].pgd =
21874- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
21875- } else {
21876- efi_bak_pg_dir_pointer[0].pgd =
21877- swapper_pg_dir[pgd_index(0)].pgd;
21878- efi_bak_pg_dir_pointer[1].pgd =
21879- swapper_pg_dir[pgd_index(0x400000)].pgd;
21880- swapper_pg_dir[pgd_index(0)].pgd =
21881- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
21882- temp = PAGE_OFFSET + 0x400000;
21883- swapper_pg_dir[pgd_index(0x400000)].pgd =
21884- swapper_pg_dir[pgd_index(temp)].pgd;
21885- }
21886+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
21887+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21888+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
21889
21890 /*
21891 * After the lock is released, the original page table is restored.
21892 */
21893 __flush_tlb_all();
21894
21895+#ifdef CONFIG_PAX_KERNEXEC
21896+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
21897+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
21898+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
21899+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
21900+#endif
21901+
21902 gdt_descr.address = __pa(get_cpu_gdt_table(0));
21903 gdt_descr.size = GDT_SIZE - 1;
21904 load_gdt(&gdt_descr);
21905 }
21906
21907-void efi_call_phys_epilog(void)
21908+void __init efi_call_phys_epilog(void)
21909 {
21910- unsigned long cr4;
21911 struct desc_ptr gdt_descr;
21912
21913+#ifdef CONFIG_PAX_KERNEXEC
21914+ struct desc_struct d;
21915+
21916+ memset(&d, 0, sizeof d);
21917+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
21918+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
21919+#endif
21920+
21921 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
21922 gdt_descr.size = GDT_SIZE - 1;
21923 load_gdt(&gdt_descr);
21924
21925- cr4 = read_cr4_safe();
21926-
21927- if (cr4 & X86_CR4_PAE) {
21928- swapper_pg_dir[pgd_index(0)].pgd =
21929- efi_bak_pg_dir_pointer[0].pgd;
21930- } else {
21931- swapper_pg_dir[pgd_index(0)].pgd =
21932- efi_bak_pg_dir_pointer[0].pgd;
21933- swapper_pg_dir[pgd_index(0x400000)].pgd =
21934- efi_bak_pg_dir_pointer[1].pgd;
21935- }
21936+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
21937
21938 /*
21939 * After the lock is released, the original page table is restored.
21940diff -urNp linux-3.0.7/arch/x86/platform/efi/efi_stub_32.S linux-3.0.7/arch/x86/platform/efi/efi_stub_32.S
21941--- linux-3.0.7/arch/x86/platform/efi/efi_stub_32.S 2011-07-21 22:17:23.000000000 -0400
21942+++ linux-3.0.7/arch/x86/platform/efi/efi_stub_32.S 2011-09-19 09:16:58.000000000 -0400
21943@@ -6,7 +6,9 @@
21944 */
21945
21946 #include <linux/linkage.h>
21947+#include <linux/init.h>
21948 #include <asm/page_types.h>
21949+#include <asm/segment.h>
21950
21951 /*
21952 * efi_call_phys(void *, ...) is a function with variable parameters.
21953@@ -20,7 +22,7 @@
21954 * service functions will comply with gcc calling convention, too.
21955 */
21956
21957-.text
21958+__INIT
21959 ENTRY(efi_call_phys)
21960 /*
21961 * 0. The function can only be called in Linux kernel. So CS has been
21962@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
21963 * The mapping of lower virtual memory has been created in prelog and
21964 * epilog.
21965 */
21966- movl $1f, %edx
21967- subl $__PAGE_OFFSET, %edx
21968- jmp *%edx
21969+ movl $(__KERNEXEC_EFI_DS), %edx
21970+ mov %edx, %ds
21971+ mov %edx, %es
21972+ mov %edx, %ss
21973+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
21974 1:
21975
21976 /*
21977@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
21978 * parameter 2, ..., param n. To make things easy, we save the return
21979 * address of efi_call_phys in a global variable.
21980 */
21981- popl %edx
21982- movl %edx, saved_return_addr
21983- /* get the function pointer into ECX*/
21984- popl %ecx
21985- movl %ecx, efi_rt_function_ptr
21986- movl $2f, %edx
21987- subl $__PAGE_OFFSET, %edx
21988- pushl %edx
21989+ popl (saved_return_addr)
21990+ popl (efi_rt_function_ptr)
21991
21992 /*
21993 * 3. Clear PG bit in %CR0.
21994@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
21995 /*
21996 * 5. Call the physical function.
21997 */
21998- jmp *%ecx
21999+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
22000
22001-2:
22002 /*
22003 * 6. After EFI runtime service returns, control will return to
22004 * following instruction. We'd better readjust stack pointer first.
22005@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
22006 movl %cr0, %edx
22007 orl $0x80000000, %edx
22008 movl %edx, %cr0
22009- jmp 1f
22010-1:
22011+
22012 /*
22013 * 8. Now restore the virtual mode from flat mode by
22014 * adding EIP with PAGE_OFFSET.
22015 */
22016- movl $1f, %edx
22017- jmp *%edx
22018+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
22019 1:
22020+ movl $(__KERNEL_DS), %edx
22021+ mov %edx, %ds
22022+ mov %edx, %es
22023+ mov %edx, %ss
22024
22025 /*
22026 * 9. Balance the stack. And because EAX contain the return value,
22027 * we'd better not clobber it.
22028 */
22029- leal efi_rt_function_ptr, %edx
22030- movl (%edx), %ecx
22031- pushl %ecx
22032+ pushl (efi_rt_function_ptr)
22033
22034 /*
22035- * 10. Push the saved return address onto the stack and return.
22036+ * 10. Return to the saved return address.
22037 */
22038- leal saved_return_addr, %edx
22039- movl (%edx), %ecx
22040- pushl %ecx
22041- ret
22042+ jmpl *(saved_return_addr)
22043 ENDPROC(efi_call_phys)
22044 .previous
22045
22046-.data
22047+__INITDATA
22048 saved_return_addr:
22049 .long 0
22050 efi_rt_function_ptr:
22051diff -urNp linux-3.0.7/arch/x86/platform/efi/efi_stub_64.S linux-3.0.7/arch/x86/platform/efi/efi_stub_64.S
22052--- linux-3.0.7/arch/x86/platform/efi/efi_stub_64.S 2011-07-21 22:17:23.000000000 -0400
22053+++ linux-3.0.7/arch/x86/platform/efi/efi_stub_64.S 2011-10-06 04:17:55.000000000 -0400
22054@@ -7,6 +7,7 @@
22055 */
22056
22057 #include <linux/linkage.h>
22058+#include <asm/alternative-asm.h>
22059
22060 #define SAVE_XMM \
22061 mov %rsp, %rax; \
22062@@ -40,6 +41,7 @@ ENTRY(efi_call0)
22063 call *%rdi
22064 addq $32, %rsp
22065 RESTORE_XMM
22066+ pax_force_retaddr
22067 ret
22068 ENDPROC(efi_call0)
22069
22070@@ -50,6 +52,7 @@ ENTRY(efi_call1)
22071 call *%rdi
22072 addq $32, %rsp
22073 RESTORE_XMM
22074+ pax_force_retaddr
22075 ret
22076 ENDPROC(efi_call1)
22077
22078@@ -60,6 +63,7 @@ ENTRY(efi_call2)
22079 call *%rdi
22080 addq $32, %rsp
22081 RESTORE_XMM
22082+ pax_force_retaddr
22083 ret
22084 ENDPROC(efi_call2)
22085
22086@@ -71,6 +75,7 @@ ENTRY(efi_call3)
22087 call *%rdi
22088 addq $32, %rsp
22089 RESTORE_XMM
22090+ pax_force_retaddr
22091 ret
22092 ENDPROC(efi_call3)
22093
22094@@ -83,6 +88,7 @@ ENTRY(efi_call4)
22095 call *%rdi
22096 addq $32, %rsp
22097 RESTORE_XMM
22098+ pax_force_retaddr
22099 ret
22100 ENDPROC(efi_call4)
22101
22102@@ -96,6 +102,7 @@ ENTRY(efi_call5)
22103 call *%rdi
22104 addq $48, %rsp
22105 RESTORE_XMM
22106+ pax_force_retaddr
22107 ret
22108 ENDPROC(efi_call5)
22109
22110@@ -112,5 +119,6 @@ ENTRY(efi_call6)
22111 call *%rdi
22112 addq $48, %rsp
22113 RESTORE_XMM
22114+ pax_force_retaddr
22115 ret
22116 ENDPROC(efi_call6)
22117diff -urNp linux-3.0.7/arch/x86/platform/mrst/mrst.c linux-3.0.7/arch/x86/platform/mrst/mrst.c
22118--- linux-3.0.7/arch/x86/platform/mrst/mrst.c 2011-07-21 22:17:23.000000000 -0400
22119+++ linux-3.0.7/arch/x86/platform/mrst/mrst.c 2011-08-23 21:47:55.000000000 -0400
22120@@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
22121 }
22122
22123 /* Reboot and power off are handled by the SCU on a MID device */
22124-static void mrst_power_off(void)
22125+static __noreturn void mrst_power_off(void)
22126 {
22127 intel_scu_ipc_simple_command(0xf1, 1);
22128+ BUG();
22129 }
22130
22131-static void mrst_reboot(void)
22132+static __noreturn void mrst_reboot(void)
22133 {
22134 intel_scu_ipc_simple_command(0xf1, 0);
22135+ BUG();
22136 }
22137
22138 /*
22139diff -urNp linux-3.0.7/arch/x86/platform/uv/tlb_uv.c linux-3.0.7/arch/x86/platform/uv/tlb_uv.c
22140--- linux-3.0.7/arch/x86/platform/uv/tlb_uv.c 2011-07-21 22:17:23.000000000 -0400
22141+++ linux-3.0.7/arch/x86/platform/uv/tlb_uv.c 2011-08-23 21:48:14.000000000 -0400
22142@@ -373,6 +373,8 @@ static void reset_with_ipi(struct bau_ta
22143 cpumask_t mask;
22144 struct reset_args reset_args;
22145
22146+ pax_track_stack();
22147+
22148 reset_args.sender = sender;
22149 cpus_clear(mask);
22150 /* find a single cpu for each uvhub in this distribution mask */
22151diff -urNp linux-3.0.7/arch/x86/power/cpu.c linux-3.0.7/arch/x86/power/cpu.c
22152--- linux-3.0.7/arch/x86/power/cpu.c 2011-07-21 22:17:23.000000000 -0400
22153+++ linux-3.0.7/arch/x86/power/cpu.c 2011-08-23 21:47:55.000000000 -0400
22154@@ -130,7 +130,7 @@ static void do_fpu_end(void)
22155 static void fix_processor_context(void)
22156 {
22157 int cpu = smp_processor_id();
22158- struct tss_struct *t = &per_cpu(init_tss, cpu);
22159+ struct tss_struct *t = init_tss + cpu;
22160
22161 set_tss_desc(cpu, t); /*
22162 * This just modifies memory; should not be
22163@@ -140,7 +140,9 @@ static void fix_processor_context(void)
22164 */
22165
22166 #ifdef CONFIG_X86_64
22167+ pax_open_kernel();
22168 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
22169+ pax_close_kernel();
22170
22171 syscall_init(); /* This sets MSR_*STAR and related */
22172 #endif
22173diff -urNp linux-3.0.7/arch/x86/vdso/Makefile linux-3.0.7/arch/x86/vdso/Makefile
22174--- linux-3.0.7/arch/x86/vdso/Makefile 2011-07-21 22:17:23.000000000 -0400
22175+++ linux-3.0.7/arch/x86/vdso/Makefile 2011-08-23 21:47:55.000000000 -0400
22176@@ -136,7 +136,7 @@ quiet_cmd_vdso = VDSO $@
22177 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
22178 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
22179
22180-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22181+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22182 GCOV_PROFILE := n
22183
22184 #
22185diff -urNp linux-3.0.7/arch/x86/vdso/vdso32-setup.c linux-3.0.7/arch/x86/vdso/vdso32-setup.c
22186--- linux-3.0.7/arch/x86/vdso/vdso32-setup.c 2011-07-21 22:17:23.000000000 -0400
22187+++ linux-3.0.7/arch/x86/vdso/vdso32-setup.c 2011-08-23 21:47:55.000000000 -0400
22188@@ -25,6 +25,7 @@
22189 #include <asm/tlbflush.h>
22190 #include <asm/vdso.h>
22191 #include <asm/proto.h>
22192+#include <asm/mman.h>
22193
22194 enum {
22195 VDSO_DISABLED = 0,
22196@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
22197 void enable_sep_cpu(void)
22198 {
22199 int cpu = get_cpu();
22200- struct tss_struct *tss = &per_cpu(init_tss, cpu);
22201+ struct tss_struct *tss = init_tss + cpu;
22202
22203 if (!boot_cpu_has(X86_FEATURE_SEP)) {
22204 put_cpu();
22205@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
22206 gate_vma.vm_start = FIXADDR_USER_START;
22207 gate_vma.vm_end = FIXADDR_USER_END;
22208 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
22209- gate_vma.vm_page_prot = __P101;
22210+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
22211 /*
22212 * Make sure the vDSO gets into every core dump.
22213 * Dumping its contents makes post-mortem fully interpretable later
22214@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
22215 if (compat)
22216 addr = VDSO_HIGH_BASE;
22217 else {
22218- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
22219+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
22220 if (IS_ERR_VALUE(addr)) {
22221 ret = addr;
22222 goto up_fail;
22223 }
22224 }
22225
22226- current->mm->context.vdso = (void *)addr;
22227+ current->mm->context.vdso = addr;
22228
22229 if (compat_uses_vma || !compat) {
22230 /*
22231@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
22232 }
22233
22234 current_thread_info()->sysenter_return =
22235- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22236+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22237
22238 up_fail:
22239 if (ret)
22240- current->mm->context.vdso = NULL;
22241+ current->mm->context.vdso = 0;
22242
22243 up_write(&mm->mmap_sem);
22244
22245@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
22246
22247 const char *arch_vma_name(struct vm_area_struct *vma)
22248 {
22249- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22250+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22251 return "[vdso]";
22252+
22253+#ifdef CONFIG_PAX_SEGMEXEC
22254+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
22255+ return "[vdso]";
22256+#endif
22257+
22258 return NULL;
22259 }
22260
22261@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
22262 * Check to see if the corresponding task was created in compat vdso
22263 * mode.
22264 */
22265- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
22266+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
22267 return &gate_vma;
22268 return NULL;
22269 }
22270diff -urNp linux-3.0.7/arch/x86/vdso/vma.c linux-3.0.7/arch/x86/vdso/vma.c
22271--- linux-3.0.7/arch/x86/vdso/vma.c 2011-07-21 22:17:23.000000000 -0400
22272+++ linux-3.0.7/arch/x86/vdso/vma.c 2011-08-23 21:47:55.000000000 -0400
22273@@ -15,18 +15,19 @@
22274 #include <asm/proto.h>
22275 #include <asm/vdso.h>
22276
22277-unsigned int __read_mostly vdso_enabled = 1;
22278-
22279 extern char vdso_start[], vdso_end[];
22280 extern unsigned short vdso_sync_cpuid;
22281+extern char __vsyscall_0;
22282
22283 static struct page **vdso_pages;
22284+static struct page *vsyscall_page;
22285 static unsigned vdso_size;
22286
22287 static int __init init_vdso_vars(void)
22288 {
22289- int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
22290- int i;
22291+ size_t nbytes = vdso_end - vdso_start;
22292+ size_t npages = (nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
22293+ size_t i;
22294
22295 vdso_size = npages << PAGE_SHIFT;
22296 vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
22297@@ -34,19 +35,19 @@ static int __init init_vdso_vars(void)
22298 goto oom;
22299 for (i = 0; i < npages; i++) {
22300 struct page *p;
22301- p = alloc_page(GFP_KERNEL);
22302+ p = alloc_page(GFP_KERNEL | __GFP_ZERO);
22303 if (!p)
22304 goto oom;
22305 vdso_pages[i] = p;
22306- copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
22307+ memcpy(page_address(p), vdso_start + i*PAGE_SIZE, nbytes > PAGE_SIZE ? PAGE_SIZE : nbytes);
22308+ nbytes -= PAGE_SIZE;
22309 }
22310+ vsyscall_page = pfn_to_page((__pa_symbol(&__vsyscall_0)) >> PAGE_SHIFT);
22311
22312 return 0;
22313
22314 oom:
22315- printk("Cannot allocate vdso\n");
22316- vdso_enabled = 0;
22317- return -ENOMEM;
22318+ panic("Cannot allocate vdso\n");
22319 }
22320 subsys_initcall(init_vdso_vars);
22321
22322@@ -80,37 +81,35 @@ int arch_setup_additional_pages(struct l
22323 unsigned long addr;
22324 int ret;
22325
22326- if (!vdso_enabled)
22327- return 0;
22328-
22329 down_write(&mm->mmap_sem);
22330- addr = vdso_addr(mm->start_stack, vdso_size);
22331- addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
22332+ addr = vdso_addr(mm->start_stack, vdso_size + PAGE_SIZE);
22333+ addr = get_unmapped_area(NULL, addr, vdso_size + PAGE_SIZE, 0, 0);
22334 if (IS_ERR_VALUE(addr)) {
22335 ret = addr;
22336 goto up_fail;
22337 }
22338
22339- current->mm->context.vdso = (void *)addr;
22340+ mm->context.vdso = addr + PAGE_SIZE;
22341
22342- ret = install_special_mapping(mm, addr, vdso_size,
22343+ ret = install_special_mapping(mm, addr, PAGE_SIZE,
22344 VM_READ|VM_EXEC|
22345- VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
22346+ VM_MAYREAD|VM_MAYEXEC|
22347 VM_ALWAYSDUMP,
22348- vdso_pages);
22349+ &vsyscall_page);
22350 if (ret) {
22351- current->mm->context.vdso = NULL;
22352+ mm->context.vdso = 0;
22353 goto up_fail;
22354 }
22355
22356+ ret = install_special_mapping(mm, addr + PAGE_SIZE, vdso_size,
22357+ VM_READ|VM_EXEC|
22358+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
22359+ VM_ALWAYSDUMP,
22360+ vdso_pages);
22361+ if (ret)
22362+ mm->context.vdso = 0;
22363+
22364 up_fail:
22365 up_write(&mm->mmap_sem);
22366 return ret;
22367 }
22368-
22369-static __init int vdso_setup(char *s)
22370-{
22371- vdso_enabled = simple_strtoul(s, NULL, 0);
22372- return 0;
22373-}
22374-__setup("vdso=", vdso_setup);
22375diff -urNp linux-3.0.7/arch/x86/xen/enlighten.c linux-3.0.7/arch/x86/xen/enlighten.c
22376--- linux-3.0.7/arch/x86/xen/enlighten.c 2011-09-02 18:11:26.000000000 -0400
22377+++ linux-3.0.7/arch/x86/xen/enlighten.c 2011-08-29 23:26:21.000000000 -0400
22378@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
22379
22380 struct shared_info xen_dummy_shared_info;
22381
22382-void *xen_initial_gdt;
22383-
22384 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
22385 __read_mostly int xen_have_vector_callback;
22386 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
22387@@ -1010,7 +1008,7 @@ static const struct pv_apic_ops xen_apic
22388 #endif
22389 };
22390
22391-static void xen_reboot(int reason)
22392+static __noreturn void xen_reboot(int reason)
22393 {
22394 struct sched_shutdown r = { .reason = reason };
22395
22396@@ -1018,17 +1016,17 @@ static void xen_reboot(int reason)
22397 BUG();
22398 }
22399
22400-static void xen_restart(char *msg)
22401+static __noreturn void xen_restart(char *msg)
22402 {
22403 xen_reboot(SHUTDOWN_reboot);
22404 }
22405
22406-static void xen_emergency_restart(void)
22407+static __noreturn void xen_emergency_restart(void)
22408 {
22409 xen_reboot(SHUTDOWN_reboot);
22410 }
22411
22412-static void xen_machine_halt(void)
22413+static __noreturn void xen_machine_halt(void)
22414 {
22415 xen_reboot(SHUTDOWN_poweroff);
22416 }
22417@@ -1134,7 +1132,17 @@ asmlinkage void __init xen_start_kernel(
22418 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
22419
22420 /* Work out if we support NX */
22421- x86_configure_nx();
22422+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22423+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
22424+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
22425+ unsigned l, h;
22426+
22427+ __supported_pte_mask |= _PAGE_NX;
22428+ rdmsr(MSR_EFER, l, h);
22429+ l |= EFER_NX;
22430+ wrmsr(MSR_EFER, l, h);
22431+ }
22432+#endif
22433
22434 xen_setup_features();
22435
22436@@ -1165,13 +1173,6 @@ asmlinkage void __init xen_start_kernel(
22437
22438 machine_ops = xen_machine_ops;
22439
22440- /*
22441- * The only reliable way to retain the initial address of the
22442- * percpu gdt_page is to remember it here, so we can go and
22443- * mark it RW later, when the initial percpu area is freed.
22444- */
22445- xen_initial_gdt = &per_cpu(gdt_page, 0);
22446-
22447 xen_smp_init();
22448
22449 #ifdef CONFIG_ACPI_NUMA
22450diff -urNp linux-3.0.7/arch/x86/xen/mmu.c linux-3.0.7/arch/x86/xen/mmu.c
22451--- linux-3.0.7/arch/x86/xen/mmu.c 2011-09-02 18:11:26.000000000 -0400
22452+++ linux-3.0.7/arch/x86/xen/mmu.c 2011-08-29 23:26:21.000000000 -0400
22453@@ -1683,6 +1683,8 @@ pgd_t * __init xen_setup_kernel_pagetabl
22454 convert_pfn_mfn(init_level4_pgt);
22455 convert_pfn_mfn(level3_ident_pgt);
22456 convert_pfn_mfn(level3_kernel_pgt);
22457+ convert_pfn_mfn(level3_vmalloc_pgt);
22458+ convert_pfn_mfn(level3_vmemmap_pgt);
22459
22460 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
22461 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
22462@@ -1701,7 +1703,10 @@ pgd_t * __init xen_setup_kernel_pagetabl
22463 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
22464 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
22465 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
22466+ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
22467+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
22468 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
22469+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
22470 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
22471 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
22472
22473@@ -1913,6 +1918,7 @@ static void __init xen_post_allocator_in
22474 pv_mmu_ops.set_pud = xen_set_pud;
22475 #if PAGETABLE_LEVELS == 4
22476 pv_mmu_ops.set_pgd = xen_set_pgd;
22477+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
22478 #endif
22479
22480 /* This will work as long as patching hasn't happened yet
22481@@ -1994,6 +2000,7 @@ static const struct pv_mmu_ops xen_mmu_o
22482 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
22483 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
22484 .set_pgd = xen_set_pgd_hyper,
22485+ .set_pgd_batched = xen_set_pgd_hyper,
22486
22487 .alloc_pud = xen_alloc_pmd_init,
22488 .release_pud = xen_release_pmd_init,
22489diff -urNp linux-3.0.7/arch/x86/xen/smp.c linux-3.0.7/arch/x86/xen/smp.c
22490--- linux-3.0.7/arch/x86/xen/smp.c 2011-10-16 21:54:53.000000000 -0400
22491+++ linux-3.0.7/arch/x86/xen/smp.c 2011-10-16 21:55:27.000000000 -0400
22492@@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_
22493 {
22494 BUG_ON(smp_processor_id() != 0);
22495 native_smp_prepare_boot_cpu();
22496-
22497- /* We've switched to the "real" per-cpu gdt, so make sure the
22498- old memory can be recycled */
22499- make_lowmem_page_readwrite(xen_initial_gdt);
22500-
22501 xen_filter_cpu_maps();
22502 xen_setup_vcpu_info_placement();
22503 }
22504@@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu,
22505 gdt = get_cpu_gdt_table(cpu);
22506
22507 ctxt->flags = VGCF_IN_KERNEL;
22508- ctxt->user_regs.ds = __USER_DS;
22509- ctxt->user_regs.es = __USER_DS;
22510+ ctxt->user_regs.ds = __KERNEL_DS;
22511+ ctxt->user_regs.es = __KERNEL_DS;
22512 ctxt->user_regs.ss = __KERNEL_DS;
22513 #ifdef CONFIG_X86_32
22514 ctxt->user_regs.fs = __KERNEL_PERCPU;
22515- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
22516+ savesegment(gs, ctxt->user_regs.gs);
22517 #else
22518 ctxt->gs_base_kernel = per_cpu_offset(cpu);
22519 #endif
22520@@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned
22521 int rc;
22522
22523 per_cpu(current_task, cpu) = idle;
22524+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
22525 #ifdef CONFIG_X86_32
22526 irq_ctx_init(cpu);
22527 #else
22528 clear_tsk_thread_flag(idle, TIF_FORK);
22529- per_cpu(kernel_stack, cpu) =
22530- (unsigned long)task_stack_page(idle) -
22531- KERNEL_STACK_OFFSET + THREAD_SIZE;
22532+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
22533 #endif
22534 xen_setup_runstate_info(cpu);
22535 xen_setup_timer(cpu);
22536diff -urNp linux-3.0.7/arch/x86/xen/xen-asm_32.S linux-3.0.7/arch/x86/xen/xen-asm_32.S
22537--- linux-3.0.7/arch/x86/xen/xen-asm_32.S 2011-10-16 21:54:53.000000000 -0400
22538+++ linux-3.0.7/arch/x86/xen/xen-asm_32.S 2011-10-16 21:55:27.000000000 -0400
22539@@ -83,14 +83,14 @@ ENTRY(xen_iret)
22540 ESP_OFFSET=4 # bytes pushed onto stack
22541
22542 /*
22543- * Store vcpu_info pointer for easy access. Do it this way to
22544- * avoid having to reload %fs
22545+ * Store vcpu_info pointer for easy access.
22546 */
22547 #ifdef CONFIG_SMP
22548- GET_THREAD_INFO(%eax)
22549- movl TI_cpu(%eax), %eax
22550- movl __per_cpu_offset(,%eax,4), %eax
22551- mov xen_vcpu(%eax), %eax
22552+ push %fs
22553+ mov $(__KERNEL_PERCPU), %eax
22554+ mov %eax, %fs
22555+ mov PER_CPU_VAR(xen_vcpu), %eax
22556+ pop %fs
22557 #else
22558 movl xen_vcpu, %eax
22559 #endif
22560diff -urNp linux-3.0.7/arch/x86/xen/xen-head.S linux-3.0.7/arch/x86/xen/xen-head.S
22561--- linux-3.0.7/arch/x86/xen/xen-head.S 2011-07-21 22:17:23.000000000 -0400
22562+++ linux-3.0.7/arch/x86/xen/xen-head.S 2011-08-23 21:47:55.000000000 -0400
22563@@ -19,6 +19,17 @@ ENTRY(startup_xen)
22564 #ifdef CONFIG_X86_32
22565 mov %esi,xen_start_info
22566 mov $init_thread_union+THREAD_SIZE,%esp
22567+#ifdef CONFIG_SMP
22568+ movl $cpu_gdt_table,%edi
22569+ movl $__per_cpu_load,%eax
22570+ movw %ax,__KERNEL_PERCPU + 2(%edi)
22571+ rorl $16,%eax
22572+ movb %al,__KERNEL_PERCPU + 4(%edi)
22573+ movb %ah,__KERNEL_PERCPU + 7(%edi)
22574+ movl $__per_cpu_end - 1,%eax
22575+ subl $__per_cpu_start,%eax
22576+ movw %ax,__KERNEL_PERCPU + 0(%edi)
22577+#endif
22578 #else
22579 mov %rsi,xen_start_info
22580 mov $init_thread_union+THREAD_SIZE,%rsp
22581diff -urNp linux-3.0.7/arch/x86/xen/xen-ops.h linux-3.0.7/arch/x86/xen/xen-ops.h
22582--- linux-3.0.7/arch/x86/xen/xen-ops.h 2011-09-02 18:11:21.000000000 -0400
22583+++ linux-3.0.7/arch/x86/xen/xen-ops.h 2011-08-23 21:47:55.000000000 -0400
22584@@ -10,8 +10,6 @@
22585 extern const char xen_hypervisor_callback[];
22586 extern const char xen_failsafe_callback[];
22587
22588-extern void *xen_initial_gdt;
22589-
22590 struct trap_info;
22591 void xen_copy_trap_info(struct trap_info *traps);
22592
22593diff -urNp linux-3.0.7/block/blk-iopoll.c linux-3.0.7/block/blk-iopoll.c
22594--- linux-3.0.7/block/blk-iopoll.c 2011-07-21 22:17:23.000000000 -0400
22595+++ linux-3.0.7/block/blk-iopoll.c 2011-08-23 21:47:55.000000000 -0400
22596@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
22597 }
22598 EXPORT_SYMBOL(blk_iopoll_complete);
22599
22600-static void blk_iopoll_softirq(struct softirq_action *h)
22601+static void blk_iopoll_softirq(void)
22602 {
22603 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
22604 int rearm = 0, budget = blk_iopoll_budget;
22605diff -urNp linux-3.0.7/block/blk-map.c linux-3.0.7/block/blk-map.c
22606--- linux-3.0.7/block/blk-map.c 2011-07-21 22:17:23.000000000 -0400
22607+++ linux-3.0.7/block/blk-map.c 2011-08-23 21:47:55.000000000 -0400
22608@@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue
22609 if (!len || !kbuf)
22610 return -EINVAL;
22611
22612- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
22613+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
22614 if (do_copy)
22615 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
22616 else
22617diff -urNp linux-3.0.7/block/blk-softirq.c linux-3.0.7/block/blk-softirq.c
22618--- linux-3.0.7/block/blk-softirq.c 2011-07-21 22:17:23.000000000 -0400
22619+++ linux-3.0.7/block/blk-softirq.c 2011-08-23 21:47:55.000000000 -0400
22620@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
22621 * Softirq action handler - move entries to local list and loop over them
22622 * while passing them to the queue registered handler.
22623 */
22624-static void blk_done_softirq(struct softirq_action *h)
22625+static void blk_done_softirq(void)
22626 {
22627 struct list_head *cpu_list, local_list;
22628
22629diff -urNp linux-3.0.7/block/bsg.c linux-3.0.7/block/bsg.c
22630--- linux-3.0.7/block/bsg.c 2011-07-21 22:17:23.000000000 -0400
22631+++ linux-3.0.7/block/bsg.c 2011-10-06 04:17:55.000000000 -0400
22632@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
22633 struct sg_io_v4 *hdr, struct bsg_device *bd,
22634 fmode_t has_write_perm)
22635 {
22636+ unsigned char tmpcmd[sizeof(rq->__cmd)];
22637+ unsigned char *cmdptr;
22638+
22639 if (hdr->request_len > BLK_MAX_CDB) {
22640 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
22641 if (!rq->cmd)
22642 return -ENOMEM;
22643- }
22644+ cmdptr = rq->cmd;
22645+ } else
22646+ cmdptr = tmpcmd;
22647
22648- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
22649+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
22650 hdr->request_len))
22651 return -EFAULT;
22652
22653+ if (cmdptr != rq->cmd)
22654+ memcpy(rq->cmd, cmdptr, hdr->request_len);
22655+
22656 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
22657 if (blk_verify_command(rq->cmd, has_write_perm))
22658 return -EPERM;
22659@@ -249,7 +257,7 @@ bsg_map_hdr(struct bsg_device *bd, struc
22660 struct request *rq, *next_rq = NULL;
22661 int ret, rw;
22662 unsigned int dxfer_len;
22663- void *dxferp = NULL;
22664+ void __user *dxferp = NULL;
22665 struct bsg_class_device *bcd = &q->bsg_dev;
22666
22667 /* if the LLD has been removed then the bsg_unregister_queue will
22668@@ -291,7 +299,7 @@ bsg_map_hdr(struct bsg_device *bd, struc
22669 rq->next_rq = next_rq;
22670 next_rq->cmd_type = rq->cmd_type;
22671
22672- dxferp = (void*)(unsigned long)hdr->din_xferp;
22673+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
22674 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
22675 hdr->din_xfer_len, GFP_KERNEL);
22676 if (ret)
22677@@ -300,10 +308,10 @@ bsg_map_hdr(struct bsg_device *bd, struc
22678
22679 if (hdr->dout_xfer_len) {
22680 dxfer_len = hdr->dout_xfer_len;
22681- dxferp = (void*)(unsigned long)hdr->dout_xferp;
22682+ dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
22683 } else if (hdr->din_xfer_len) {
22684 dxfer_len = hdr->din_xfer_len;
22685- dxferp = (void*)(unsigned long)hdr->din_xferp;
22686+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
22687 } else
22688 dxfer_len = 0;
22689
22690@@ -445,7 +453,7 @@ static int blk_complete_sgv4_hdr_rq(stru
22691 int len = min_t(unsigned int, hdr->max_response_len,
22692 rq->sense_len);
22693
22694- ret = copy_to_user((void*)(unsigned long)hdr->response,
22695+ ret = copy_to_user((void __user *)(unsigned long)hdr->response,
22696 rq->sense, len);
22697 if (!ret)
22698 hdr->response_len = len;
22699diff -urNp linux-3.0.7/block/compat_ioctl.c linux-3.0.7/block/compat_ioctl.c
22700--- linux-3.0.7/block/compat_ioctl.c 2011-07-21 22:17:23.000000000 -0400
22701+++ linux-3.0.7/block/compat_ioctl.c 2011-10-06 04:17:55.000000000 -0400
22702@@ -354,7 +354,7 @@ static int compat_fd_ioctl(struct block_
22703 err |= __get_user(f->spec1, &uf->spec1);
22704 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
22705 err |= __get_user(name, &uf->name);
22706- f->name = compat_ptr(name);
22707+ f->name = (void __force_kernel *)compat_ptr(name);
22708 if (err) {
22709 err = -EFAULT;
22710 goto out;
22711diff -urNp linux-3.0.7/block/scsi_ioctl.c linux-3.0.7/block/scsi_ioctl.c
22712--- linux-3.0.7/block/scsi_ioctl.c 2011-07-21 22:17:23.000000000 -0400
22713+++ linux-3.0.7/block/scsi_ioctl.c 2011-08-23 21:47:55.000000000 -0400
22714@@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
22715 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
22716 struct sg_io_hdr *hdr, fmode_t mode)
22717 {
22718- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
22719+ unsigned char tmpcmd[sizeof(rq->__cmd)];
22720+ unsigned char *cmdptr;
22721+
22722+ if (rq->cmd != rq->__cmd)
22723+ cmdptr = rq->cmd;
22724+ else
22725+ cmdptr = tmpcmd;
22726+
22727+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
22728 return -EFAULT;
22729+
22730+ if (cmdptr != rq->cmd)
22731+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
22732+
22733 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
22734 return -EPERM;
22735
22736@@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
22737 int err;
22738 unsigned int in_len, out_len, bytes, opcode, cmdlen;
22739 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
22740+ unsigned char tmpcmd[sizeof(rq->__cmd)];
22741+ unsigned char *cmdptr;
22742
22743 if (!sic)
22744 return -EINVAL;
22745@@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
22746 */
22747 err = -EFAULT;
22748 rq->cmd_len = cmdlen;
22749- if (copy_from_user(rq->cmd, sic->data, cmdlen))
22750+
22751+ if (rq->cmd != rq->__cmd)
22752+ cmdptr = rq->cmd;
22753+ else
22754+ cmdptr = tmpcmd;
22755+
22756+ if (copy_from_user(cmdptr, sic->data, cmdlen))
22757 goto error;
22758
22759+ if (rq->cmd != cmdptr)
22760+ memcpy(rq->cmd, cmdptr, cmdlen);
22761+
22762 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
22763 goto error;
22764
22765diff -urNp linux-3.0.7/crypto/cryptd.c linux-3.0.7/crypto/cryptd.c
22766--- linux-3.0.7/crypto/cryptd.c 2011-07-21 22:17:23.000000000 -0400
22767+++ linux-3.0.7/crypto/cryptd.c 2011-08-23 21:47:55.000000000 -0400
22768@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
22769
22770 struct cryptd_blkcipher_request_ctx {
22771 crypto_completion_t complete;
22772-};
22773+} __no_const;
22774
22775 struct cryptd_hash_ctx {
22776 struct crypto_shash *child;
22777@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
22778
22779 struct cryptd_aead_request_ctx {
22780 crypto_completion_t complete;
22781-};
22782+} __no_const;
22783
22784 static void cryptd_queue_worker(struct work_struct *work);
22785
22786diff -urNp linux-3.0.7/crypto/gf128mul.c linux-3.0.7/crypto/gf128mul.c
22787--- linux-3.0.7/crypto/gf128mul.c 2011-07-21 22:17:23.000000000 -0400
22788+++ linux-3.0.7/crypto/gf128mul.c 2011-08-23 21:47:55.000000000 -0400
22789@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
22790 for (i = 0; i < 7; ++i)
22791 gf128mul_x_lle(&p[i + 1], &p[i]);
22792
22793- memset(r, 0, sizeof(r));
22794+ memset(r, 0, sizeof(*r));
22795 for (i = 0;;) {
22796 u8 ch = ((u8 *)b)[15 - i];
22797
22798@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
22799 for (i = 0; i < 7; ++i)
22800 gf128mul_x_bbe(&p[i + 1], &p[i]);
22801
22802- memset(r, 0, sizeof(r));
22803+ memset(r, 0, sizeof(*r));
22804 for (i = 0;;) {
22805 u8 ch = ((u8 *)b)[i];
22806
22807diff -urNp linux-3.0.7/crypto/serpent.c linux-3.0.7/crypto/serpent.c
22808--- linux-3.0.7/crypto/serpent.c 2011-07-21 22:17:23.000000000 -0400
22809+++ linux-3.0.7/crypto/serpent.c 2011-08-23 21:48:14.000000000 -0400
22810@@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
22811 u32 r0,r1,r2,r3,r4;
22812 int i;
22813
22814+ pax_track_stack();
22815+
22816 /* Copy key, add padding */
22817
22818 for (i = 0; i < keylen; ++i)
22819diff -urNp linux-3.0.7/Documentation/dontdiff linux-3.0.7/Documentation/dontdiff
22820--- linux-3.0.7/Documentation/dontdiff 2011-07-21 22:17:23.000000000 -0400
22821+++ linux-3.0.7/Documentation/dontdiff 2011-10-07 19:07:23.000000000 -0400
22822@@ -5,6 +5,7 @@
22823 *.cis
22824 *.cpio
22825 *.csp
22826+*.dbg
22827 *.dsp
22828 *.dvi
22829 *.elf
22830@@ -48,9 +49,11 @@
22831 *.tab.h
22832 *.tex
22833 *.ver
22834+*.vim
22835 *.xml
22836 *.xz
22837 *_MODULES
22838+*_reg_safe.h
22839 *_vga16.c
22840 *~
22841 \#*#
22842@@ -70,6 +73,7 @@ Kerntypes
22843 Module.markers
22844 Module.symvers
22845 PENDING
22846+PERF*
22847 SCCS
22848 System.map*
22849 TAGS
22850@@ -98,6 +102,8 @@ bzImage*
22851 capability_names.h
22852 capflags.c
22853 classlist.h*
22854+clut_vga16.c
22855+common-cmds.h
22856 comp*.log
22857 compile.h*
22858 conf
22859@@ -126,12 +132,14 @@ fore200e_pca_fw.c*
22860 gconf
22861 gconf.glade.h
22862 gen-devlist
22863+gen-kdb_cmds.c
22864 gen_crc32table
22865 gen_init_cpio
22866 generated
22867 genheaders
22868 genksyms
22869 *_gray256.c
22870+hash
22871 hpet_example
22872 hugepage-mmap
22873 hugepage-shm
22874@@ -146,7 +154,6 @@ int32.c
22875 int4.c
22876 int8.c
22877 kallsyms
22878-kconfig
22879 keywords.c
22880 ksym.c*
22881 ksym.h*
22882@@ -154,7 +161,6 @@ kxgettext
22883 lkc_defs.h
22884 lex.c
22885 lex.*.c
22886-linux
22887 logo_*.c
22888 logo_*_clut224.c
22889 logo_*_mono.c
22890@@ -166,7 +172,6 @@ machtypes.h
22891 map
22892 map_hugetlb
22893 maui_boot.h
22894-media
22895 mconf
22896 miboot*
22897 mk_elfconfig
22898@@ -174,6 +179,7 @@ mkboot
22899 mkbugboot
22900 mkcpustr
22901 mkdep
22902+mkpiggy
22903 mkprep
22904 mkregtable
22905 mktables
22906@@ -209,6 +215,7 @@ r300_reg_safe.h
22907 r420_reg_safe.h
22908 r600_reg_safe.h
22909 recordmcount
22910+regdb.c
22911 relocs
22912 rlim_names.h
22913 rn50_reg_safe.h
22914@@ -219,6 +226,7 @@ setup
22915 setup.bin
22916 setup.elf
22917 sImage
22918+slabinfo
22919 sm_tbl*
22920 split-include
22921 syscalltab.h
22922@@ -246,7 +254,9 @@ vmlinux
22923 vmlinux-*
22924 vmlinux.aout
22925 vmlinux.bin.all
22926+vmlinux.bin.bz2
22927 vmlinux.lds
22928+vmlinux.relocs
22929 vmlinuz
22930 voffset.h
22931 vsyscall.lds
22932@@ -254,6 +264,7 @@ vsyscall_32.lds
22933 wanxlfw.inc
22934 uImage
22935 unifdef
22936+utsrelease.h
22937 wakeup.bin
22938 wakeup.elf
22939 wakeup.lds
22940diff -urNp linux-3.0.7/Documentation/kernel-parameters.txt linux-3.0.7/Documentation/kernel-parameters.txt
22941--- linux-3.0.7/Documentation/kernel-parameters.txt 2011-07-21 22:17:23.000000000 -0400
22942+++ linux-3.0.7/Documentation/kernel-parameters.txt 2011-08-23 21:47:55.000000000 -0400
22943@@ -1883,6 +1883,13 @@ bytes respectively. Such letter suffixes
22944 the specified number of seconds. This is to be used if
22945 your oopses keep scrolling off the screen.
22946
22947+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
22948+ virtualization environments that don't cope well with the
22949+ expand down segment used by UDEREF on X86-32 or the frequent
22950+ page table updates on X86-64.
22951+
22952+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
22953+
22954 pcbit= [HW,ISDN]
22955
22956 pcd. [PARIDE]
22957diff -urNp linux-3.0.7/drivers/acpi/apei/cper.c linux-3.0.7/drivers/acpi/apei/cper.c
22958--- linux-3.0.7/drivers/acpi/apei/cper.c 2011-07-21 22:17:23.000000000 -0400
22959+++ linux-3.0.7/drivers/acpi/apei/cper.c 2011-08-23 21:47:55.000000000 -0400
22960@@ -38,12 +38,12 @@
22961 */
22962 u64 cper_next_record_id(void)
22963 {
22964- static atomic64_t seq;
22965+ static atomic64_unchecked_t seq;
22966
22967- if (!atomic64_read(&seq))
22968- atomic64_set(&seq, ((u64)get_seconds()) << 32);
22969+ if (!atomic64_read_unchecked(&seq))
22970+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
22971
22972- return atomic64_inc_return(&seq);
22973+ return atomic64_inc_return_unchecked(&seq);
22974 }
22975 EXPORT_SYMBOL_GPL(cper_next_record_id);
22976
22977diff -urNp linux-3.0.7/drivers/acpi/ec_sys.c linux-3.0.7/drivers/acpi/ec_sys.c
22978--- linux-3.0.7/drivers/acpi/ec_sys.c 2011-07-21 22:17:23.000000000 -0400
22979+++ linux-3.0.7/drivers/acpi/ec_sys.c 2011-08-24 19:06:55.000000000 -0400
22980@@ -11,6 +11,7 @@
22981 #include <linux/kernel.h>
22982 #include <linux/acpi.h>
22983 #include <linux/debugfs.h>
22984+#include <asm/uaccess.h>
22985 #include "internal.h"
22986
22987 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
22988@@ -39,7 +40,7 @@ static ssize_t acpi_ec_read_io(struct fi
22989 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
22990 */
22991 unsigned int size = EC_SPACE_SIZE;
22992- u8 *data = (u8 *) buf;
22993+ u8 data;
22994 loff_t init_off = *off;
22995 int err = 0;
22996
22997@@ -52,9 +53,11 @@ static ssize_t acpi_ec_read_io(struct fi
22998 size = count;
22999
23000 while (size) {
23001- err = ec_read(*off, &data[*off - init_off]);
23002+ err = ec_read(*off, &data);
23003 if (err)
23004 return err;
23005+ if (put_user(data, &buf[*off - init_off]))
23006+ return -EFAULT;
23007 *off += 1;
23008 size--;
23009 }
23010@@ -70,7 +73,6 @@ static ssize_t acpi_ec_write_io(struct f
23011
23012 unsigned int size = count;
23013 loff_t init_off = *off;
23014- u8 *data = (u8 *) buf;
23015 int err = 0;
23016
23017 if (*off >= EC_SPACE_SIZE)
23018@@ -81,7 +83,9 @@ static ssize_t acpi_ec_write_io(struct f
23019 }
23020
23021 while (size) {
23022- u8 byte_write = data[*off - init_off];
23023+ u8 byte_write;
23024+ if (get_user(byte_write, &buf[*off - init_off]))
23025+ return -EFAULT;
23026 err = ec_write(*off, byte_write);
23027 if (err)
23028 return err;
23029diff -urNp linux-3.0.7/drivers/acpi/proc.c linux-3.0.7/drivers/acpi/proc.c
23030--- linux-3.0.7/drivers/acpi/proc.c 2011-07-21 22:17:23.000000000 -0400
23031+++ linux-3.0.7/drivers/acpi/proc.c 2011-08-23 21:47:55.000000000 -0400
23032@@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
23033 size_t count, loff_t * ppos)
23034 {
23035 struct list_head *node, *next;
23036- char strbuf[5];
23037- char str[5] = "";
23038- unsigned int len = count;
23039-
23040- if (len > 4)
23041- len = 4;
23042- if (len < 0)
23043- return -EFAULT;
23044+ char strbuf[5] = {0};
23045
23046- if (copy_from_user(strbuf, buffer, len))
23047+ if (count > 4)
23048+ count = 4;
23049+ if (copy_from_user(strbuf, buffer, count))
23050 return -EFAULT;
23051- strbuf[len] = '\0';
23052- sscanf(strbuf, "%s", str);
23053+ strbuf[count] = '\0';
23054
23055 mutex_lock(&acpi_device_lock);
23056 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
23057@@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
23058 if (!dev->wakeup.flags.valid)
23059 continue;
23060
23061- if (!strncmp(dev->pnp.bus_id, str, 4)) {
23062+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
23063 if (device_can_wakeup(&dev->dev)) {
23064 bool enable = !device_may_wakeup(&dev->dev);
23065 device_set_wakeup_enable(&dev->dev, enable);
23066diff -urNp linux-3.0.7/drivers/acpi/processor_driver.c linux-3.0.7/drivers/acpi/processor_driver.c
23067--- linux-3.0.7/drivers/acpi/processor_driver.c 2011-07-21 22:17:23.000000000 -0400
23068+++ linux-3.0.7/drivers/acpi/processor_driver.c 2011-08-23 21:47:55.000000000 -0400
23069@@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
23070 return 0;
23071 #endif
23072
23073- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
23074+ BUG_ON(pr->id >= nr_cpu_ids);
23075
23076 /*
23077 * Buggy BIOS check
23078diff -urNp linux-3.0.7/drivers/ata/libata-core.c linux-3.0.7/drivers/ata/libata-core.c
23079--- linux-3.0.7/drivers/ata/libata-core.c 2011-07-21 22:17:23.000000000 -0400
23080+++ linux-3.0.7/drivers/ata/libata-core.c 2011-08-23 21:47:55.000000000 -0400
23081@@ -4753,7 +4753,7 @@ void ata_qc_free(struct ata_queued_cmd *
23082 struct ata_port *ap;
23083 unsigned int tag;
23084
23085- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23086+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23087 ap = qc->ap;
23088
23089 qc->flags = 0;
23090@@ -4769,7 +4769,7 @@ void __ata_qc_complete(struct ata_queued
23091 struct ata_port *ap;
23092 struct ata_link *link;
23093
23094- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23095+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23096 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
23097 ap = qc->ap;
23098 link = qc->dev->link;
23099@@ -5774,6 +5774,7 @@ static void ata_finalize_port_ops(struct
23100 return;
23101
23102 spin_lock(&lock);
23103+ pax_open_kernel();
23104
23105 for (cur = ops->inherits; cur; cur = cur->inherits) {
23106 void **inherit = (void **)cur;
23107@@ -5787,8 +5788,9 @@ static void ata_finalize_port_ops(struct
23108 if (IS_ERR(*pp))
23109 *pp = NULL;
23110
23111- ops->inherits = NULL;
23112+ *(struct ata_port_operations **)&ops->inherits = NULL;
23113
23114+ pax_close_kernel();
23115 spin_unlock(&lock);
23116 }
23117
23118diff -urNp linux-3.0.7/drivers/ata/libata-eh.c linux-3.0.7/drivers/ata/libata-eh.c
23119--- linux-3.0.7/drivers/ata/libata-eh.c 2011-07-21 22:17:23.000000000 -0400
23120+++ linux-3.0.7/drivers/ata/libata-eh.c 2011-08-23 21:48:14.000000000 -0400
23121@@ -2518,6 +2518,8 @@ void ata_eh_report(struct ata_port *ap)
23122 {
23123 struct ata_link *link;
23124
23125+ pax_track_stack();
23126+
23127 ata_for_each_link(link, ap, HOST_FIRST)
23128 ata_eh_link_report(link);
23129 }
23130diff -urNp linux-3.0.7/drivers/ata/pata_arasan_cf.c linux-3.0.7/drivers/ata/pata_arasan_cf.c
23131--- linux-3.0.7/drivers/ata/pata_arasan_cf.c 2011-07-21 22:17:23.000000000 -0400
23132+++ linux-3.0.7/drivers/ata/pata_arasan_cf.c 2011-08-23 21:47:55.000000000 -0400
23133@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
23134 /* Handle platform specific quirks */
23135 if (pdata->quirk) {
23136 if (pdata->quirk & CF_BROKEN_PIO) {
23137- ap->ops->set_piomode = NULL;
23138+ pax_open_kernel();
23139+ *(void **)&ap->ops->set_piomode = NULL;
23140+ pax_close_kernel();
23141 ap->pio_mask = 0;
23142 }
23143 if (pdata->quirk & CF_BROKEN_MWDMA)
23144diff -urNp linux-3.0.7/drivers/atm/adummy.c linux-3.0.7/drivers/atm/adummy.c
23145--- linux-3.0.7/drivers/atm/adummy.c 2011-07-21 22:17:23.000000000 -0400
23146+++ linux-3.0.7/drivers/atm/adummy.c 2011-08-23 21:47:55.000000000 -0400
23147@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
23148 vcc->pop(vcc, skb);
23149 else
23150 dev_kfree_skb_any(skb);
23151- atomic_inc(&vcc->stats->tx);
23152+ atomic_inc_unchecked(&vcc->stats->tx);
23153
23154 return 0;
23155 }
23156diff -urNp linux-3.0.7/drivers/atm/ambassador.c linux-3.0.7/drivers/atm/ambassador.c
23157--- linux-3.0.7/drivers/atm/ambassador.c 2011-07-21 22:17:23.000000000 -0400
23158+++ linux-3.0.7/drivers/atm/ambassador.c 2011-08-23 21:47:55.000000000 -0400
23159@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
23160 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
23161
23162 // VC layer stats
23163- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
23164+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
23165
23166 // free the descriptor
23167 kfree (tx_descr);
23168@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
23169 dump_skb ("<<<", vc, skb);
23170
23171 // VC layer stats
23172- atomic_inc(&atm_vcc->stats->rx);
23173+ atomic_inc_unchecked(&atm_vcc->stats->rx);
23174 __net_timestamp(skb);
23175 // end of our responsibility
23176 atm_vcc->push (atm_vcc, skb);
23177@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
23178 } else {
23179 PRINTK (KERN_INFO, "dropped over-size frame");
23180 // should we count this?
23181- atomic_inc(&atm_vcc->stats->rx_drop);
23182+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23183 }
23184
23185 } else {
23186@@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * at
23187 }
23188
23189 if (check_area (skb->data, skb->len)) {
23190- atomic_inc(&atm_vcc->stats->tx_err);
23191+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
23192 return -ENOMEM; // ?
23193 }
23194
23195diff -urNp linux-3.0.7/drivers/atm/atmtcp.c linux-3.0.7/drivers/atm/atmtcp.c
23196--- linux-3.0.7/drivers/atm/atmtcp.c 2011-07-21 22:17:23.000000000 -0400
23197+++ linux-3.0.7/drivers/atm/atmtcp.c 2011-08-23 21:47:55.000000000 -0400
23198@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
23199 if (vcc->pop) vcc->pop(vcc,skb);
23200 else dev_kfree_skb(skb);
23201 if (dev_data) return 0;
23202- atomic_inc(&vcc->stats->tx_err);
23203+ atomic_inc_unchecked(&vcc->stats->tx_err);
23204 return -ENOLINK;
23205 }
23206 size = skb->len+sizeof(struct atmtcp_hdr);
23207@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
23208 if (!new_skb) {
23209 if (vcc->pop) vcc->pop(vcc,skb);
23210 else dev_kfree_skb(skb);
23211- atomic_inc(&vcc->stats->tx_err);
23212+ atomic_inc_unchecked(&vcc->stats->tx_err);
23213 return -ENOBUFS;
23214 }
23215 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
23216@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
23217 if (vcc->pop) vcc->pop(vcc,skb);
23218 else dev_kfree_skb(skb);
23219 out_vcc->push(out_vcc,new_skb);
23220- atomic_inc(&vcc->stats->tx);
23221- atomic_inc(&out_vcc->stats->rx);
23222+ atomic_inc_unchecked(&vcc->stats->tx);
23223+ atomic_inc_unchecked(&out_vcc->stats->rx);
23224 return 0;
23225 }
23226
23227@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
23228 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
23229 read_unlock(&vcc_sklist_lock);
23230 if (!out_vcc) {
23231- atomic_inc(&vcc->stats->tx_err);
23232+ atomic_inc_unchecked(&vcc->stats->tx_err);
23233 goto done;
23234 }
23235 skb_pull(skb,sizeof(struct atmtcp_hdr));
23236@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
23237 __net_timestamp(new_skb);
23238 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
23239 out_vcc->push(out_vcc,new_skb);
23240- atomic_inc(&vcc->stats->tx);
23241- atomic_inc(&out_vcc->stats->rx);
23242+ atomic_inc_unchecked(&vcc->stats->tx);
23243+ atomic_inc_unchecked(&out_vcc->stats->rx);
23244 done:
23245 if (vcc->pop) vcc->pop(vcc,skb);
23246 else dev_kfree_skb(skb);
23247diff -urNp linux-3.0.7/drivers/atm/eni.c linux-3.0.7/drivers/atm/eni.c
23248--- linux-3.0.7/drivers/atm/eni.c 2011-07-21 22:17:23.000000000 -0400
23249+++ linux-3.0.7/drivers/atm/eni.c 2011-10-11 10:44:33.000000000 -0400
23250@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
23251 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
23252 vcc->dev->number);
23253 length = 0;
23254- atomic_inc(&vcc->stats->rx_err);
23255+ atomic_inc_unchecked(&vcc->stats->rx_err);
23256 }
23257 else {
23258 length = ATM_CELL_SIZE-1; /* no HEC */
23259@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
23260 size);
23261 }
23262 eff = length = 0;
23263- atomic_inc(&vcc->stats->rx_err);
23264+ atomic_inc_unchecked(&vcc->stats->rx_err);
23265 }
23266 else {
23267 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
23268@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
23269 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
23270 vcc->dev->number,vcc->vci,length,size << 2,descr);
23271 length = eff = 0;
23272- atomic_inc(&vcc->stats->rx_err);
23273+ atomic_inc_unchecked(&vcc->stats->rx_err);
23274 }
23275 }
23276 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
23277@@ -771,7 +771,7 @@ rx_dequeued++;
23278 vcc->push(vcc,skb);
23279 pushed++;
23280 }
23281- atomic_inc(&vcc->stats->rx);
23282+ atomic_inc_unchecked(&vcc->stats->rx);
23283 }
23284 wake_up(&eni_dev->rx_wait);
23285 }
23286@@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
23287 PCI_DMA_TODEVICE);
23288 if (vcc->pop) vcc->pop(vcc,skb);
23289 else dev_kfree_skb_irq(skb);
23290- atomic_inc(&vcc->stats->tx);
23291+ atomic_inc_unchecked(&vcc->stats->tx);
23292 wake_up(&eni_dev->tx_wait);
23293 dma_complete++;
23294 }
23295@@ -1568,7 +1568,7 @@ tx_complete++;
23296 /*--------------------------------- entries ---------------------------------*/
23297
23298
23299-static const char *media_name[] __devinitdata = {
23300+static const char *media_name[] __devinitconst = {
23301 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
23302 "UTP", "05?", "06?", "07?", /* 4- 7 */
23303 "TAXI","09?", "10?", "11?", /* 8-11 */
23304diff -urNp linux-3.0.7/drivers/atm/firestream.c linux-3.0.7/drivers/atm/firestream.c
23305--- linux-3.0.7/drivers/atm/firestream.c 2011-07-21 22:17:23.000000000 -0400
23306+++ linux-3.0.7/drivers/atm/firestream.c 2011-08-23 21:47:55.000000000 -0400
23307@@ -749,7 +749,7 @@ static void process_txdone_queue (struct
23308 }
23309 }
23310
23311- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
23312+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
23313
23314 fs_dprintk (FS_DEBUG_TXMEM, "i");
23315 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
23316@@ -816,7 +816,7 @@ static void process_incoming (struct fs_
23317 #endif
23318 skb_put (skb, qe->p1 & 0xffff);
23319 ATM_SKB(skb)->vcc = atm_vcc;
23320- atomic_inc(&atm_vcc->stats->rx);
23321+ atomic_inc_unchecked(&atm_vcc->stats->rx);
23322 __net_timestamp(skb);
23323 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
23324 atm_vcc->push (atm_vcc, skb);
23325@@ -837,12 +837,12 @@ static void process_incoming (struct fs_
23326 kfree (pe);
23327 }
23328 if (atm_vcc)
23329- atomic_inc(&atm_vcc->stats->rx_drop);
23330+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23331 break;
23332 case 0x1f: /* Reassembly abort: no buffers. */
23333 /* Silently increment error counter. */
23334 if (atm_vcc)
23335- atomic_inc(&atm_vcc->stats->rx_drop);
23336+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23337 break;
23338 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
23339 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
23340diff -urNp linux-3.0.7/drivers/atm/fore200e.c linux-3.0.7/drivers/atm/fore200e.c
23341--- linux-3.0.7/drivers/atm/fore200e.c 2011-07-21 22:17:23.000000000 -0400
23342+++ linux-3.0.7/drivers/atm/fore200e.c 2011-08-23 21:47:55.000000000 -0400
23343@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
23344 #endif
23345 /* check error condition */
23346 if (*entry->status & STATUS_ERROR)
23347- atomic_inc(&vcc->stats->tx_err);
23348+ atomic_inc_unchecked(&vcc->stats->tx_err);
23349 else
23350- atomic_inc(&vcc->stats->tx);
23351+ atomic_inc_unchecked(&vcc->stats->tx);
23352 }
23353 }
23354
23355@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
23356 if (skb == NULL) {
23357 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
23358
23359- atomic_inc(&vcc->stats->rx_drop);
23360+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23361 return -ENOMEM;
23362 }
23363
23364@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
23365
23366 dev_kfree_skb_any(skb);
23367
23368- atomic_inc(&vcc->stats->rx_drop);
23369+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23370 return -ENOMEM;
23371 }
23372
23373 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
23374
23375 vcc->push(vcc, skb);
23376- atomic_inc(&vcc->stats->rx);
23377+ atomic_inc_unchecked(&vcc->stats->rx);
23378
23379 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
23380
23381@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
23382 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
23383 fore200e->atm_dev->number,
23384 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
23385- atomic_inc(&vcc->stats->rx_err);
23386+ atomic_inc_unchecked(&vcc->stats->rx_err);
23387 }
23388 }
23389
23390@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
23391 goto retry_here;
23392 }
23393
23394- atomic_inc(&vcc->stats->tx_err);
23395+ atomic_inc_unchecked(&vcc->stats->tx_err);
23396
23397 fore200e->tx_sat++;
23398 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
23399diff -urNp linux-3.0.7/drivers/atm/he.c linux-3.0.7/drivers/atm/he.c
23400--- linux-3.0.7/drivers/atm/he.c 2011-07-21 22:17:23.000000000 -0400
23401+++ linux-3.0.7/drivers/atm/he.c 2011-08-23 21:47:55.000000000 -0400
23402@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23403
23404 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
23405 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
23406- atomic_inc(&vcc->stats->rx_drop);
23407+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23408 goto return_host_buffers;
23409 }
23410
23411@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23412 RBRQ_LEN_ERR(he_dev->rbrq_head)
23413 ? "LEN_ERR" : "",
23414 vcc->vpi, vcc->vci);
23415- atomic_inc(&vcc->stats->rx_err);
23416+ atomic_inc_unchecked(&vcc->stats->rx_err);
23417 goto return_host_buffers;
23418 }
23419
23420@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23421 vcc->push(vcc, skb);
23422 spin_lock(&he_dev->global_lock);
23423
23424- atomic_inc(&vcc->stats->rx);
23425+ atomic_inc_unchecked(&vcc->stats->rx);
23426
23427 return_host_buffers:
23428 ++pdus_assembled;
23429@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
23430 tpd->vcc->pop(tpd->vcc, tpd->skb);
23431 else
23432 dev_kfree_skb_any(tpd->skb);
23433- atomic_inc(&tpd->vcc->stats->tx_err);
23434+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
23435 }
23436 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
23437 return;
23438@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23439 vcc->pop(vcc, skb);
23440 else
23441 dev_kfree_skb_any(skb);
23442- atomic_inc(&vcc->stats->tx_err);
23443+ atomic_inc_unchecked(&vcc->stats->tx_err);
23444 return -EINVAL;
23445 }
23446
23447@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23448 vcc->pop(vcc, skb);
23449 else
23450 dev_kfree_skb_any(skb);
23451- atomic_inc(&vcc->stats->tx_err);
23452+ atomic_inc_unchecked(&vcc->stats->tx_err);
23453 return -EINVAL;
23454 }
23455 #endif
23456@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23457 vcc->pop(vcc, skb);
23458 else
23459 dev_kfree_skb_any(skb);
23460- atomic_inc(&vcc->stats->tx_err);
23461+ atomic_inc_unchecked(&vcc->stats->tx_err);
23462 spin_unlock_irqrestore(&he_dev->global_lock, flags);
23463 return -ENOMEM;
23464 }
23465@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23466 vcc->pop(vcc, skb);
23467 else
23468 dev_kfree_skb_any(skb);
23469- atomic_inc(&vcc->stats->tx_err);
23470+ atomic_inc_unchecked(&vcc->stats->tx_err);
23471 spin_unlock_irqrestore(&he_dev->global_lock, flags);
23472 return -ENOMEM;
23473 }
23474@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
23475 __enqueue_tpd(he_dev, tpd, cid);
23476 spin_unlock_irqrestore(&he_dev->global_lock, flags);
23477
23478- atomic_inc(&vcc->stats->tx);
23479+ atomic_inc_unchecked(&vcc->stats->tx);
23480
23481 return 0;
23482 }
23483diff -urNp linux-3.0.7/drivers/atm/horizon.c linux-3.0.7/drivers/atm/horizon.c
23484--- linux-3.0.7/drivers/atm/horizon.c 2011-07-21 22:17:23.000000000 -0400
23485+++ linux-3.0.7/drivers/atm/horizon.c 2011-08-23 21:47:55.000000000 -0400
23486@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev,
23487 {
23488 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
23489 // VC layer stats
23490- atomic_inc(&vcc->stats->rx);
23491+ atomic_inc_unchecked(&vcc->stats->rx);
23492 __net_timestamp(skb);
23493 // end of our responsibility
23494 vcc->push (vcc, skb);
23495@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const
23496 dev->tx_iovec = NULL;
23497
23498 // VC layer stats
23499- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
23500+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
23501
23502 // free the skb
23503 hrz_kfree_skb (skb);
23504diff -urNp linux-3.0.7/drivers/atm/idt77252.c linux-3.0.7/drivers/atm/idt77252.c
23505--- linux-3.0.7/drivers/atm/idt77252.c 2011-07-21 22:17:23.000000000 -0400
23506+++ linux-3.0.7/drivers/atm/idt77252.c 2011-08-23 21:47:55.000000000 -0400
23507@@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, str
23508 else
23509 dev_kfree_skb(skb);
23510
23511- atomic_inc(&vcc->stats->tx);
23512+ atomic_inc_unchecked(&vcc->stats->tx);
23513 }
23514
23515 atomic_dec(&scq->used);
23516@@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, st
23517 if ((sb = dev_alloc_skb(64)) == NULL) {
23518 printk("%s: Can't allocate buffers for aal0.\n",
23519 card->name);
23520- atomic_add(i, &vcc->stats->rx_drop);
23521+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
23522 break;
23523 }
23524 if (!atm_charge(vcc, sb->truesize)) {
23525 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
23526 card->name);
23527- atomic_add(i - 1, &vcc->stats->rx_drop);
23528+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
23529 dev_kfree_skb(sb);
23530 break;
23531 }
23532@@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, st
23533 ATM_SKB(sb)->vcc = vcc;
23534 __net_timestamp(sb);
23535 vcc->push(vcc, sb);
23536- atomic_inc(&vcc->stats->rx);
23537+ atomic_inc_unchecked(&vcc->stats->rx);
23538
23539 cell += ATM_CELL_PAYLOAD;
23540 }
23541@@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, st
23542 "(CDC: %08x)\n",
23543 card->name, len, rpp->len, readl(SAR_REG_CDC));
23544 recycle_rx_pool_skb(card, rpp);
23545- atomic_inc(&vcc->stats->rx_err);
23546+ atomic_inc_unchecked(&vcc->stats->rx_err);
23547 return;
23548 }
23549 if (stat & SAR_RSQE_CRC) {
23550 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
23551 recycle_rx_pool_skb(card, rpp);
23552- atomic_inc(&vcc->stats->rx_err);
23553+ atomic_inc_unchecked(&vcc->stats->rx_err);
23554 return;
23555 }
23556 if (skb_queue_len(&rpp->queue) > 1) {
23557@@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, st
23558 RXPRINTK("%s: Can't alloc RX skb.\n",
23559 card->name);
23560 recycle_rx_pool_skb(card, rpp);
23561- atomic_inc(&vcc->stats->rx_err);
23562+ atomic_inc_unchecked(&vcc->stats->rx_err);
23563 return;
23564 }
23565 if (!atm_charge(vcc, skb->truesize)) {
23566@@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, st
23567 __net_timestamp(skb);
23568
23569 vcc->push(vcc, skb);
23570- atomic_inc(&vcc->stats->rx);
23571+ atomic_inc_unchecked(&vcc->stats->rx);
23572
23573 return;
23574 }
23575@@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, st
23576 __net_timestamp(skb);
23577
23578 vcc->push(vcc, skb);
23579- atomic_inc(&vcc->stats->rx);
23580+ atomic_inc_unchecked(&vcc->stats->rx);
23581
23582 if (skb->truesize > SAR_FB_SIZE_3)
23583 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
23584@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
23585 if (vcc->qos.aal != ATM_AAL0) {
23586 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
23587 card->name, vpi, vci);
23588- atomic_inc(&vcc->stats->rx_drop);
23589+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23590 goto drop;
23591 }
23592
23593 if ((sb = dev_alloc_skb(64)) == NULL) {
23594 printk("%s: Can't allocate buffers for AAL0.\n",
23595 card->name);
23596- atomic_inc(&vcc->stats->rx_err);
23597+ atomic_inc_unchecked(&vcc->stats->rx_err);
23598 goto drop;
23599 }
23600
23601@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
23602 ATM_SKB(sb)->vcc = vcc;
23603 __net_timestamp(sb);
23604 vcc->push(vcc, sb);
23605- atomic_inc(&vcc->stats->rx);
23606+ atomic_inc_unchecked(&vcc->stats->rx);
23607
23608 drop:
23609 skb_pull(queue, 64);
23610@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
23611
23612 if (vc == NULL) {
23613 printk("%s: NULL connection in send().\n", card->name);
23614- atomic_inc(&vcc->stats->tx_err);
23615+ atomic_inc_unchecked(&vcc->stats->tx_err);
23616 dev_kfree_skb(skb);
23617 return -EINVAL;
23618 }
23619 if (!test_bit(VCF_TX, &vc->flags)) {
23620 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
23621- atomic_inc(&vcc->stats->tx_err);
23622+ atomic_inc_unchecked(&vcc->stats->tx_err);
23623 dev_kfree_skb(skb);
23624 return -EINVAL;
23625 }
23626@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
23627 break;
23628 default:
23629 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
23630- atomic_inc(&vcc->stats->tx_err);
23631+ atomic_inc_unchecked(&vcc->stats->tx_err);
23632 dev_kfree_skb(skb);
23633 return -EINVAL;
23634 }
23635
23636 if (skb_shinfo(skb)->nr_frags != 0) {
23637 printk("%s: No scatter-gather yet.\n", card->name);
23638- atomic_inc(&vcc->stats->tx_err);
23639+ atomic_inc_unchecked(&vcc->stats->tx_err);
23640 dev_kfree_skb(skb);
23641 return -EINVAL;
23642 }
23643@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
23644
23645 err = queue_skb(card, vc, skb, oam);
23646 if (err) {
23647- atomic_inc(&vcc->stats->tx_err);
23648+ atomic_inc_unchecked(&vcc->stats->tx_err);
23649 dev_kfree_skb(skb);
23650 return err;
23651 }
23652@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
23653 skb = dev_alloc_skb(64);
23654 if (!skb) {
23655 printk("%s: Out of memory in send_oam().\n", card->name);
23656- atomic_inc(&vcc->stats->tx_err);
23657+ atomic_inc_unchecked(&vcc->stats->tx_err);
23658 return -ENOMEM;
23659 }
23660 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
23661diff -urNp linux-3.0.7/drivers/atm/iphase.c linux-3.0.7/drivers/atm/iphase.c
23662--- linux-3.0.7/drivers/atm/iphase.c 2011-07-21 22:17:23.000000000 -0400
23663+++ linux-3.0.7/drivers/atm/iphase.c 2011-08-23 21:47:55.000000000 -0400
23664@@ -1120,7 +1120,7 @@ static int rx_pkt(struct atm_dev *dev)
23665 status = (u_short) (buf_desc_ptr->desc_mode);
23666 if (status & (RX_CER | RX_PTE | RX_OFL))
23667 {
23668- atomic_inc(&vcc->stats->rx_err);
23669+ atomic_inc_unchecked(&vcc->stats->rx_err);
23670 IF_ERR(printk("IA: bad packet, dropping it");)
23671 if (status & RX_CER) {
23672 IF_ERR(printk(" cause: packet CRC error\n");)
23673@@ -1143,7 +1143,7 @@ static int rx_pkt(struct atm_dev *dev)
23674 len = dma_addr - buf_addr;
23675 if (len > iadev->rx_buf_sz) {
23676 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
23677- atomic_inc(&vcc->stats->rx_err);
23678+ atomic_inc_unchecked(&vcc->stats->rx_err);
23679 goto out_free_desc;
23680 }
23681
23682@@ -1293,7 +1293,7 @@ static void rx_dle_intr(struct atm_dev *
23683 ia_vcc = INPH_IA_VCC(vcc);
23684 if (ia_vcc == NULL)
23685 {
23686- atomic_inc(&vcc->stats->rx_err);
23687+ atomic_inc_unchecked(&vcc->stats->rx_err);
23688 dev_kfree_skb_any(skb);
23689 atm_return(vcc, atm_guess_pdu2truesize(len));
23690 goto INCR_DLE;
23691@@ -1305,7 +1305,7 @@ static void rx_dle_intr(struct atm_dev *
23692 if ((length > iadev->rx_buf_sz) || (length >
23693 (skb->len - sizeof(struct cpcs_trailer))))
23694 {
23695- atomic_inc(&vcc->stats->rx_err);
23696+ atomic_inc_unchecked(&vcc->stats->rx_err);
23697 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
23698 length, skb->len);)
23699 dev_kfree_skb_any(skb);
23700@@ -1321,7 +1321,7 @@ static void rx_dle_intr(struct atm_dev *
23701
23702 IF_RX(printk("rx_dle_intr: skb push");)
23703 vcc->push(vcc,skb);
23704- atomic_inc(&vcc->stats->rx);
23705+ atomic_inc_unchecked(&vcc->stats->rx);
23706 iadev->rx_pkt_cnt++;
23707 }
23708 INCR_DLE:
23709@@ -2801,15 +2801,15 @@ static int ia_ioctl(struct atm_dev *dev,
23710 {
23711 struct k_sonet_stats *stats;
23712 stats = &PRIV(_ia_dev[board])->sonet_stats;
23713- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
23714- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
23715- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
23716- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
23717- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
23718- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
23719- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
23720- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
23721- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
23722+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
23723+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
23724+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
23725+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
23726+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
23727+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
23728+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
23729+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
23730+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
23731 }
23732 ia_cmds.status = 0;
23733 break;
23734@@ -2914,7 +2914,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
23735 if ((desc == 0) || (desc > iadev->num_tx_desc))
23736 {
23737 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
23738- atomic_inc(&vcc->stats->tx);
23739+ atomic_inc_unchecked(&vcc->stats->tx);
23740 if (vcc->pop)
23741 vcc->pop(vcc, skb);
23742 else
23743@@ -3019,14 +3019,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
23744 ATM_DESC(skb) = vcc->vci;
23745 skb_queue_tail(&iadev->tx_dma_q, skb);
23746
23747- atomic_inc(&vcc->stats->tx);
23748+ atomic_inc_unchecked(&vcc->stats->tx);
23749 iadev->tx_pkt_cnt++;
23750 /* Increment transaction counter */
23751 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
23752
23753 #if 0
23754 /* add flow control logic */
23755- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
23756+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
23757 if (iavcc->vc_desc_cnt > 10) {
23758 vcc->tx_quota = vcc->tx_quota * 3 / 4;
23759 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
23760diff -urNp linux-3.0.7/drivers/atm/lanai.c linux-3.0.7/drivers/atm/lanai.c
23761--- linux-3.0.7/drivers/atm/lanai.c 2011-07-21 22:17:23.000000000 -0400
23762+++ linux-3.0.7/drivers/atm/lanai.c 2011-08-23 21:47:55.000000000 -0400
23763@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
23764 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
23765 lanai_endtx(lanai, lvcc);
23766 lanai_free_skb(lvcc->tx.atmvcc, skb);
23767- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
23768+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
23769 }
23770
23771 /* Try to fill the buffer - don't call unless there is backlog */
23772@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
23773 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
23774 __net_timestamp(skb);
23775 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
23776- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
23777+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
23778 out:
23779 lvcc->rx.buf.ptr = end;
23780 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
23781@@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
23782 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
23783 "vcc %d\n", lanai->number, (unsigned int) s, vci);
23784 lanai->stats.service_rxnotaal5++;
23785- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23786+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23787 return 0;
23788 }
23789 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
23790@@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
23791 int bytes;
23792 read_unlock(&vcc_sklist_lock);
23793 DPRINTK("got trashed rx pdu on vci %d\n", vci);
23794- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23795+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23796 lvcc->stats.x.aal5.service_trash++;
23797 bytes = (SERVICE_GET_END(s) * 16) -
23798 (((unsigned long) lvcc->rx.buf.ptr) -
23799@@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
23800 }
23801 if (s & SERVICE_STREAM) {
23802 read_unlock(&vcc_sklist_lock);
23803- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23804+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23805 lvcc->stats.x.aal5.service_stream++;
23806 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
23807 "PDU on VCI %d!\n", lanai->number, vci);
23808@@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
23809 return 0;
23810 }
23811 DPRINTK("got rx crc error on vci %d\n", vci);
23812- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23813+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23814 lvcc->stats.x.aal5.service_rxcrc++;
23815 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
23816 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
23817diff -urNp linux-3.0.7/drivers/atm/nicstar.c linux-3.0.7/drivers/atm/nicstar.c
23818--- linux-3.0.7/drivers/atm/nicstar.c 2011-07-21 22:17:23.000000000 -0400
23819+++ linux-3.0.7/drivers/atm/nicstar.c 2011-08-23 21:47:55.000000000 -0400
23820@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
23821 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
23822 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
23823 card->index);
23824- atomic_inc(&vcc->stats->tx_err);
23825+ atomic_inc_unchecked(&vcc->stats->tx_err);
23826 dev_kfree_skb_any(skb);
23827 return -EINVAL;
23828 }
23829@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
23830 if (!vc->tx) {
23831 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
23832 card->index);
23833- atomic_inc(&vcc->stats->tx_err);
23834+ atomic_inc_unchecked(&vcc->stats->tx_err);
23835 dev_kfree_skb_any(skb);
23836 return -EINVAL;
23837 }
23838@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
23839 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
23840 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
23841 card->index);
23842- atomic_inc(&vcc->stats->tx_err);
23843+ atomic_inc_unchecked(&vcc->stats->tx_err);
23844 dev_kfree_skb_any(skb);
23845 return -EINVAL;
23846 }
23847
23848 if (skb_shinfo(skb)->nr_frags != 0) {
23849 printk("nicstar%d: No scatter-gather yet.\n", card->index);
23850- atomic_inc(&vcc->stats->tx_err);
23851+ atomic_inc_unchecked(&vcc->stats->tx_err);
23852 dev_kfree_skb_any(skb);
23853 return -EINVAL;
23854 }
23855@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
23856 }
23857
23858 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
23859- atomic_inc(&vcc->stats->tx_err);
23860+ atomic_inc_unchecked(&vcc->stats->tx_err);
23861 dev_kfree_skb_any(skb);
23862 return -EIO;
23863 }
23864- atomic_inc(&vcc->stats->tx);
23865+ atomic_inc_unchecked(&vcc->stats->tx);
23866
23867 return 0;
23868 }
23869@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
23870 printk
23871 ("nicstar%d: Can't allocate buffers for aal0.\n",
23872 card->index);
23873- atomic_add(i, &vcc->stats->rx_drop);
23874+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
23875 break;
23876 }
23877 if (!atm_charge(vcc, sb->truesize)) {
23878 RXPRINTK
23879 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
23880 card->index);
23881- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
23882+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
23883 dev_kfree_skb_any(sb);
23884 break;
23885 }
23886@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
23887 ATM_SKB(sb)->vcc = vcc;
23888 __net_timestamp(sb);
23889 vcc->push(vcc, sb);
23890- atomic_inc(&vcc->stats->rx);
23891+ atomic_inc_unchecked(&vcc->stats->rx);
23892 cell += ATM_CELL_PAYLOAD;
23893 }
23894
23895@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
23896 if (iovb == NULL) {
23897 printk("nicstar%d: Out of iovec buffers.\n",
23898 card->index);
23899- atomic_inc(&vcc->stats->rx_drop);
23900+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23901 recycle_rx_buf(card, skb);
23902 return;
23903 }
23904@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
23905 small or large buffer itself. */
23906 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
23907 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
23908- atomic_inc(&vcc->stats->rx_err);
23909+ atomic_inc_unchecked(&vcc->stats->rx_err);
23910 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
23911 NS_MAX_IOVECS);
23912 NS_PRV_IOVCNT(iovb) = 0;
23913@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
23914 ("nicstar%d: Expected a small buffer, and this is not one.\n",
23915 card->index);
23916 which_list(card, skb);
23917- atomic_inc(&vcc->stats->rx_err);
23918+ atomic_inc_unchecked(&vcc->stats->rx_err);
23919 recycle_rx_buf(card, skb);
23920 vc->rx_iov = NULL;
23921 recycle_iov_buf(card, iovb);
23922@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
23923 ("nicstar%d: Expected a large buffer, and this is not one.\n",
23924 card->index);
23925 which_list(card, skb);
23926- atomic_inc(&vcc->stats->rx_err);
23927+ atomic_inc_unchecked(&vcc->stats->rx_err);
23928 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
23929 NS_PRV_IOVCNT(iovb));
23930 vc->rx_iov = NULL;
23931@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
23932 printk(" - PDU size mismatch.\n");
23933 else
23934 printk(".\n");
23935- atomic_inc(&vcc->stats->rx_err);
23936+ atomic_inc_unchecked(&vcc->stats->rx_err);
23937 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
23938 NS_PRV_IOVCNT(iovb));
23939 vc->rx_iov = NULL;
23940@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
23941 /* skb points to a small buffer */
23942 if (!atm_charge(vcc, skb->truesize)) {
23943 push_rxbufs(card, skb);
23944- atomic_inc(&vcc->stats->rx_drop);
23945+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23946 } else {
23947 skb_put(skb, len);
23948 dequeue_sm_buf(card, skb);
23949@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
23950 ATM_SKB(skb)->vcc = vcc;
23951 __net_timestamp(skb);
23952 vcc->push(vcc, skb);
23953- atomic_inc(&vcc->stats->rx);
23954+ atomic_inc_unchecked(&vcc->stats->rx);
23955 }
23956 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
23957 struct sk_buff *sb;
23958@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
23959 if (len <= NS_SMBUFSIZE) {
23960 if (!atm_charge(vcc, sb->truesize)) {
23961 push_rxbufs(card, sb);
23962- atomic_inc(&vcc->stats->rx_drop);
23963+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23964 } else {
23965 skb_put(sb, len);
23966 dequeue_sm_buf(card, sb);
23967@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
23968 ATM_SKB(sb)->vcc = vcc;
23969 __net_timestamp(sb);
23970 vcc->push(vcc, sb);
23971- atomic_inc(&vcc->stats->rx);
23972+ atomic_inc_unchecked(&vcc->stats->rx);
23973 }
23974
23975 push_rxbufs(card, skb);
23976@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
23977
23978 if (!atm_charge(vcc, skb->truesize)) {
23979 push_rxbufs(card, skb);
23980- atomic_inc(&vcc->stats->rx_drop);
23981+ atomic_inc_unchecked(&vcc->stats->rx_drop);
23982 } else {
23983 dequeue_lg_buf(card, skb);
23984 #ifdef NS_USE_DESTRUCTORS
23985@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
23986 ATM_SKB(skb)->vcc = vcc;
23987 __net_timestamp(skb);
23988 vcc->push(vcc, skb);
23989- atomic_inc(&vcc->stats->rx);
23990+ atomic_inc_unchecked(&vcc->stats->rx);
23991 }
23992
23993 push_rxbufs(card, sb);
23994@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
23995 printk
23996 ("nicstar%d: Out of huge buffers.\n",
23997 card->index);
23998- atomic_inc(&vcc->stats->rx_drop);
23999+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24000 recycle_iovec_rx_bufs(card,
24001 (struct iovec *)
24002 iovb->data,
24003@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
24004 card->hbpool.count++;
24005 } else
24006 dev_kfree_skb_any(hb);
24007- atomic_inc(&vcc->stats->rx_drop);
24008+ atomic_inc_unchecked(&vcc->stats->rx_drop);
24009 } else {
24010 /* Copy the small buffer to the huge buffer */
24011 sb = (struct sk_buff *)iov->iov_base;
24012@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
24013 #endif /* NS_USE_DESTRUCTORS */
24014 __net_timestamp(hb);
24015 vcc->push(vcc, hb);
24016- atomic_inc(&vcc->stats->rx);
24017+ atomic_inc_unchecked(&vcc->stats->rx);
24018 }
24019 }
24020
24021diff -urNp linux-3.0.7/drivers/atm/solos-pci.c linux-3.0.7/drivers/atm/solos-pci.c
24022--- linux-3.0.7/drivers/atm/solos-pci.c 2011-07-21 22:17:23.000000000 -0400
24023+++ linux-3.0.7/drivers/atm/solos-pci.c 2011-08-23 21:48:14.000000000 -0400
24024@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
24025 }
24026 atm_charge(vcc, skb->truesize);
24027 vcc->push(vcc, skb);
24028- atomic_inc(&vcc->stats->rx);
24029+ atomic_inc_unchecked(&vcc->stats->rx);
24030 break;
24031
24032 case PKT_STATUS:
24033@@ -899,6 +899,8 @@ static int print_buffer(struct sk_buff *
24034 char msg[500];
24035 char item[10];
24036
24037+ pax_track_stack();
24038+
24039 len = buf->len;
24040 for (i = 0; i < len; i++){
24041 if(i % 8 == 0)
24042@@ -1008,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_car
24043 vcc = SKB_CB(oldskb)->vcc;
24044
24045 if (vcc) {
24046- atomic_inc(&vcc->stats->tx);
24047+ atomic_inc_unchecked(&vcc->stats->tx);
24048 solos_pop(vcc, oldskb);
24049 } else
24050 dev_kfree_skb_irq(oldskb);
24051diff -urNp linux-3.0.7/drivers/atm/suni.c linux-3.0.7/drivers/atm/suni.c
24052--- linux-3.0.7/drivers/atm/suni.c 2011-07-21 22:17:23.000000000 -0400
24053+++ linux-3.0.7/drivers/atm/suni.c 2011-08-23 21:47:55.000000000 -0400
24054@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
24055
24056
24057 #define ADD_LIMITED(s,v) \
24058- atomic_add((v),&stats->s); \
24059- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
24060+ atomic_add_unchecked((v),&stats->s); \
24061+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
24062
24063
24064 static void suni_hz(unsigned long from_timer)
24065diff -urNp linux-3.0.7/drivers/atm/uPD98402.c linux-3.0.7/drivers/atm/uPD98402.c
24066--- linux-3.0.7/drivers/atm/uPD98402.c 2011-07-21 22:17:23.000000000 -0400
24067+++ linux-3.0.7/drivers/atm/uPD98402.c 2011-08-23 21:47:55.000000000 -0400
24068@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
24069 struct sonet_stats tmp;
24070 int error = 0;
24071
24072- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
24073+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
24074 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
24075 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
24076 if (zero && !error) {
24077@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
24078
24079
24080 #define ADD_LIMITED(s,v) \
24081- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
24082- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
24083- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
24084+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
24085+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
24086+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
24087
24088
24089 static void stat_event(struct atm_dev *dev)
24090@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
24091 if (reason & uPD98402_INT_PFM) stat_event(dev);
24092 if (reason & uPD98402_INT_PCO) {
24093 (void) GET(PCOCR); /* clear interrupt cause */
24094- atomic_add(GET(HECCT),
24095+ atomic_add_unchecked(GET(HECCT),
24096 &PRIV(dev)->sonet_stats.uncorr_hcs);
24097 }
24098 if ((reason & uPD98402_INT_RFO) &&
24099@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
24100 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
24101 uPD98402_INT_LOS),PIMR); /* enable them */
24102 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
24103- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
24104- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
24105- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
24106+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
24107+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
24108+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
24109 return 0;
24110 }
24111
24112diff -urNp linux-3.0.7/drivers/atm/zatm.c linux-3.0.7/drivers/atm/zatm.c
24113--- linux-3.0.7/drivers/atm/zatm.c 2011-07-21 22:17:23.000000000 -0400
24114+++ linux-3.0.7/drivers/atm/zatm.c 2011-08-23 21:47:55.000000000 -0400
24115@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
24116 }
24117 if (!size) {
24118 dev_kfree_skb_irq(skb);
24119- if (vcc) atomic_inc(&vcc->stats->rx_err);
24120+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
24121 continue;
24122 }
24123 if (!atm_charge(vcc,skb->truesize)) {
24124@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
24125 skb->len = size;
24126 ATM_SKB(skb)->vcc = vcc;
24127 vcc->push(vcc,skb);
24128- atomic_inc(&vcc->stats->rx);
24129+ atomic_inc_unchecked(&vcc->stats->rx);
24130 }
24131 zout(pos & 0xffff,MTA(mbx));
24132 #if 0 /* probably a stupid idea */
24133@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
24134 skb_queue_head(&zatm_vcc->backlog,skb);
24135 break;
24136 }
24137- atomic_inc(&vcc->stats->tx);
24138+ atomic_inc_unchecked(&vcc->stats->tx);
24139 wake_up(&zatm_vcc->tx_wait);
24140 }
24141
24142diff -urNp linux-3.0.7/drivers/base/devtmpfs.c linux-3.0.7/drivers/base/devtmpfs.c
24143--- linux-3.0.7/drivers/base/devtmpfs.c 2011-07-21 22:17:23.000000000 -0400
24144+++ linux-3.0.7/drivers/base/devtmpfs.c 2011-10-06 04:17:55.000000000 -0400
24145@@ -357,7 +357,7 @@ int devtmpfs_mount(const char *mntdir)
24146 if (!dev_mnt)
24147 return 0;
24148
24149- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
24150+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
24151 if (err)
24152 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
24153 else
24154diff -urNp linux-3.0.7/drivers/base/power/wakeup.c linux-3.0.7/drivers/base/power/wakeup.c
24155--- linux-3.0.7/drivers/base/power/wakeup.c 2011-07-21 22:17:23.000000000 -0400
24156+++ linux-3.0.7/drivers/base/power/wakeup.c 2011-08-23 21:47:55.000000000 -0400
24157@@ -29,14 +29,14 @@ bool events_check_enabled;
24158 * They need to be modified together atomically, so it's better to use one
24159 * atomic variable to hold them both.
24160 */
24161-static atomic_t combined_event_count = ATOMIC_INIT(0);
24162+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
24163
24164 #define IN_PROGRESS_BITS (sizeof(int) * 4)
24165 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
24166
24167 static void split_counters(unsigned int *cnt, unsigned int *inpr)
24168 {
24169- unsigned int comb = atomic_read(&combined_event_count);
24170+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
24171
24172 *cnt = (comb >> IN_PROGRESS_BITS);
24173 *inpr = comb & MAX_IN_PROGRESS;
24174@@ -350,7 +350,7 @@ static void wakeup_source_activate(struc
24175 ws->last_time = ktime_get();
24176
24177 /* Increment the counter of events in progress. */
24178- atomic_inc(&combined_event_count);
24179+ atomic_inc_unchecked(&combined_event_count);
24180 }
24181
24182 /**
24183@@ -440,7 +440,7 @@ static void wakeup_source_deactivate(str
24184 * Increment the counter of registered wakeup events and decrement the
24185 * couter of wakeup events in progress simultaneously.
24186 */
24187- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
24188+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
24189 }
24190
24191 /**
24192diff -urNp linux-3.0.7/drivers/block/cciss.c linux-3.0.7/drivers/block/cciss.c
24193--- linux-3.0.7/drivers/block/cciss.c 2011-07-21 22:17:23.000000000 -0400
24194+++ linux-3.0.7/drivers/block/cciss.c 2011-08-23 21:48:14.000000000 -0400
24195@@ -1179,6 +1179,8 @@ static int cciss_ioctl32_passthru(struct
24196 int err;
24197 u32 cp;
24198
24199+ memset(&arg64, 0, sizeof(arg64));
24200+
24201 err = 0;
24202 err |=
24203 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
24204@@ -2986,7 +2988,7 @@ static void start_io(ctlr_info_t *h)
24205 while (!list_empty(&h->reqQ)) {
24206 c = list_entry(h->reqQ.next, CommandList_struct, list);
24207 /* can't do anything if fifo is full */
24208- if ((h->access.fifo_full(h))) {
24209+ if ((h->access->fifo_full(h))) {
24210 dev_warn(&h->pdev->dev, "fifo full\n");
24211 break;
24212 }
24213@@ -2996,7 +2998,7 @@ static void start_io(ctlr_info_t *h)
24214 h->Qdepth--;
24215
24216 /* Tell the controller execute command */
24217- h->access.submit_command(h, c);
24218+ h->access->submit_command(h, c);
24219
24220 /* Put job onto the completed Q */
24221 addQ(&h->cmpQ, c);
24222@@ -3422,17 +3424,17 @@ startio:
24223
24224 static inline unsigned long get_next_completion(ctlr_info_t *h)
24225 {
24226- return h->access.command_completed(h);
24227+ return h->access->command_completed(h);
24228 }
24229
24230 static inline int interrupt_pending(ctlr_info_t *h)
24231 {
24232- return h->access.intr_pending(h);
24233+ return h->access->intr_pending(h);
24234 }
24235
24236 static inline long interrupt_not_for_us(ctlr_info_t *h)
24237 {
24238- return ((h->access.intr_pending(h) == 0) ||
24239+ return ((h->access->intr_pending(h) == 0) ||
24240 (h->interrupts_enabled == 0));
24241 }
24242
24243@@ -3465,7 +3467,7 @@ static inline u32 next_command(ctlr_info
24244 u32 a;
24245
24246 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
24247- return h->access.command_completed(h);
24248+ return h->access->command_completed(h);
24249
24250 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
24251 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
24252@@ -4020,7 +4022,7 @@ static void __devinit cciss_put_controll
24253 trans_support & CFGTBL_Trans_use_short_tags);
24254
24255 /* Change the access methods to the performant access methods */
24256- h->access = SA5_performant_access;
24257+ h->access = &SA5_performant_access;
24258 h->transMethod = CFGTBL_Trans_Performant;
24259
24260 return;
24261@@ -4292,7 +4294,7 @@ static int __devinit cciss_pci_init(ctlr
24262 if (prod_index < 0)
24263 return -ENODEV;
24264 h->product_name = products[prod_index].product_name;
24265- h->access = *(products[prod_index].access);
24266+ h->access = products[prod_index].access;
24267
24268 if (cciss_board_disabled(h)) {
24269 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
24270@@ -5002,7 +5004,7 @@ reinit_after_soft_reset:
24271 }
24272
24273 /* make sure the board interrupts are off */
24274- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24275+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24276 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
24277 if (rc)
24278 goto clean2;
24279@@ -5054,7 +5056,7 @@ reinit_after_soft_reset:
24280 * fake ones to scoop up any residual completions.
24281 */
24282 spin_lock_irqsave(&h->lock, flags);
24283- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24284+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24285 spin_unlock_irqrestore(&h->lock, flags);
24286 free_irq(h->intr[PERF_MODE_INT], h);
24287 rc = cciss_request_irq(h, cciss_msix_discard_completions,
24288@@ -5074,9 +5076,9 @@ reinit_after_soft_reset:
24289 dev_info(&h->pdev->dev, "Board READY.\n");
24290 dev_info(&h->pdev->dev,
24291 "Waiting for stale completions to drain.\n");
24292- h->access.set_intr_mask(h, CCISS_INTR_ON);
24293+ h->access->set_intr_mask(h, CCISS_INTR_ON);
24294 msleep(10000);
24295- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24296+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24297
24298 rc = controller_reset_failed(h->cfgtable);
24299 if (rc)
24300@@ -5099,7 +5101,7 @@ reinit_after_soft_reset:
24301 cciss_scsi_setup(h);
24302
24303 /* Turn the interrupts on so we can service requests */
24304- h->access.set_intr_mask(h, CCISS_INTR_ON);
24305+ h->access->set_intr_mask(h, CCISS_INTR_ON);
24306
24307 /* Get the firmware version */
24308 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
24309@@ -5171,7 +5173,7 @@ static void cciss_shutdown(struct pci_de
24310 kfree(flush_buf);
24311 if (return_code != IO_OK)
24312 dev_warn(&h->pdev->dev, "Error flushing cache\n");
24313- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24314+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24315 free_irq(h->intr[PERF_MODE_INT], h);
24316 }
24317
24318diff -urNp linux-3.0.7/drivers/block/cciss.h linux-3.0.7/drivers/block/cciss.h
24319--- linux-3.0.7/drivers/block/cciss.h 2011-09-02 18:11:21.000000000 -0400
24320+++ linux-3.0.7/drivers/block/cciss.h 2011-08-23 21:47:55.000000000 -0400
24321@@ -100,7 +100,7 @@ struct ctlr_info
24322 /* information about each logical volume */
24323 drive_info_struct *drv[CISS_MAX_LUN];
24324
24325- struct access_method access;
24326+ struct access_method *access;
24327
24328 /* queue and queue Info */
24329 struct list_head reqQ;
24330diff -urNp linux-3.0.7/drivers/block/cpqarray.c linux-3.0.7/drivers/block/cpqarray.c
24331--- linux-3.0.7/drivers/block/cpqarray.c 2011-07-21 22:17:23.000000000 -0400
24332+++ linux-3.0.7/drivers/block/cpqarray.c 2011-08-23 21:48:14.000000000 -0400
24333@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
24334 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
24335 goto Enomem4;
24336 }
24337- hba[i]->access.set_intr_mask(hba[i], 0);
24338+ hba[i]->access->set_intr_mask(hba[i], 0);
24339 if (request_irq(hba[i]->intr, do_ida_intr,
24340 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
24341 {
24342@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
24343 add_timer(&hba[i]->timer);
24344
24345 /* Enable IRQ now that spinlock and rate limit timer are set up */
24346- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
24347+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
24348
24349 for(j=0; j<NWD; j++) {
24350 struct gendisk *disk = ida_gendisk[i][j];
24351@@ -694,7 +694,7 @@ DBGINFO(
24352 for(i=0; i<NR_PRODUCTS; i++) {
24353 if (board_id == products[i].board_id) {
24354 c->product_name = products[i].product_name;
24355- c->access = *(products[i].access);
24356+ c->access = products[i].access;
24357 break;
24358 }
24359 }
24360@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
24361 hba[ctlr]->intr = intr;
24362 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
24363 hba[ctlr]->product_name = products[j].product_name;
24364- hba[ctlr]->access = *(products[j].access);
24365+ hba[ctlr]->access = products[j].access;
24366 hba[ctlr]->ctlr = ctlr;
24367 hba[ctlr]->board_id = board_id;
24368 hba[ctlr]->pci_dev = NULL; /* not PCI */
24369@@ -911,6 +911,8 @@ static void do_ida_request(struct reques
24370 struct scatterlist tmp_sg[SG_MAX];
24371 int i, dir, seg;
24372
24373+ pax_track_stack();
24374+
24375 queue_next:
24376 creq = blk_peek_request(q);
24377 if (!creq)
24378@@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
24379
24380 while((c = h->reqQ) != NULL) {
24381 /* Can't do anything if we're busy */
24382- if (h->access.fifo_full(h) == 0)
24383+ if (h->access->fifo_full(h) == 0)
24384 return;
24385
24386 /* Get the first entry from the request Q */
24387@@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
24388 h->Qdepth--;
24389
24390 /* Tell the controller to do our bidding */
24391- h->access.submit_command(h, c);
24392+ h->access->submit_command(h, c);
24393
24394 /* Get onto the completion Q */
24395 addQ(&h->cmpQ, c);
24396@@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq,
24397 unsigned long flags;
24398 __u32 a,a1;
24399
24400- istat = h->access.intr_pending(h);
24401+ istat = h->access->intr_pending(h);
24402 /* Is this interrupt for us? */
24403 if (istat == 0)
24404 return IRQ_NONE;
24405@@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq,
24406 */
24407 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
24408 if (istat & FIFO_NOT_EMPTY) {
24409- while((a = h->access.command_completed(h))) {
24410+ while((a = h->access->command_completed(h))) {
24411 a1 = a; a &= ~3;
24412 if ((c = h->cmpQ) == NULL)
24413 {
24414@@ -1449,11 +1451,11 @@ static int sendcmd(
24415 /*
24416 * Disable interrupt
24417 */
24418- info_p->access.set_intr_mask(info_p, 0);
24419+ info_p->access->set_intr_mask(info_p, 0);
24420 /* Make sure there is room in the command FIFO */
24421 /* Actually it should be completely empty at this time. */
24422 for (i = 200000; i > 0; i--) {
24423- temp = info_p->access.fifo_full(info_p);
24424+ temp = info_p->access->fifo_full(info_p);
24425 if (temp != 0) {
24426 break;
24427 }
24428@@ -1466,7 +1468,7 @@ DBG(
24429 /*
24430 * Send the cmd
24431 */
24432- info_p->access.submit_command(info_p, c);
24433+ info_p->access->submit_command(info_p, c);
24434 complete = pollcomplete(ctlr);
24435
24436 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
24437@@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t
24438 * we check the new geometry. Then turn interrupts back on when
24439 * we're done.
24440 */
24441- host->access.set_intr_mask(host, 0);
24442+ host->access->set_intr_mask(host, 0);
24443 getgeometry(ctlr);
24444- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
24445+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
24446
24447 for(i=0; i<NWD; i++) {
24448 struct gendisk *disk = ida_gendisk[ctlr][i];
24449@@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
24450 /* Wait (up to 2 seconds) for a command to complete */
24451
24452 for (i = 200000; i > 0; i--) {
24453- done = hba[ctlr]->access.command_completed(hba[ctlr]);
24454+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
24455 if (done == 0) {
24456 udelay(10); /* a short fixed delay */
24457 } else
24458diff -urNp linux-3.0.7/drivers/block/cpqarray.h linux-3.0.7/drivers/block/cpqarray.h
24459--- linux-3.0.7/drivers/block/cpqarray.h 2011-07-21 22:17:23.000000000 -0400
24460+++ linux-3.0.7/drivers/block/cpqarray.h 2011-08-23 21:47:55.000000000 -0400
24461@@ -99,7 +99,7 @@ struct ctlr_info {
24462 drv_info_t drv[NWD];
24463 struct proc_dir_entry *proc;
24464
24465- struct access_method access;
24466+ struct access_method *access;
24467
24468 cmdlist_t *reqQ;
24469 cmdlist_t *cmpQ;
24470diff -urNp linux-3.0.7/drivers/block/DAC960.c linux-3.0.7/drivers/block/DAC960.c
24471--- linux-3.0.7/drivers/block/DAC960.c 2011-07-21 22:17:23.000000000 -0400
24472+++ linux-3.0.7/drivers/block/DAC960.c 2011-08-23 21:48:14.000000000 -0400
24473@@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
24474 unsigned long flags;
24475 int Channel, TargetID;
24476
24477+ pax_track_stack();
24478+
24479 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
24480 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
24481 sizeof(DAC960_SCSI_Inquiry_T) +
24482diff -urNp linux-3.0.7/drivers/block/drbd/drbd_int.h linux-3.0.7/drivers/block/drbd/drbd_int.h
24483--- linux-3.0.7/drivers/block/drbd/drbd_int.h 2011-07-21 22:17:23.000000000 -0400
24484+++ linux-3.0.7/drivers/block/drbd/drbd_int.h 2011-10-06 04:17:55.000000000 -0400
24485@@ -737,7 +737,7 @@ struct drbd_request;
24486 struct drbd_epoch {
24487 struct list_head list;
24488 unsigned int barrier_nr;
24489- atomic_t epoch_size; /* increased on every request added. */
24490+ atomic_unchecked_t epoch_size; /* increased on every request added. */
24491 atomic_t active; /* increased on every req. added, and dec on every finished. */
24492 unsigned long flags;
24493 };
24494@@ -1109,7 +1109,7 @@ struct drbd_conf {
24495 void *int_dig_in;
24496 void *int_dig_vv;
24497 wait_queue_head_t seq_wait;
24498- atomic_t packet_seq;
24499+ atomic_unchecked_t packet_seq;
24500 unsigned int peer_seq;
24501 spinlock_t peer_seq_lock;
24502 unsigned int minor;
24503@@ -1618,30 +1618,30 @@ static inline int drbd_setsockopt(struct
24504
24505 static inline void drbd_tcp_cork(struct socket *sock)
24506 {
24507- int __user val = 1;
24508+ int val = 1;
24509 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
24510- (char __user *)&val, sizeof(val));
24511+ (char __force_user *)&val, sizeof(val));
24512 }
24513
24514 static inline void drbd_tcp_uncork(struct socket *sock)
24515 {
24516- int __user val = 0;
24517+ int val = 0;
24518 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
24519- (char __user *)&val, sizeof(val));
24520+ (char __force_user *)&val, sizeof(val));
24521 }
24522
24523 static inline void drbd_tcp_nodelay(struct socket *sock)
24524 {
24525- int __user val = 1;
24526+ int val = 1;
24527 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
24528- (char __user *)&val, sizeof(val));
24529+ (char __force_user *)&val, sizeof(val));
24530 }
24531
24532 static inline void drbd_tcp_quickack(struct socket *sock)
24533 {
24534- int __user val = 2;
24535+ int val = 2;
24536 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
24537- (char __user *)&val, sizeof(val));
24538+ (char __force_user *)&val, sizeof(val));
24539 }
24540
24541 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
24542diff -urNp linux-3.0.7/drivers/block/drbd/drbd_main.c linux-3.0.7/drivers/block/drbd/drbd_main.c
24543--- linux-3.0.7/drivers/block/drbd/drbd_main.c 2011-07-21 22:17:23.000000000 -0400
24544+++ linux-3.0.7/drivers/block/drbd/drbd_main.c 2011-08-23 21:47:55.000000000 -0400
24545@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_co
24546 p.sector = sector;
24547 p.block_id = block_id;
24548 p.blksize = blksize;
24549- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
24550+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
24551
24552 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
24553 return false;
24554@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *m
24555 p.sector = cpu_to_be64(req->sector);
24556 p.block_id = (unsigned long)req;
24557 p.seq_num = cpu_to_be32(req->seq_num =
24558- atomic_add_return(1, &mdev->packet_seq));
24559+ atomic_add_return_unchecked(1, &mdev->packet_seq));
24560
24561 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
24562
24563@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_
24564 atomic_set(&mdev->unacked_cnt, 0);
24565 atomic_set(&mdev->local_cnt, 0);
24566 atomic_set(&mdev->net_cnt, 0);
24567- atomic_set(&mdev->packet_seq, 0);
24568+ atomic_set_unchecked(&mdev->packet_seq, 0);
24569 atomic_set(&mdev->pp_in_use, 0);
24570 atomic_set(&mdev->pp_in_use_by_net, 0);
24571 atomic_set(&mdev->rs_sect_in, 0);
24572@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf
24573 mdev->receiver.t_state);
24574
24575 /* no need to lock it, I'm the only thread alive */
24576- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
24577- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
24578+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
24579+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
24580 mdev->al_writ_cnt =
24581 mdev->bm_writ_cnt =
24582 mdev->read_cnt =
24583diff -urNp linux-3.0.7/drivers/block/drbd/drbd_nl.c linux-3.0.7/drivers/block/drbd/drbd_nl.c
24584--- linux-3.0.7/drivers/block/drbd/drbd_nl.c 2011-07-21 22:17:23.000000000 -0400
24585+++ linux-3.0.7/drivers/block/drbd/drbd_nl.c 2011-08-23 21:47:55.000000000 -0400
24586@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(stru
24587 module_put(THIS_MODULE);
24588 }
24589
24590-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
24591+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
24592
24593 static unsigned short *
24594 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
24595@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *
24596 cn_reply->id.idx = CN_IDX_DRBD;
24597 cn_reply->id.val = CN_VAL_DRBD;
24598
24599- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
24600+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
24601 cn_reply->ack = 0; /* not used here. */
24602 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
24603 (int)((char *)tl - (char *)reply->tag_list);
24604@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_co
24605 cn_reply->id.idx = CN_IDX_DRBD;
24606 cn_reply->id.val = CN_VAL_DRBD;
24607
24608- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
24609+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
24610 cn_reply->ack = 0; /* not used here. */
24611 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
24612 (int)((char *)tl - (char *)reply->tag_list);
24613@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
24614 cn_reply->id.idx = CN_IDX_DRBD;
24615 cn_reply->id.val = CN_VAL_DRBD;
24616
24617- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
24618+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
24619 cn_reply->ack = 0; // not used here.
24620 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
24621 (int)((char*)tl - (char*)reply->tag_list);
24622@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drb
24623 cn_reply->id.idx = CN_IDX_DRBD;
24624 cn_reply->id.val = CN_VAL_DRBD;
24625
24626- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
24627+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
24628 cn_reply->ack = 0; /* not used here. */
24629 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
24630 (int)((char *)tl - (char *)reply->tag_list);
24631diff -urNp linux-3.0.7/drivers/block/drbd/drbd_receiver.c linux-3.0.7/drivers/block/drbd/drbd_receiver.c
24632--- linux-3.0.7/drivers/block/drbd/drbd_receiver.c 2011-07-21 22:17:23.000000000 -0400
24633+++ linux-3.0.7/drivers/block/drbd/drbd_receiver.c 2011-08-23 21:47:55.000000000 -0400
24634@@ -894,7 +894,7 @@ retry:
24635 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
24636 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
24637
24638- atomic_set(&mdev->packet_seq, 0);
24639+ atomic_set_unchecked(&mdev->packet_seq, 0);
24640 mdev->peer_seq = 0;
24641
24642 drbd_thread_start(&mdev->asender);
24643@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish
24644 do {
24645 next_epoch = NULL;
24646
24647- epoch_size = atomic_read(&epoch->epoch_size);
24648+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
24649
24650 switch (ev & ~EV_CLEANUP) {
24651 case EV_PUT:
24652@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish
24653 rv = FE_DESTROYED;
24654 } else {
24655 epoch->flags = 0;
24656- atomic_set(&epoch->epoch_size, 0);
24657+ atomic_set_unchecked(&epoch->epoch_size, 0);
24658 /* atomic_set(&epoch->active, 0); is already zero */
24659 if (rv == FE_STILL_LIVE)
24660 rv = FE_RECYCLED;
24661@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_c
24662 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
24663 drbd_flush(mdev);
24664
24665- if (atomic_read(&mdev->current_epoch->epoch_size)) {
24666+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
24667 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
24668 if (epoch)
24669 break;
24670 }
24671
24672 epoch = mdev->current_epoch;
24673- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
24674+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
24675
24676 D_ASSERT(atomic_read(&epoch->active) == 0);
24677 D_ASSERT(epoch->flags == 0);
24678@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_c
24679 }
24680
24681 epoch->flags = 0;
24682- atomic_set(&epoch->epoch_size, 0);
24683+ atomic_set_unchecked(&epoch->epoch_size, 0);
24684 atomic_set(&epoch->active, 0);
24685
24686 spin_lock(&mdev->epoch_lock);
24687- if (atomic_read(&mdev->current_epoch->epoch_size)) {
24688+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
24689 list_add(&epoch->list, &mdev->current_epoch->list);
24690 mdev->current_epoch = epoch;
24691 mdev->epochs++;
24692@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf
24693 spin_unlock(&mdev->peer_seq_lock);
24694
24695 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
24696- atomic_inc(&mdev->current_epoch->epoch_size);
24697+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
24698 return drbd_drain_block(mdev, data_size);
24699 }
24700
24701@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf
24702
24703 spin_lock(&mdev->epoch_lock);
24704 e->epoch = mdev->current_epoch;
24705- atomic_inc(&e->epoch->epoch_size);
24706+ atomic_inc_unchecked(&e->epoch->epoch_size);
24707 atomic_inc(&e->epoch->active);
24708 spin_unlock(&mdev->epoch_lock);
24709
24710@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_
24711 D_ASSERT(list_empty(&mdev->done_ee));
24712
24713 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
24714- atomic_set(&mdev->current_epoch->epoch_size, 0);
24715+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
24716 D_ASSERT(list_empty(&mdev->current_epoch->list));
24717 }
24718
24719diff -urNp linux-3.0.7/drivers/block/loop.c linux-3.0.7/drivers/block/loop.c
24720--- linux-3.0.7/drivers/block/loop.c 2011-09-02 18:11:26.000000000 -0400
24721+++ linux-3.0.7/drivers/block/loop.c 2011-10-06 04:17:55.000000000 -0400
24722@@ -283,7 +283,7 @@ static int __do_lo_send_write(struct fil
24723 mm_segment_t old_fs = get_fs();
24724
24725 set_fs(get_ds());
24726- bw = file->f_op->write(file, buf, len, &pos);
24727+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
24728 set_fs(old_fs);
24729 if (likely(bw == len))
24730 return 0;
24731diff -urNp linux-3.0.7/drivers/block/nbd.c linux-3.0.7/drivers/block/nbd.c
24732--- linux-3.0.7/drivers/block/nbd.c 2011-07-21 22:17:23.000000000 -0400
24733+++ linux-3.0.7/drivers/block/nbd.c 2011-08-23 21:48:14.000000000 -0400
24734@@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
24735 struct kvec iov;
24736 sigset_t blocked, oldset;
24737
24738+ pax_track_stack();
24739+
24740 if (unlikely(!sock)) {
24741 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
24742 lo->disk->disk_name, (send ? "send" : "recv"));
24743@@ -572,6 +574,8 @@ static void do_nbd_request(struct reques
24744 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
24745 unsigned int cmd, unsigned long arg)
24746 {
24747+ pax_track_stack();
24748+
24749 switch (cmd) {
24750 case NBD_DISCONNECT: {
24751 struct request sreq;
24752diff -urNp linux-3.0.7/drivers/char/agp/frontend.c linux-3.0.7/drivers/char/agp/frontend.c
24753--- linux-3.0.7/drivers/char/agp/frontend.c 2011-07-21 22:17:23.000000000 -0400
24754+++ linux-3.0.7/drivers/char/agp/frontend.c 2011-08-23 21:47:55.000000000 -0400
24755@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
24756 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
24757 return -EFAULT;
24758
24759- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
24760+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
24761 return -EFAULT;
24762
24763 client = agp_find_client_by_pid(reserve.pid);
24764diff -urNp linux-3.0.7/drivers/char/briq_panel.c linux-3.0.7/drivers/char/briq_panel.c
24765--- linux-3.0.7/drivers/char/briq_panel.c 2011-07-21 22:17:23.000000000 -0400
24766+++ linux-3.0.7/drivers/char/briq_panel.c 2011-08-23 21:48:14.000000000 -0400
24767@@ -9,6 +9,7 @@
24768 #include <linux/types.h>
24769 #include <linux/errno.h>
24770 #include <linux/tty.h>
24771+#include <linux/mutex.h>
24772 #include <linux/timer.h>
24773 #include <linux/kernel.h>
24774 #include <linux/wait.h>
24775@@ -34,6 +35,7 @@ static int vfd_is_open;
24776 static unsigned char vfd[40];
24777 static int vfd_cursor;
24778 static unsigned char ledpb, led;
24779+static DEFINE_MUTEX(vfd_mutex);
24780
24781 static void update_vfd(void)
24782 {
24783@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
24784 if (!vfd_is_open)
24785 return -EBUSY;
24786
24787+ mutex_lock(&vfd_mutex);
24788 for (;;) {
24789 char c;
24790 if (!indx)
24791 break;
24792- if (get_user(c, buf))
24793+ if (get_user(c, buf)) {
24794+ mutex_unlock(&vfd_mutex);
24795 return -EFAULT;
24796+ }
24797 if (esc) {
24798 set_led(c);
24799 esc = 0;
24800@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
24801 buf++;
24802 }
24803 update_vfd();
24804+ mutex_unlock(&vfd_mutex);
24805
24806 return len;
24807 }
24808diff -urNp linux-3.0.7/drivers/char/genrtc.c linux-3.0.7/drivers/char/genrtc.c
24809--- linux-3.0.7/drivers/char/genrtc.c 2011-07-21 22:17:23.000000000 -0400
24810+++ linux-3.0.7/drivers/char/genrtc.c 2011-08-23 21:48:14.000000000 -0400
24811@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
24812 switch (cmd) {
24813
24814 case RTC_PLL_GET:
24815+ memset(&pll, 0, sizeof(pll));
24816 if (get_rtc_pll(&pll))
24817 return -EINVAL;
24818 else
24819diff -urNp linux-3.0.7/drivers/char/hpet.c linux-3.0.7/drivers/char/hpet.c
24820--- linux-3.0.7/drivers/char/hpet.c 2011-07-21 22:17:23.000000000 -0400
24821+++ linux-3.0.7/drivers/char/hpet.c 2011-08-23 21:47:55.000000000 -0400
24822@@ -572,7 +572,7 @@ static inline unsigned long hpet_time_di
24823 }
24824
24825 static int
24826-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
24827+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
24828 struct hpet_info *info)
24829 {
24830 struct hpet_timer __iomem *timer;
24831diff -urNp linux-3.0.7/drivers/char/ipmi/ipmi_msghandler.c linux-3.0.7/drivers/char/ipmi/ipmi_msghandler.c
24832--- linux-3.0.7/drivers/char/ipmi/ipmi_msghandler.c 2011-07-21 22:17:23.000000000 -0400
24833+++ linux-3.0.7/drivers/char/ipmi/ipmi_msghandler.c 2011-08-23 21:48:14.000000000 -0400
24834@@ -415,7 +415,7 @@ struct ipmi_smi {
24835 struct proc_dir_entry *proc_dir;
24836 char proc_dir_name[10];
24837
24838- atomic_t stats[IPMI_NUM_STATS];
24839+ atomic_unchecked_t stats[IPMI_NUM_STATS];
24840
24841 /*
24842 * run_to_completion duplicate of smb_info, smi_info
24843@@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
24844
24845
24846 #define ipmi_inc_stat(intf, stat) \
24847- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
24848+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
24849 #define ipmi_get_stat(intf, stat) \
24850- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
24851+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
24852
24853 static int is_lan_addr(struct ipmi_addr *addr)
24854 {
24855@@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
24856 INIT_LIST_HEAD(&intf->cmd_rcvrs);
24857 init_waitqueue_head(&intf->waitq);
24858 for (i = 0; i < IPMI_NUM_STATS; i++)
24859- atomic_set(&intf->stats[i], 0);
24860+ atomic_set_unchecked(&intf->stats[i], 0);
24861
24862 intf->proc_dir = NULL;
24863
24864@@ -4220,6 +4220,8 @@ static void send_panic_events(char *str)
24865 struct ipmi_smi_msg smi_msg;
24866 struct ipmi_recv_msg recv_msg;
24867
24868+ pax_track_stack();
24869+
24870 si = (struct ipmi_system_interface_addr *) &addr;
24871 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
24872 si->channel = IPMI_BMC_CHANNEL;
24873diff -urNp linux-3.0.7/drivers/char/ipmi/ipmi_si_intf.c linux-3.0.7/drivers/char/ipmi/ipmi_si_intf.c
24874--- linux-3.0.7/drivers/char/ipmi/ipmi_si_intf.c 2011-07-21 22:17:23.000000000 -0400
24875+++ linux-3.0.7/drivers/char/ipmi/ipmi_si_intf.c 2011-08-23 21:47:55.000000000 -0400
24876@@ -277,7 +277,7 @@ struct smi_info {
24877 unsigned char slave_addr;
24878
24879 /* Counters and things for the proc filesystem. */
24880- atomic_t stats[SI_NUM_STATS];
24881+ atomic_unchecked_t stats[SI_NUM_STATS];
24882
24883 struct task_struct *thread;
24884
24885@@ -286,9 +286,9 @@ struct smi_info {
24886 };
24887
24888 #define smi_inc_stat(smi, stat) \
24889- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
24890+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
24891 #define smi_get_stat(smi, stat) \
24892- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
24893+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
24894
24895 #define SI_MAX_PARMS 4
24896
24897@@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info
24898 atomic_set(&new_smi->req_events, 0);
24899 new_smi->run_to_completion = 0;
24900 for (i = 0; i < SI_NUM_STATS; i++)
24901- atomic_set(&new_smi->stats[i], 0);
24902+ atomic_set_unchecked(&new_smi->stats[i], 0);
24903
24904 new_smi->interrupt_disabled = 1;
24905 atomic_set(&new_smi->stop_operation, 0);
24906diff -urNp linux-3.0.7/drivers/char/Kconfig linux-3.0.7/drivers/char/Kconfig
24907--- linux-3.0.7/drivers/char/Kconfig 2011-07-21 22:17:23.000000000 -0400
24908+++ linux-3.0.7/drivers/char/Kconfig 2011-08-23 21:48:14.000000000 -0400
24909@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
24910
24911 config DEVKMEM
24912 bool "/dev/kmem virtual device support"
24913- default y
24914+ default n
24915+ depends on !GRKERNSEC_KMEM
24916 help
24917 Say Y here if you want to support the /dev/kmem device. The
24918 /dev/kmem device is rarely used, but can be used for certain
24919@@ -596,6 +597,7 @@ config DEVPORT
24920 bool
24921 depends on !M68K
24922 depends on ISA || PCI
24923+ depends on !GRKERNSEC_KMEM
24924 default y
24925
24926 source "drivers/s390/char/Kconfig"
24927diff -urNp linux-3.0.7/drivers/char/mbcs.c linux-3.0.7/drivers/char/mbcs.c
24928--- linux-3.0.7/drivers/char/mbcs.c 2011-07-21 22:17:23.000000000 -0400
24929+++ linux-3.0.7/drivers/char/mbcs.c 2011-10-11 10:44:33.000000000 -0400
24930@@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *de
24931 return 0;
24932 }
24933
24934-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
24935+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
24936 {
24937 .part_num = MBCS_PART_NUM,
24938 .mfg_num = MBCS_MFG_NUM,
24939diff -urNp linux-3.0.7/drivers/char/mem.c linux-3.0.7/drivers/char/mem.c
24940--- linux-3.0.7/drivers/char/mem.c 2011-07-21 22:17:23.000000000 -0400
24941+++ linux-3.0.7/drivers/char/mem.c 2011-08-23 21:48:14.000000000 -0400
24942@@ -18,6 +18,7 @@
24943 #include <linux/raw.h>
24944 #include <linux/tty.h>
24945 #include <linux/capability.h>
24946+#include <linux/security.h>
24947 #include <linux/ptrace.h>
24948 #include <linux/device.h>
24949 #include <linux/highmem.h>
24950@@ -34,6 +35,10 @@
24951 # include <linux/efi.h>
24952 #endif
24953
24954+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
24955+extern struct file_operations grsec_fops;
24956+#endif
24957+
24958 static inline unsigned long size_inside_page(unsigned long start,
24959 unsigned long size)
24960 {
24961@@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
24962
24963 while (cursor < to) {
24964 if (!devmem_is_allowed(pfn)) {
24965+#ifdef CONFIG_GRKERNSEC_KMEM
24966+ gr_handle_mem_readwrite(from, to);
24967+#else
24968 printk(KERN_INFO
24969 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
24970 current->comm, from, to);
24971+#endif
24972 return 0;
24973 }
24974 cursor += PAGE_SIZE;
24975@@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
24976 }
24977 return 1;
24978 }
24979+#elif defined(CONFIG_GRKERNSEC_KMEM)
24980+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
24981+{
24982+ return 0;
24983+}
24984 #else
24985 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
24986 {
24987@@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
24988
24989 while (count > 0) {
24990 unsigned long remaining;
24991+ char *temp;
24992
24993 sz = size_inside_page(p, count);
24994
24995@@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
24996 if (!ptr)
24997 return -EFAULT;
24998
24999- remaining = copy_to_user(buf, ptr, sz);
25000+#ifdef CONFIG_PAX_USERCOPY
25001+ temp = kmalloc(sz, GFP_KERNEL);
25002+ if (!temp) {
25003+ unxlate_dev_mem_ptr(p, ptr);
25004+ return -ENOMEM;
25005+ }
25006+ memcpy(temp, ptr, sz);
25007+#else
25008+ temp = ptr;
25009+#endif
25010+
25011+ remaining = copy_to_user(buf, temp, sz);
25012+
25013+#ifdef CONFIG_PAX_USERCOPY
25014+ kfree(temp);
25015+#endif
25016+
25017 unxlate_dev_mem_ptr(p, ptr);
25018 if (remaining)
25019 return -EFAULT;
25020@@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
25021 size_t count, loff_t *ppos)
25022 {
25023 unsigned long p = *ppos;
25024- ssize_t low_count, read, sz;
25025+ ssize_t low_count, read, sz, err = 0;
25026 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
25027- int err = 0;
25028
25029 read = 0;
25030 if (p < (unsigned long) high_memory) {
25031@@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
25032 }
25033 #endif
25034 while (low_count > 0) {
25035+ char *temp;
25036+
25037 sz = size_inside_page(p, low_count);
25038
25039 /*
25040@@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
25041 */
25042 kbuf = xlate_dev_kmem_ptr((char *)p);
25043
25044- if (copy_to_user(buf, kbuf, sz))
25045+#ifdef CONFIG_PAX_USERCOPY
25046+ temp = kmalloc(sz, GFP_KERNEL);
25047+ if (!temp)
25048+ return -ENOMEM;
25049+ memcpy(temp, kbuf, sz);
25050+#else
25051+ temp = kbuf;
25052+#endif
25053+
25054+ err = copy_to_user(buf, temp, sz);
25055+
25056+#ifdef CONFIG_PAX_USERCOPY
25057+ kfree(temp);
25058+#endif
25059+
25060+ if (err)
25061 return -EFAULT;
25062 buf += sz;
25063 p += sz;
25064@@ -866,6 +913,9 @@ static const struct memdev {
25065 #ifdef CONFIG_CRASH_DUMP
25066 [12] = { "oldmem", 0, &oldmem_fops, NULL },
25067 #endif
25068+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
25069+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
25070+#endif
25071 };
25072
25073 static int memory_open(struct inode *inode, struct file *filp)
25074diff -urNp linux-3.0.7/drivers/char/nvram.c linux-3.0.7/drivers/char/nvram.c
25075--- linux-3.0.7/drivers/char/nvram.c 2011-07-21 22:17:23.000000000 -0400
25076+++ linux-3.0.7/drivers/char/nvram.c 2011-08-23 21:47:55.000000000 -0400
25077@@ -246,7 +246,7 @@ static ssize_t nvram_read(struct file *f
25078
25079 spin_unlock_irq(&rtc_lock);
25080
25081- if (copy_to_user(buf, contents, tmp - contents))
25082+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
25083 return -EFAULT;
25084
25085 *ppos = i;
25086diff -urNp linux-3.0.7/drivers/char/random.c linux-3.0.7/drivers/char/random.c
25087--- linux-3.0.7/drivers/char/random.c 2011-09-02 18:11:21.000000000 -0400
25088+++ linux-3.0.7/drivers/char/random.c 2011-08-23 21:48:14.000000000 -0400
25089@@ -261,8 +261,13 @@
25090 /*
25091 * Configuration information
25092 */
25093+#ifdef CONFIG_GRKERNSEC_RANDNET
25094+#define INPUT_POOL_WORDS 512
25095+#define OUTPUT_POOL_WORDS 128
25096+#else
25097 #define INPUT_POOL_WORDS 128
25098 #define OUTPUT_POOL_WORDS 32
25099+#endif
25100 #define SEC_XFER_SIZE 512
25101 #define EXTRACT_SIZE 10
25102
25103@@ -300,10 +305,17 @@ static struct poolinfo {
25104 int poolwords;
25105 int tap1, tap2, tap3, tap4, tap5;
25106 } poolinfo_table[] = {
25107+#ifdef CONFIG_GRKERNSEC_RANDNET
25108+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
25109+ { 512, 411, 308, 208, 104, 1 },
25110+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
25111+ { 128, 103, 76, 51, 25, 1 },
25112+#else
25113 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
25114 { 128, 103, 76, 51, 25, 1 },
25115 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
25116 { 32, 26, 20, 14, 7, 1 },
25117+#endif
25118 #if 0
25119 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
25120 { 2048, 1638, 1231, 819, 411, 1 },
25121@@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
25122
25123 extract_buf(r, tmp);
25124 i = min_t(int, nbytes, EXTRACT_SIZE);
25125- if (copy_to_user(buf, tmp, i)) {
25126+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
25127 ret = -EFAULT;
25128 break;
25129 }
25130@@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
25131 #include <linux/sysctl.h>
25132
25133 static int min_read_thresh = 8, min_write_thresh;
25134-static int max_read_thresh = INPUT_POOL_WORDS * 32;
25135+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
25136 static int max_write_thresh = INPUT_POOL_WORDS * 32;
25137 static char sysctl_bootid[16];
25138
25139diff -urNp linux-3.0.7/drivers/char/sonypi.c linux-3.0.7/drivers/char/sonypi.c
25140--- linux-3.0.7/drivers/char/sonypi.c 2011-07-21 22:17:23.000000000 -0400
25141+++ linux-3.0.7/drivers/char/sonypi.c 2011-08-23 21:47:55.000000000 -0400
25142@@ -55,6 +55,7 @@
25143 #include <asm/uaccess.h>
25144 #include <asm/io.h>
25145 #include <asm/system.h>
25146+#include <asm/local.h>
25147
25148 #include <linux/sonypi.h>
25149
25150@@ -491,7 +492,7 @@ static struct sonypi_device {
25151 spinlock_t fifo_lock;
25152 wait_queue_head_t fifo_proc_list;
25153 struct fasync_struct *fifo_async;
25154- int open_count;
25155+ local_t open_count;
25156 int model;
25157 struct input_dev *input_jog_dev;
25158 struct input_dev *input_key_dev;
25159@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
25160 static int sonypi_misc_release(struct inode *inode, struct file *file)
25161 {
25162 mutex_lock(&sonypi_device.lock);
25163- sonypi_device.open_count--;
25164+ local_dec(&sonypi_device.open_count);
25165 mutex_unlock(&sonypi_device.lock);
25166 return 0;
25167 }
25168@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
25169 {
25170 mutex_lock(&sonypi_device.lock);
25171 /* Flush input queue on first open */
25172- if (!sonypi_device.open_count)
25173+ if (!local_read(&sonypi_device.open_count))
25174 kfifo_reset(&sonypi_device.fifo);
25175- sonypi_device.open_count++;
25176+ local_inc(&sonypi_device.open_count);
25177 mutex_unlock(&sonypi_device.lock);
25178
25179 return 0;
25180diff -urNp linux-3.0.7/drivers/char/tpm/tpm_bios.c linux-3.0.7/drivers/char/tpm/tpm_bios.c
25181--- linux-3.0.7/drivers/char/tpm/tpm_bios.c 2011-07-21 22:17:23.000000000 -0400
25182+++ linux-3.0.7/drivers/char/tpm/tpm_bios.c 2011-10-06 04:17:55.000000000 -0400
25183@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
25184 event = addr;
25185
25186 if ((event->event_type == 0 && event->event_size == 0) ||
25187- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
25188+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
25189 return NULL;
25190
25191 return addr;
25192@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
25193 return NULL;
25194
25195 if ((event->event_type == 0 && event->event_size == 0) ||
25196- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
25197+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
25198 return NULL;
25199
25200 (*pos)++;
25201@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
25202 int i;
25203
25204 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
25205- seq_putc(m, data[i]);
25206+ if (!seq_putc(m, data[i]))
25207+ return -EFAULT;
25208
25209 return 0;
25210 }
25211@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log
25212 log->bios_event_log_end = log->bios_event_log + len;
25213
25214 virt = acpi_os_map_memory(start, len);
25215+ if (!virt) {
25216+ kfree(log->bios_event_log);
25217+ log->bios_event_log = NULL;
25218+ return -EFAULT;
25219+ }
25220
25221- memcpy(log->bios_event_log, virt, len);
25222+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
25223
25224 acpi_os_unmap_memory(virt, len);
25225 return 0;
25226diff -urNp linux-3.0.7/drivers/char/tpm/tpm.c linux-3.0.7/drivers/char/tpm/tpm.c
25227--- linux-3.0.7/drivers/char/tpm/tpm.c 2011-10-16 21:54:53.000000000 -0400
25228+++ linux-3.0.7/drivers/char/tpm/tpm.c 2011-10-16 21:55:27.000000000 -0400
25229@@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_c
25230 chip->vendor.req_complete_val)
25231 goto out_recv;
25232
25233- if ((status == chip->vendor.req_canceled)) {
25234+ if (status == chip->vendor.req_canceled) {
25235 dev_err(chip->dev, "Operation Canceled\n");
25236 rc = -ECANCELED;
25237 goto out;
25238@@ -847,6 +847,8 @@ ssize_t tpm_show_pubek(struct device *de
25239
25240 struct tpm_chip *chip = dev_get_drvdata(dev);
25241
25242+ pax_track_stack();
25243+
25244 tpm_cmd.header.in = tpm_readpubek_header;
25245 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
25246 "attempting to read the PUBEK");
25247diff -urNp linux-3.0.7/drivers/char/virtio_console.c linux-3.0.7/drivers/char/virtio_console.c
25248--- linux-3.0.7/drivers/char/virtio_console.c 2011-07-21 22:17:23.000000000 -0400
25249+++ linux-3.0.7/drivers/char/virtio_console.c 2011-10-06 04:17:55.000000000 -0400
25250@@ -555,7 +555,7 @@ static ssize_t fill_readbuf(struct port
25251 if (to_user) {
25252 ssize_t ret;
25253
25254- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
25255+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
25256 if (ret)
25257 return -EFAULT;
25258 } else {
25259@@ -654,7 +654,7 @@ static ssize_t port_fops_read(struct fil
25260 if (!port_has_data(port) && !port->host_connected)
25261 return 0;
25262
25263- return fill_readbuf(port, ubuf, count, true);
25264+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
25265 }
25266
25267 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
25268diff -urNp linux-3.0.7/drivers/crypto/hifn_795x.c linux-3.0.7/drivers/crypto/hifn_795x.c
25269--- linux-3.0.7/drivers/crypto/hifn_795x.c 2011-07-21 22:17:23.000000000 -0400
25270+++ linux-3.0.7/drivers/crypto/hifn_795x.c 2011-08-23 21:48:14.000000000 -0400
25271@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
25272 0xCA, 0x34, 0x2B, 0x2E};
25273 struct scatterlist sg;
25274
25275+ pax_track_stack();
25276+
25277 memset(src, 0, sizeof(src));
25278 memset(ctx.key, 0, sizeof(ctx.key));
25279
25280diff -urNp linux-3.0.7/drivers/crypto/padlock-aes.c linux-3.0.7/drivers/crypto/padlock-aes.c
25281--- linux-3.0.7/drivers/crypto/padlock-aes.c 2011-07-21 22:17:23.000000000 -0400
25282+++ linux-3.0.7/drivers/crypto/padlock-aes.c 2011-08-23 21:48:14.000000000 -0400
25283@@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
25284 struct crypto_aes_ctx gen_aes;
25285 int cpu;
25286
25287+ pax_track_stack();
25288+
25289 if (key_len % 8) {
25290 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
25291 return -EINVAL;
25292diff -urNp linux-3.0.7/drivers/dma/ioat/dma_v3.c linux-3.0.7/drivers/dma/ioat/dma_v3.c
25293--- linux-3.0.7/drivers/dma/ioat/dma_v3.c 2011-07-21 22:17:23.000000000 -0400
25294+++ linux-3.0.7/drivers/dma/ioat/dma_v3.c 2011-10-11 10:44:33.000000000 -0400
25295@@ -73,10 +73,10 @@
25296 /* provide a lookup table for setting the source address in the base or
25297 * extended descriptor of an xor or pq descriptor
25298 */
25299-static const u8 xor_idx_to_desc __read_mostly = 0xd0;
25300-static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
25301-static const u8 pq_idx_to_desc __read_mostly = 0xf8;
25302-static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
25303+static const u8 xor_idx_to_desc = 0xd0;
25304+static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
25305+static const u8 pq_idx_to_desc = 0xf8;
25306+static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
25307
25308 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
25309 {
25310diff -urNp linux-3.0.7/drivers/edac/amd64_edac.c linux-3.0.7/drivers/edac/amd64_edac.c
25311--- linux-3.0.7/drivers/edac/amd64_edac.c 2011-07-21 22:17:23.000000000 -0400
25312+++ linux-3.0.7/drivers/edac/amd64_edac.c 2011-10-11 10:44:33.000000000 -0400
25313@@ -2670,7 +2670,7 @@ static void __devexit amd64_remove_one_i
25314 * PCI core identifies what devices are on a system during boot, and then
25315 * inquiry this table to see if this driver is for a given device found.
25316 */
25317-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
25318+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
25319 {
25320 .vendor = PCI_VENDOR_ID_AMD,
25321 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
25322diff -urNp linux-3.0.7/drivers/edac/amd76x_edac.c linux-3.0.7/drivers/edac/amd76x_edac.c
25323--- linux-3.0.7/drivers/edac/amd76x_edac.c 2011-07-21 22:17:23.000000000 -0400
25324+++ linux-3.0.7/drivers/edac/amd76x_edac.c 2011-10-11 10:44:33.000000000 -0400
25325@@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(
25326 edac_mc_free(mci);
25327 }
25328
25329-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
25330+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
25331 {
25332 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25333 AMD762},
25334diff -urNp linux-3.0.7/drivers/edac/e752x_edac.c linux-3.0.7/drivers/edac/e752x_edac.c
25335--- linux-3.0.7/drivers/edac/e752x_edac.c 2011-07-21 22:17:23.000000000 -0400
25336+++ linux-3.0.7/drivers/edac/e752x_edac.c 2011-10-11 10:44:33.000000000 -0400
25337@@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(s
25338 edac_mc_free(mci);
25339 }
25340
25341-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
25342+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
25343 {
25344 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25345 E7520},
25346diff -urNp linux-3.0.7/drivers/edac/e7xxx_edac.c linux-3.0.7/drivers/edac/e7xxx_edac.c
25347--- linux-3.0.7/drivers/edac/e7xxx_edac.c 2011-07-21 22:17:23.000000000 -0400
25348+++ linux-3.0.7/drivers/edac/e7xxx_edac.c 2011-10-11 10:44:33.000000000 -0400
25349@@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(s
25350 edac_mc_free(mci);
25351 }
25352
25353-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
25354+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
25355 {
25356 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25357 E7205},
25358diff -urNp linux-3.0.7/drivers/edac/edac_pci_sysfs.c linux-3.0.7/drivers/edac/edac_pci_sysfs.c
25359--- linux-3.0.7/drivers/edac/edac_pci_sysfs.c 2011-07-21 22:17:23.000000000 -0400
25360+++ linux-3.0.7/drivers/edac/edac_pci_sysfs.c 2011-08-23 21:47:55.000000000 -0400
25361@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
25362 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
25363 static int edac_pci_poll_msec = 1000; /* one second workq period */
25364
25365-static atomic_t pci_parity_count = ATOMIC_INIT(0);
25366-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
25367+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
25368+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
25369
25370 static struct kobject *edac_pci_top_main_kobj;
25371 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
25372@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
25373 edac_printk(KERN_CRIT, EDAC_PCI,
25374 "Signaled System Error on %s\n",
25375 pci_name(dev));
25376- atomic_inc(&pci_nonparity_count);
25377+ atomic_inc_unchecked(&pci_nonparity_count);
25378 }
25379
25380 if (status & (PCI_STATUS_PARITY)) {
25381@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
25382 "Master Data Parity Error on %s\n",
25383 pci_name(dev));
25384
25385- atomic_inc(&pci_parity_count);
25386+ atomic_inc_unchecked(&pci_parity_count);
25387 }
25388
25389 if (status & (PCI_STATUS_DETECTED_PARITY)) {
25390@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
25391 "Detected Parity Error on %s\n",
25392 pci_name(dev));
25393
25394- atomic_inc(&pci_parity_count);
25395+ atomic_inc_unchecked(&pci_parity_count);
25396 }
25397 }
25398
25399@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
25400 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
25401 "Signaled System Error on %s\n",
25402 pci_name(dev));
25403- atomic_inc(&pci_nonparity_count);
25404+ atomic_inc_unchecked(&pci_nonparity_count);
25405 }
25406
25407 if (status & (PCI_STATUS_PARITY)) {
25408@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
25409 "Master Data Parity Error on "
25410 "%s\n", pci_name(dev));
25411
25412- atomic_inc(&pci_parity_count);
25413+ atomic_inc_unchecked(&pci_parity_count);
25414 }
25415
25416 if (status & (PCI_STATUS_DETECTED_PARITY)) {
25417@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
25418 "Detected Parity Error on %s\n",
25419 pci_name(dev));
25420
25421- atomic_inc(&pci_parity_count);
25422+ atomic_inc_unchecked(&pci_parity_count);
25423 }
25424 }
25425 }
25426@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
25427 if (!check_pci_errors)
25428 return;
25429
25430- before_count = atomic_read(&pci_parity_count);
25431+ before_count = atomic_read_unchecked(&pci_parity_count);
25432
25433 /* scan all PCI devices looking for a Parity Error on devices and
25434 * bridges.
25435@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
25436 /* Only if operator has selected panic on PCI Error */
25437 if (edac_pci_get_panic_on_pe()) {
25438 /* If the count is different 'after' from 'before' */
25439- if (before_count != atomic_read(&pci_parity_count))
25440+ if (before_count != atomic_read_unchecked(&pci_parity_count))
25441 panic("EDAC: PCI Parity Error");
25442 }
25443 }
25444diff -urNp linux-3.0.7/drivers/edac/i3000_edac.c linux-3.0.7/drivers/edac/i3000_edac.c
25445--- linux-3.0.7/drivers/edac/i3000_edac.c 2011-07-21 22:17:23.000000000 -0400
25446+++ linux-3.0.7/drivers/edac/i3000_edac.c 2011-10-11 10:44:33.000000000 -0400
25447@@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(s
25448 edac_mc_free(mci);
25449 }
25450
25451-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
25452+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
25453 {
25454 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25455 I3000},
25456diff -urNp linux-3.0.7/drivers/edac/i3200_edac.c linux-3.0.7/drivers/edac/i3200_edac.c
25457--- linux-3.0.7/drivers/edac/i3200_edac.c 2011-07-21 22:17:23.000000000 -0400
25458+++ linux-3.0.7/drivers/edac/i3200_edac.c 2011-10-11 10:44:33.000000000 -0400
25459@@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(s
25460 edac_mc_free(mci);
25461 }
25462
25463-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
25464+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
25465 {
25466 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25467 I3200},
25468diff -urNp linux-3.0.7/drivers/edac/i5000_edac.c linux-3.0.7/drivers/edac/i5000_edac.c
25469--- linux-3.0.7/drivers/edac/i5000_edac.c 2011-07-21 22:17:23.000000000 -0400
25470+++ linux-3.0.7/drivers/edac/i5000_edac.c 2011-10-11 10:44:33.000000000 -0400
25471@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(s
25472 *
25473 * The "E500P" device is the first device supported.
25474 */
25475-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
25476+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
25477 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
25478 .driver_data = I5000P},
25479
25480diff -urNp linux-3.0.7/drivers/edac/i5100_edac.c linux-3.0.7/drivers/edac/i5100_edac.c
25481--- linux-3.0.7/drivers/edac/i5100_edac.c 2011-07-21 22:17:23.000000000 -0400
25482+++ linux-3.0.7/drivers/edac/i5100_edac.c 2011-10-11 10:44:33.000000000 -0400
25483@@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(s
25484 edac_mc_free(mci);
25485 }
25486
25487-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
25488+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
25489 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
25490 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
25491 { 0, }
25492diff -urNp linux-3.0.7/drivers/edac/i5400_edac.c linux-3.0.7/drivers/edac/i5400_edac.c
25493--- linux-3.0.7/drivers/edac/i5400_edac.c 2011-07-21 22:17:23.000000000 -0400
25494+++ linux-3.0.7/drivers/edac/i5400_edac.c 2011-10-11 10:44:33.000000000 -0400
25495@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(s
25496 *
25497 * The "E500P" device is the first device supported.
25498 */
25499-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
25500+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
25501 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
25502 {0,} /* 0 terminated list. */
25503 };
25504diff -urNp linux-3.0.7/drivers/edac/i7300_edac.c linux-3.0.7/drivers/edac/i7300_edac.c
25505--- linux-3.0.7/drivers/edac/i7300_edac.c 2011-07-21 22:17:23.000000000 -0400
25506+++ linux-3.0.7/drivers/edac/i7300_edac.c 2011-10-11 10:44:33.000000000 -0400
25507@@ -1191,7 +1191,7 @@ static void __devexit i7300_remove_one(s
25508 *
25509 * Has only 8086:360c PCI ID
25510 */
25511-static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
25512+static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
25513 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
25514 {0,} /* 0 terminated list. */
25515 };
25516diff -urNp linux-3.0.7/drivers/edac/i7core_edac.c linux-3.0.7/drivers/edac/i7core_edac.c
25517--- linux-3.0.7/drivers/edac/i7core_edac.c 2011-09-02 18:11:26.000000000 -0400
25518+++ linux-3.0.7/drivers/edac/i7core_edac.c 2011-10-11 10:44:33.000000000 -0400
25519@@ -359,7 +359,7 @@ static const struct pci_id_table pci_dev
25520 /*
25521 * pci_device_id table for which devices we are looking for
25522 */
25523-static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
25524+static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
25525 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
25526 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
25527 {0,} /* 0 terminated list. */
25528diff -urNp linux-3.0.7/drivers/edac/i82443bxgx_edac.c linux-3.0.7/drivers/edac/i82443bxgx_edac.c
25529--- linux-3.0.7/drivers/edac/i82443bxgx_edac.c 2011-07-21 22:17:23.000000000 -0400
25530+++ linux-3.0.7/drivers/edac/i82443bxgx_edac.c 2011-10-11 10:44:33.000000000 -0400
25531@@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_
25532
25533 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
25534
25535-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
25536+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
25537 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
25538 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
25539 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
25540diff -urNp linux-3.0.7/drivers/edac/i82860_edac.c linux-3.0.7/drivers/edac/i82860_edac.c
25541--- linux-3.0.7/drivers/edac/i82860_edac.c 2011-07-21 22:17:23.000000000 -0400
25542+++ linux-3.0.7/drivers/edac/i82860_edac.c 2011-10-11 10:44:33.000000000 -0400
25543@@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(
25544 edac_mc_free(mci);
25545 }
25546
25547-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
25548+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
25549 {
25550 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25551 I82860},
25552diff -urNp linux-3.0.7/drivers/edac/i82875p_edac.c linux-3.0.7/drivers/edac/i82875p_edac.c
25553--- linux-3.0.7/drivers/edac/i82875p_edac.c 2011-07-21 22:17:23.000000000 -0400
25554+++ linux-3.0.7/drivers/edac/i82875p_edac.c 2011-10-11 10:44:33.000000000 -0400
25555@@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one
25556 edac_mc_free(mci);
25557 }
25558
25559-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
25560+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
25561 {
25562 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25563 I82875P},
25564diff -urNp linux-3.0.7/drivers/edac/i82975x_edac.c linux-3.0.7/drivers/edac/i82975x_edac.c
25565--- linux-3.0.7/drivers/edac/i82975x_edac.c 2011-07-21 22:17:23.000000000 -0400
25566+++ linux-3.0.7/drivers/edac/i82975x_edac.c 2011-10-11 10:44:33.000000000 -0400
25567@@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one
25568 edac_mc_free(mci);
25569 }
25570
25571-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
25572+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
25573 {
25574 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25575 I82975X
25576diff -urNp linux-3.0.7/drivers/edac/mce_amd.h linux-3.0.7/drivers/edac/mce_amd.h
25577--- linux-3.0.7/drivers/edac/mce_amd.h 2011-07-21 22:17:23.000000000 -0400
25578+++ linux-3.0.7/drivers/edac/mce_amd.h 2011-08-23 21:47:55.000000000 -0400
25579@@ -83,7 +83,7 @@ struct amd_decoder_ops {
25580 bool (*dc_mce)(u16, u8);
25581 bool (*ic_mce)(u16, u8);
25582 bool (*nb_mce)(u16, u8);
25583-};
25584+} __no_const;
25585
25586 void amd_report_gart_errors(bool);
25587 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
25588diff -urNp linux-3.0.7/drivers/edac/r82600_edac.c linux-3.0.7/drivers/edac/r82600_edac.c
25589--- linux-3.0.7/drivers/edac/r82600_edac.c 2011-07-21 22:17:23.000000000 -0400
25590+++ linux-3.0.7/drivers/edac/r82600_edac.c 2011-10-11 10:44:33.000000000 -0400
25591@@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(
25592 edac_mc_free(mci);
25593 }
25594
25595-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
25596+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
25597 {
25598 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
25599 },
25600diff -urNp linux-3.0.7/drivers/edac/x38_edac.c linux-3.0.7/drivers/edac/x38_edac.c
25601--- linux-3.0.7/drivers/edac/x38_edac.c 2011-07-21 22:17:23.000000000 -0400
25602+++ linux-3.0.7/drivers/edac/x38_edac.c 2011-10-11 10:44:33.000000000 -0400
25603@@ -440,7 +440,7 @@ static void __devexit x38_remove_one(str
25604 edac_mc_free(mci);
25605 }
25606
25607-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
25608+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
25609 {
25610 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25611 X38},
25612diff -urNp linux-3.0.7/drivers/firewire/core-card.c linux-3.0.7/drivers/firewire/core-card.c
25613--- linux-3.0.7/drivers/firewire/core-card.c 2011-07-21 22:17:23.000000000 -0400
25614+++ linux-3.0.7/drivers/firewire/core-card.c 2011-08-23 21:47:55.000000000 -0400
25615@@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
25616
25617 void fw_core_remove_card(struct fw_card *card)
25618 {
25619- struct fw_card_driver dummy_driver = dummy_driver_template;
25620+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
25621
25622 card->driver->update_phy_reg(card, 4,
25623 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
25624diff -urNp linux-3.0.7/drivers/firewire/core-cdev.c linux-3.0.7/drivers/firewire/core-cdev.c
25625--- linux-3.0.7/drivers/firewire/core-cdev.c 2011-09-02 18:11:21.000000000 -0400
25626+++ linux-3.0.7/drivers/firewire/core-cdev.c 2011-08-23 21:47:55.000000000 -0400
25627@@ -1313,8 +1313,7 @@ static int init_iso_resource(struct clie
25628 int ret;
25629
25630 if ((request->channels == 0 && request->bandwidth == 0) ||
25631- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
25632- request->bandwidth < 0)
25633+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
25634 return -EINVAL;
25635
25636 r = kmalloc(sizeof(*r), GFP_KERNEL);
25637diff -urNp linux-3.0.7/drivers/firewire/core.h linux-3.0.7/drivers/firewire/core.h
25638--- linux-3.0.7/drivers/firewire/core.h 2011-07-21 22:17:23.000000000 -0400
25639+++ linux-3.0.7/drivers/firewire/core.h 2011-08-23 21:47:55.000000000 -0400
25640@@ -101,6 +101,7 @@ struct fw_card_driver {
25641
25642 int (*stop_iso)(struct fw_iso_context *ctx);
25643 };
25644+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
25645
25646 void fw_card_initialize(struct fw_card *card,
25647 const struct fw_card_driver *driver, struct device *device);
25648diff -urNp linux-3.0.7/drivers/firewire/core-transaction.c linux-3.0.7/drivers/firewire/core-transaction.c
25649--- linux-3.0.7/drivers/firewire/core-transaction.c 2011-07-21 22:17:23.000000000 -0400
25650+++ linux-3.0.7/drivers/firewire/core-transaction.c 2011-08-23 21:48:14.000000000 -0400
25651@@ -37,6 +37,7 @@
25652 #include <linux/timer.h>
25653 #include <linux/types.h>
25654 #include <linux/workqueue.h>
25655+#include <linux/sched.h>
25656
25657 #include <asm/byteorder.h>
25658
25659@@ -422,6 +423,8 @@ int fw_run_transaction(struct fw_card *c
25660 struct transaction_callback_data d;
25661 struct fw_transaction t;
25662
25663+ pax_track_stack();
25664+
25665 init_timer_on_stack(&t.split_timeout_timer);
25666 init_completion(&d.done);
25667 d.payload = payload;
25668diff -urNp linux-3.0.7/drivers/firmware/dmi_scan.c linux-3.0.7/drivers/firmware/dmi_scan.c
25669--- linux-3.0.7/drivers/firmware/dmi_scan.c 2011-07-21 22:17:23.000000000 -0400
25670+++ linux-3.0.7/drivers/firmware/dmi_scan.c 2011-10-06 04:17:55.000000000 -0400
25671@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
25672 }
25673 }
25674 else {
25675- /*
25676- * no iounmap() for that ioremap(); it would be a no-op, but
25677- * it's so early in setup that sucker gets confused into doing
25678- * what it shouldn't if we actually call it.
25679- */
25680 p = dmi_ioremap(0xF0000, 0x10000);
25681 if (p == NULL)
25682 goto error;
25683@@ -725,7 +720,7 @@ int dmi_walk(void (*decode)(const struct
25684 if (buf == NULL)
25685 return -1;
25686
25687- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
25688+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
25689
25690 iounmap(buf);
25691 return 0;
25692diff -urNp linux-3.0.7/drivers/gpio/vr41xx_giu.c linux-3.0.7/drivers/gpio/vr41xx_giu.c
25693--- linux-3.0.7/drivers/gpio/vr41xx_giu.c 2011-07-21 22:17:23.000000000 -0400
25694+++ linux-3.0.7/drivers/gpio/vr41xx_giu.c 2011-08-23 21:47:55.000000000 -0400
25695@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
25696 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
25697 maskl, pendl, maskh, pendh);
25698
25699- atomic_inc(&irq_err_count);
25700+ atomic_inc_unchecked(&irq_err_count);
25701
25702 return -EINVAL;
25703 }
25704diff -urNp linux-3.0.7/drivers/gpu/drm/drm_crtc.c linux-3.0.7/drivers/gpu/drm/drm_crtc.c
25705--- linux-3.0.7/drivers/gpu/drm/drm_crtc.c 2011-07-21 22:17:23.000000000 -0400
25706+++ linux-3.0.7/drivers/gpu/drm/drm_crtc.c 2011-10-06 04:17:55.000000000 -0400
25707@@ -1372,7 +1372,7 @@ int drm_mode_getconnector(struct drm_dev
25708 */
25709 if ((out_resp->count_modes >= mode_count) && mode_count) {
25710 copied = 0;
25711- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
25712+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
25713 list_for_each_entry(mode, &connector->modes, head) {
25714 drm_crtc_convert_to_umode(&u_mode, mode);
25715 if (copy_to_user(mode_ptr + copied,
25716@@ -1387,8 +1387,8 @@ int drm_mode_getconnector(struct drm_dev
25717
25718 if ((out_resp->count_props >= props_count) && props_count) {
25719 copied = 0;
25720- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
25721- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
25722+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
25723+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
25724 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
25725 if (connector->property_ids[i] != 0) {
25726 if (put_user(connector->property_ids[i],
25727@@ -1410,7 +1410,7 @@ int drm_mode_getconnector(struct drm_dev
25728
25729 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
25730 copied = 0;
25731- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
25732+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
25733 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
25734 if (connector->encoder_ids[i] != 0) {
25735 if (put_user(connector->encoder_ids[i],
25736@@ -1569,7 +1569,7 @@ int drm_mode_setcrtc(struct drm_device *
25737 }
25738
25739 for (i = 0; i < crtc_req->count_connectors; i++) {
25740- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
25741+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
25742 if (get_user(out_id, &set_connectors_ptr[i])) {
25743 ret = -EFAULT;
25744 goto out;
25745@@ -1850,7 +1850,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_de
25746 fb = obj_to_fb(obj);
25747
25748 num_clips = r->num_clips;
25749- clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
25750+ clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
25751
25752 if (!num_clips != !clips_ptr) {
25753 ret = -EINVAL;
25754@@ -2270,7 +2270,7 @@ int drm_mode_getproperty_ioctl(struct dr
25755 out_resp->flags = property->flags;
25756
25757 if ((out_resp->count_values >= value_count) && value_count) {
25758- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
25759+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
25760 for (i = 0; i < value_count; i++) {
25761 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
25762 ret = -EFAULT;
25763@@ -2283,7 +2283,7 @@ int drm_mode_getproperty_ioctl(struct dr
25764 if (property->flags & DRM_MODE_PROP_ENUM) {
25765 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
25766 copied = 0;
25767- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
25768+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
25769 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
25770
25771 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
25772@@ -2306,7 +2306,7 @@ int drm_mode_getproperty_ioctl(struct dr
25773 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
25774 copied = 0;
25775 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
25776- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
25777+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
25778
25779 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
25780 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
25781@@ -2367,7 +2367,7 @@ int drm_mode_getblob_ioctl(struct drm_de
25782 struct drm_mode_get_blob *out_resp = data;
25783 struct drm_property_blob *blob;
25784 int ret = 0;
25785- void *blob_ptr;
25786+ void __user *blob_ptr;
25787
25788 if (!drm_core_check_feature(dev, DRIVER_MODESET))
25789 return -EINVAL;
25790@@ -2381,7 +2381,7 @@ int drm_mode_getblob_ioctl(struct drm_de
25791 blob = obj_to_blob(obj);
25792
25793 if (out_resp->length == blob->length) {
25794- blob_ptr = (void *)(unsigned long)out_resp->data;
25795+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
25796 if (copy_to_user(blob_ptr, blob->data, blob->length)){
25797 ret = -EFAULT;
25798 goto done;
25799diff -urNp linux-3.0.7/drivers/gpu/drm/drm_crtc_helper.c linux-3.0.7/drivers/gpu/drm/drm_crtc_helper.c
25800--- linux-3.0.7/drivers/gpu/drm/drm_crtc_helper.c 2011-07-21 22:17:23.000000000 -0400
25801+++ linux-3.0.7/drivers/gpu/drm/drm_crtc_helper.c 2011-08-23 21:48:14.000000000 -0400
25802@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
25803 struct drm_crtc *tmp;
25804 int crtc_mask = 1;
25805
25806- WARN(!crtc, "checking null crtc?\n");
25807+ BUG_ON(!crtc);
25808
25809 dev = crtc->dev;
25810
25811@@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
25812 struct drm_encoder *encoder;
25813 bool ret = true;
25814
25815+ pax_track_stack();
25816+
25817 crtc->enabled = drm_helper_crtc_in_use(crtc);
25818 if (!crtc->enabled)
25819 return true;
25820diff -urNp linux-3.0.7/drivers/gpu/drm/drm_drv.c linux-3.0.7/drivers/gpu/drm/drm_drv.c
25821--- linux-3.0.7/drivers/gpu/drm/drm_drv.c 2011-07-21 22:17:23.000000000 -0400
25822+++ linux-3.0.7/drivers/gpu/drm/drm_drv.c 2011-10-06 04:17:55.000000000 -0400
25823@@ -307,7 +307,7 @@ module_exit(drm_core_exit);
25824 /**
25825 * Copy and IOCTL return string to user space
25826 */
25827-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
25828+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
25829 {
25830 int len;
25831
25832@@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
25833
25834 dev = file_priv->minor->dev;
25835 atomic_inc(&dev->ioctl_count);
25836- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
25837+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
25838 ++file_priv->ioctl_count;
25839
25840 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
25841diff -urNp linux-3.0.7/drivers/gpu/drm/drm_fops.c linux-3.0.7/drivers/gpu/drm/drm_fops.c
25842--- linux-3.0.7/drivers/gpu/drm/drm_fops.c 2011-07-21 22:17:23.000000000 -0400
25843+++ linux-3.0.7/drivers/gpu/drm/drm_fops.c 2011-08-23 21:47:55.000000000 -0400
25844@@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
25845 }
25846
25847 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
25848- atomic_set(&dev->counts[i], 0);
25849+ atomic_set_unchecked(&dev->counts[i], 0);
25850
25851 dev->sigdata.lock = NULL;
25852
25853@@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
25854
25855 retcode = drm_open_helper(inode, filp, dev);
25856 if (!retcode) {
25857- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
25858- if (!dev->open_count++)
25859+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
25860+ if (local_inc_return(&dev->open_count) == 1)
25861 retcode = drm_setup(dev);
25862 }
25863 if (!retcode) {
25864@@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
25865
25866 mutex_lock(&drm_global_mutex);
25867
25868- DRM_DEBUG("open_count = %d\n", dev->open_count);
25869+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
25870
25871 if (dev->driver->preclose)
25872 dev->driver->preclose(dev, file_priv);
25873@@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
25874 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
25875 task_pid_nr(current),
25876 (long)old_encode_dev(file_priv->minor->device),
25877- dev->open_count);
25878+ local_read(&dev->open_count));
25879
25880 /* if the master has gone away we can't do anything with the lock */
25881 if (file_priv->minor->master)
25882@@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
25883 * End inline drm_release
25884 */
25885
25886- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
25887- if (!--dev->open_count) {
25888+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
25889+ if (local_dec_and_test(&dev->open_count)) {
25890 if (atomic_read(&dev->ioctl_count)) {
25891 DRM_ERROR("Device busy: %d\n",
25892 atomic_read(&dev->ioctl_count));
25893diff -urNp linux-3.0.7/drivers/gpu/drm/drm_global.c linux-3.0.7/drivers/gpu/drm/drm_global.c
25894--- linux-3.0.7/drivers/gpu/drm/drm_global.c 2011-07-21 22:17:23.000000000 -0400
25895+++ linux-3.0.7/drivers/gpu/drm/drm_global.c 2011-08-23 21:47:55.000000000 -0400
25896@@ -36,7 +36,7 @@
25897 struct drm_global_item {
25898 struct mutex mutex;
25899 void *object;
25900- int refcount;
25901+ atomic_t refcount;
25902 };
25903
25904 static struct drm_global_item glob[DRM_GLOBAL_NUM];
25905@@ -49,7 +49,7 @@ void drm_global_init(void)
25906 struct drm_global_item *item = &glob[i];
25907 mutex_init(&item->mutex);
25908 item->object = NULL;
25909- item->refcount = 0;
25910+ atomic_set(&item->refcount, 0);
25911 }
25912 }
25913
25914@@ -59,7 +59,7 @@ void drm_global_release(void)
25915 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
25916 struct drm_global_item *item = &glob[i];
25917 BUG_ON(item->object != NULL);
25918- BUG_ON(item->refcount != 0);
25919+ BUG_ON(atomic_read(&item->refcount) != 0);
25920 }
25921 }
25922
25923@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
25924 void *object;
25925
25926 mutex_lock(&item->mutex);
25927- if (item->refcount == 0) {
25928+ if (atomic_read(&item->refcount) == 0) {
25929 item->object = kzalloc(ref->size, GFP_KERNEL);
25930 if (unlikely(item->object == NULL)) {
25931 ret = -ENOMEM;
25932@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
25933 goto out_err;
25934
25935 }
25936- ++item->refcount;
25937+ atomic_inc(&item->refcount);
25938 ref->object = item->object;
25939 object = item->object;
25940 mutex_unlock(&item->mutex);
25941@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
25942 struct drm_global_item *item = &glob[ref->global_type];
25943
25944 mutex_lock(&item->mutex);
25945- BUG_ON(item->refcount == 0);
25946+ BUG_ON(atomic_read(&item->refcount) == 0);
25947 BUG_ON(ref->object != item->object);
25948- if (--item->refcount == 0) {
25949+ if (atomic_dec_and_test(&item->refcount)) {
25950 ref->release(ref);
25951 item->object = NULL;
25952 }
25953diff -urNp linux-3.0.7/drivers/gpu/drm/drm_info.c linux-3.0.7/drivers/gpu/drm/drm_info.c
25954--- linux-3.0.7/drivers/gpu/drm/drm_info.c 2011-07-21 22:17:23.000000000 -0400
25955+++ linux-3.0.7/drivers/gpu/drm/drm_info.c 2011-08-23 21:48:14.000000000 -0400
25956@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
25957 struct drm_local_map *map;
25958 struct drm_map_list *r_list;
25959
25960- /* Hardcoded from _DRM_FRAME_BUFFER,
25961- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
25962- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
25963- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
25964+ static const char * const types[] = {
25965+ [_DRM_FRAME_BUFFER] = "FB",
25966+ [_DRM_REGISTERS] = "REG",
25967+ [_DRM_SHM] = "SHM",
25968+ [_DRM_AGP] = "AGP",
25969+ [_DRM_SCATTER_GATHER] = "SG",
25970+ [_DRM_CONSISTENT] = "PCI",
25971+ [_DRM_GEM] = "GEM" };
25972 const char *type;
25973 int i;
25974
25975@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
25976 map = r_list->map;
25977 if (!map)
25978 continue;
25979- if (map->type < 0 || map->type > 5)
25980+ if (map->type >= ARRAY_SIZE(types))
25981 type = "??";
25982 else
25983 type = types[map->type];
25984@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
25985 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
25986 vma->vm_flags & VM_LOCKED ? 'l' : '-',
25987 vma->vm_flags & VM_IO ? 'i' : '-',
25988+#ifdef CONFIG_GRKERNSEC_HIDESYM
25989+ 0);
25990+#else
25991 vma->vm_pgoff);
25992+#endif
25993
25994 #if defined(__i386__)
25995 pgprot = pgprot_val(vma->vm_page_prot);
25996diff -urNp linux-3.0.7/drivers/gpu/drm/drm_ioc32.c linux-3.0.7/drivers/gpu/drm/drm_ioc32.c
25997--- linux-3.0.7/drivers/gpu/drm/drm_ioc32.c 2011-07-21 22:17:23.000000000 -0400
25998+++ linux-3.0.7/drivers/gpu/drm/drm_ioc32.c 2011-10-06 04:17:55.000000000 -0400
25999@@ -455,7 +455,7 @@ static int compat_drm_infobufs(struct fi
26000 request = compat_alloc_user_space(nbytes);
26001 if (!access_ok(VERIFY_WRITE, request, nbytes))
26002 return -EFAULT;
26003- list = (struct drm_buf_desc *) (request + 1);
26004+ list = (struct drm_buf_desc __user *) (request + 1);
26005
26006 if (__put_user(count, &request->count)
26007 || __put_user(list, &request->list))
26008@@ -516,7 +516,7 @@ static int compat_drm_mapbufs(struct fil
26009 request = compat_alloc_user_space(nbytes);
26010 if (!access_ok(VERIFY_WRITE, request, nbytes))
26011 return -EFAULT;
26012- list = (struct drm_buf_pub *) (request + 1);
26013+ list = (struct drm_buf_pub __user *) (request + 1);
26014
26015 if (__put_user(count, &request->count)
26016 || __put_user(list, &request->list))
26017diff -urNp linux-3.0.7/drivers/gpu/drm/drm_ioctl.c linux-3.0.7/drivers/gpu/drm/drm_ioctl.c
26018--- linux-3.0.7/drivers/gpu/drm/drm_ioctl.c 2011-07-21 22:17:23.000000000 -0400
26019+++ linux-3.0.7/drivers/gpu/drm/drm_ioctl.c 2011-08-23 21:47:55.000000000 -0400
26020@@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
26021 stats->data[i].value =
26022 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
26023 else
26024- stats->data[i].value = atomic_read(&dev->counts[i]);
26025+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
26026 stats->data[i].type = dev->types[i];
26027 }
26028
26029diff -urNp linux-3.0.7/drivers/gpu/drm/drm_lock.c linux-3.0.7/drivers/gpu/drm/drm_lock.c
26030--- linux-3.0.7/drivers/gpu/drm/drm_lock.c 2011-07-21 22:17:23.000000000 -0400
26031+++ linux-3.0.7/drivers/gpu/drm/drm_lock.c 2011-08-23 21:47:55.000000000 -0400
26032@@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
26033 if (drm_lock_take(&master->lock, lock->context)) {
26034 master->lock.file_priv = file_priv;
26035 master->lock.lock_time = jiffies;
26036- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
26037+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
26038 break; /* Got lock */
26039 }
26040
26041@@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
26042 return -EINVAL;
26043 }
26044
26045- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
26046+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
26047
26048 if (drm_lock_free(&master->lock, lock->context)) {
26049 /* FIXME: Should really bail out here. */
26050diff -urNp linux-3.0.7/drivers/gpu/drm/i810/i810_dma.c linux-3.0.7/drivers/gpu/drm/i810/i810_dma.c
26051--- linux-3.0.7/drivers/gpu/drm/i810/i810_dma.c 2011-07-21 22:17:23.000000000 -0400
26052+++ linux-3.0.7/drivers/gpu/drm/i810/i810_dma.c 2011-08-23 21:47:55.000000000 -0400
26053@@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
26054 dma->buflist[vertex->idx],
26055 vertex->discard, vertex->used);
26056
26057- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
26058- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
26059+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
26060+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
26061 sarea_priv->last_enqueue = dev_priv->counter - 1;
26062 sarea_priv->last_dispatch = (int)hw_status[5];
26063
26064@@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
26065 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
26066 mc->last_render);
26067
26068- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
26069- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
26070+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
26071+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
26072 sarea_priv->last_enqueue = dev_priv->counter - 1;
26073 sarea_priv->last_dispatch = (int)hw_status[5];
26074
26075diff -urNp linux-3.0.7/drivers/gpu/drm/i810/i810_drv.h linux-3.0.7/drivers/gpu/drm/i810/i810_drv.h
26076--- linux-3.0.7/drivers/gpu/drm/i810/i810_drv.h 2011-07-21 22:17:23.000000000 -0400
26077+++ linux-3.0.7/drivers/gpu/drm/i810/i810_drv.h 2011-08-23 21:47:55.000000000 -0400
26078@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
26079 int page_flipping;
26080
26081 wait_queue_head_t irq_queue;
26082- atomic_t irq_received;
26083- atomic_t irq_emitted;
26084+ atomic_unchecked_t irq_received;
26085+ atomic_unchecked_t irq_emitted;
26086
26087 int front_offset;
26088 } drm_i810_private_t;
26089diff -urNp linux-3.0.7/drivers/gpu/drm/i915/i915_debugfs.c linux-3.0.7/drivers/gpu/drm/i915/i915_debugfs.c
26090--- linux-3.0.7/drivers/gpu/drm/i915/i915_debugfs.c 2011-07-21 22:17:23.000000000 -0400
26091+++ linux-3.0.7/drivers/gpu/drm/i915/i915_debugfs.c 2011-10-06 04:17:55.000000000 -0400
26092@@ -497,7 +497,7 @@ static int i915_interrupt_info(struct se
26093 I915_READ(GTIMR));
26094 }
26095 seq_printf(m, "Interrupts received: %d\n",
26096- atomic_read(&dev_priv->irq_received));
26097+ atomic_read_unchecked(&dev_priv->irq_received));
26098 for (i = 0; i < I915_NUM_RINGS; i++) {
26099 if (IS_GEN6(dev)) {
26100 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
26101@@ -1147,7 +1147,7 @@ static int i915_opregion(struct seq_file
26102 return ret;
26103
26104 if (opregion->header)
26105- seq_write(m, opregion->header, OPREGION_SIZE);
26106+ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
26107
26108 mutex_unlock(&dev->struct_mutex);
26109
26110diff -urNp linux-3.0.7/drivers/gpu/drm/i915/i915_dma.c linux-3.0.7/drivers/gpu/drm/i915/i915_dma.c
26111--- linux-3.0.7/drivers/gpu/drm/i915/i915_dma.c 2011-09-02 18:11:21.000000000 -0400
26112+++ linux-3.0.7/drivers/gpu/drm/i915/i915_dma.c 2011-08-23 21:47:55.000000000 -0400
26113@@ -1169,7 +1169,7 @@ static bool i915_switcheroo_can_switch(s
26114 bool can_switch;
26115
26116 spin_lock(&dev->count_lock);
26117- can_switch = (dev->open_count == 0);
26118+ can_switch = (local_read(&dev->open_count) == 0);
26119 spin_unlock(&dev->count_lock);
26120 return can_switch;
26121 }
26122diff -urNp linux-3.0.7/drivers/gpu/drm/i915/i915_drv.h linux-3.0.7/drivers/gpu/drm/i915/i915_drv.h
26123--- linux-3.0.7/drivers/gpu/drm/i915/i915_drv.h 2011-07-21 22:17:23.000000000 -0400
26124+++ linux-3.0.7/drivers/gpu/drm/i915/i915_drv.h 2011-08-23 21:47:55.000000000 -0400
26125@@ -219,7 +219,7 @@ struct drm_i915_display_funcs {
26126 /* render clock increase/decrease */
26127 /* display clock increase/decrease */
26128 /* pll clock increase/decrease */
26129-};
26130+} __no_const;
26131
26132 struct intel_device_info {
26133 u8 gen;
26134@@ -300,7 +300,7 @@ typedef struct drm_i915_private {
26135 int current_page;
26136 int page_flipping;
26137
26138- atomic_t irq_received;
26139+ atomic_unchecked_t irq_received;
26140
26141 /* protects the irq masks */
26142 spinlock_t irq_lock;
26143@@ -874,7 +874,7 @@ struct drm_i915_gem_object {
26144 * will be page flipped away on the next vblank. When it
26145 * reaches 0, dev_priv->pending_flip_queue will be woken up.
26146 */
26147- atomic_t pending_flip;
26148+ atomic_unchecked_t pending_flip;
26149 };
26150
26151 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
26152@@ -1247,7 +1247,7 @@ extern int intel_setup_gmbus(struct drm_
26153 extern void intel_teardown_gmbus(struct drm_device *dev);
26154 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
26155 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
26156-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
26157+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
26158 {
26159 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
26160 }
26161diff -urNp linux-3.0.7/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.0.7/drivers/gpu/drm/i915/i915_gem_execbuffer.c
26162--- linux-3.0.7/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-07-21 22:17:23.000000000 -0400
26163+++ linux-3.0.7/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-08-23 21:47:55.000000000 -0400
26164@@ -188,7 +188,7 @@ i915_gem_object_set_to_gpu_domain(struct
26165 i915_gem_clflush_object(obj);
26166
26167 if (obj->base.pending_write_domain)
26168- cd->flips |= atomic_read(&obj->pending_flip);
26169+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
26170
26171 /* The actual obj->write_domain will be updated with
26172 * pending_write_domain after we emit the accumulated flush for all
26173diff -urNp linux-3.0.7/drivers/gpu/drm/i915/i915_irq.c linux-3.0.7/drivers/gpu/drm/i915/i915_irq.c
26174--- linux-3.0.7/drivers/gpu/drm/i915/i915_irq.c 2011-09-02 18:11:21.000000000 -0400
26175+++ linux-3.0.7/drivers/gpu/drm/i915/i915_irq.c 2011-08-23 21:47:55.000000000 -0400
26176@@ -473,7 +473,7 @@ static irqreturn_t ivybridge_irq_handler
26177 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
26178 struct drm_i915_master_private *master_priv;
26179
26180- atomic_inc(&dev_priv->irq_received);
26181+ atomic_inc_unchecked(&dev_priv->irq_received);
26182
26183 /* disable master interrupt before clearing iir */
26184 de_ier = I915_READ(DEIER);
26185@@ -563,7 +563,7 @@ static irqreturn_t ironlake_irq_handler(
26186 struct drm_i915_master_private *master_priv;
26187 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
26188
26189- atomic_inc(&dev_priv->irq_received);
26190+ atomic_inc_unchecked(&dev_priv->irq_received);
26191
26192 if (IS_GEN6(dev))
26193 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
26194@@ -1226,7 +1226,7 @@ static irqreturn_t i915_driver_irq_handl
26195 int ret = IRQ_NONE, pipe;
26196 bool blc_event = false;
26197
26198- atomic_inc(&dev_priv->irq_received);
26199+ atomic_inc_unchecked(&dev_priv->irq_received);
26200
26201 iir = I915_READ(IIR);
26202
26203@@ -1735,7 +1735,7 @@ static void ironlake_irq_preinstall(stru
26204 {
26205 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
26206
26207- atomic_set(&dev_priv->irq_received, 0);
26208+ atomic_set_unchecked(&dev_priv->irq_received, 0);
26209
26210 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
26211 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
26212@@ -1899,7 +1899,7 @@ static void i915_driver_irq_preinstall(s
26213 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
26214 int pipe;
26215
26216- atomic_set(&dev_priv->irq_received, 0);
26217+ atomic_set_unchecked(&dev_priv->irq_received, 0);
26218
26219 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
26220 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
26221diff -urNp linux-3.0.7/drivers/gpu/drm/i915/intel_display.c linux-3.0.7/drivers/gpu/drm/i915/intel_display.c
26222--- linux-3.0.7/drivers/gpu/drm/i915/intel_display.c 2011-09-02 18:11:21.000000000 -0400
26223+++ linux-3.0.7/drivers/gpu/drm/i915/intel_display.c 2011-08-23 21:47:55.000000000 -0400
26224@@ -1961,7 +1961,7 @@ intel_pipe_set_base(struct drm_crtc *crt
26225
26226 wait_event(dev_priv->pending_flip_queue,
26227 atomic_read(&dev_priv->mm.wedged) ||
26228- atomic_read(&obj->pending_flip) == 0);
26229+ atomic_read_unchecked(&obj->pending_flip) == 0);
26230
26231 /* Big Hammer, we also need to ensure that any pending
26232 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
26233@@ -2548,7 +2548,7 @@ static void intel_crtc_wait_for_pending_
26234 obj = to_intel_framebuffer(crtc->fb)->obj;
26235 dev_priv = crtc->dev->dev_private;
26236 wait_event(dev_priv->pending_flip_queue,
26237- atomic_read(&obj->pending_flip) == 0);
26238+ atomic_read_unchecked(&obj->pending_flip) == 0);
26239 }
26240
26241 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
26242@@ -6225,7 +6225,7 @@ static void do_intel_finish_page_flip(st
26243
26244 atomic_clear_mask(1 << intel_crtc->plane,
26245 &obj->pending_flip.counter);
26246- if (atomic_read(&obj->pending_flip) == 0)
26247+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
26248 wake_up(&dev_priv->pending_flip_queue);
26249
26250 schedule_work(&work->work);
26251@@ -6514,7 +6514,7 @@ static int intel_crtc_page_flip(struct d
26252 /* Block clients from rendering to the new back buffer until
26253 * the flip occurs and the object is no longer visible.
26254 */
26255- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26256+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26257
26258 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
26259 if (ret)
26260@@ -6527,7 +6527,7 @@ static int intel_crtc_page_flip(struct d
26261 return 0;
26262
26263 cleanup_pending:
26264- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26265+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26266 cleanup_objs:
26267 drm_gem_object_unreference(&work->old_fb_obj->base);
26268 drm_gem_object_unreference(&obj->base);
26269diff -urNp linux-3.0.7/drivers/gpu/drm/mga/mga_drv.h linux-3.0.7/drivers/gpu/drm/mga/mga_drv.h
26270--- linux-3.0.7/drivers/gpu/drm/mga/mga_drv.h 2011-07-21 22:17:23.000000000 -0400
26271+++ linux-3.0.7/drivers/gpu/drm/mga/mga_drv.h 2011-08-23 21:47:55.000000000 -0400
26272@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
26273 u32 clear_cmd;
26274 u32 maccess;
26275
26276- atomic_t vbl_received; /**< Number of vblanks received. */
26277+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
26278 wait_queue_head_t fence_queue;
26279- atomic_t last_fence_retired;
26280+ atomic_unchecked_t last_fence_retired;
26281 u32 next_fence_to_post;
26282
26283 unsigned int fb_cpp;
26284diff -urNp linux-3.0.7/drivers/gpu/drm/mga/mga_irq.c linux-3.0.7/drivers/gpu/drm/mga/mga_irq.c
26285--- linux-3.0.7/drivers/gpu/drm/mga/mga_irq.c 2011-07-21 22:17:23.000000000 -0400
26286+++ linux-3.0.7/drivers/gpu/drm/mga/mga_irq.c 2011-08-23 21:47:55.000000000 -0400
26287@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
26288 if (crtc != 0)
26289 return 0;
26290
26291- return atomic_read(&dev_priv->vbl_received);
26292+ return atomic_read_unchecked(&dev_priv->vbl_received);
26293 }
26294
26295
26296@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
26297 /* VBLANK interrupt */
26298 if (status & MGA_VLINEPEN) {
26299 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
26300- atomic_inc(&dev_priv->vbl_received);
26301+ atomic_inc_unchecked(&dev_priv->vbl_received);
26302 drm_handle_vblank(dev, 0);
26303 handled = 1;
26304 }
26305@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
26306 if ((prim_start & ~0x03) != (prim_end & ~0x03))
26307 MGA_WRITE(MGA_PRIMEND, prim_end);
26308
26309- atomic_inc(&dev_priv->last_fence_retired);
26310+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
26311 DRM_WAKEUP(&dev_priv->fence_queue);
26312 handled = 1;
26313 }
26314@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
26315 * using fences.
26316 */
26317 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
26318- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
26319+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
26320 - *sequence) <= (1 << 23)));
26321
26322 *sequence = cur_fence;
26323diff -urNp linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_bios.c linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_bios.c
26324--- linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-07-21 22:17:23.000000000 -0400
26325+++ linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-08-26 19:49:56.000000000 -0400
26326@@ -200,7 +200,7 @@ struct methods {
26327 const char desc[8];
26328 void (*loadbios)(struct drm_device *, uint8_t *);
26329 const bool rw;
26330-};
26331+} __do_const;
26332
26333 static struct methods shadow_methods[] = {
26334 { "PRAMIN", load_vbios_pramin, true },
26335@@ -5488,7 +5488,7 @@ parse_bit_displayport_tbl_entry(struct d
26336 struct bit_table {
26337 const char id;
26338 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
26339-};
26340+} __no_const;
26341
26342 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
26343
26344diff -urNp linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_drv.h linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_drv.h
26345--- linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-07-21 22:17:23.000000000 -0400
26346+++ linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-08-23 21:47:55.000000000 -0400
26347@@ -227,7 +227,7 @@ struct nouveau_channel {
26348 struct list_head pending;
26349 uint32_t sequence;
26350 uint32_t sequence_ack;
26351- atomic_t last_sequence_irq;
26352+ atomic_unchecked_t last_sequence_irq;
26353 } fence;
26354
26355 /* DMA push buffer */
26356@@ -304,7 +304,7 @@ struct nouveau_exec_engine {
26357 u32 handle, u16 class);
26358 void (*set_tile_region)(struct drm_device *dev, int i);
26359 void (*tlb_flush)(struct drm_device *, int engine);
26360-};
26361+} __no_const;
26362
26363 struct nouveau_instmem_engine {
26364 void *priv;
26365@@ -325,13 +325,13 @@ struct nouveau_instmem_engine {
26366 struct nouveau_mc_engine {
26367 int (*init)(struct drm_device *dev);
26368 void (*takedown)(struct drm_device *dev);
26369-};
26370+} __no_const;
26371
26372 struct nouveau_timer_engine {
26373 int (*init)(struct drm_device *dev);
26374 void (*takedown)(struct drm_device *dev);
26375 uint64_t (*read)(struct drm_device *dev);
26376-};
26377+} __no_const;
26378
26379 struct nouveau_fb_engine {
26380 int num_tiles;
26381@@ -494,7 +494,7 @@ struct nouveau_vram_engine {
26382 void (*put)(struct drm_device *, struct nouveau_mem **);
26383
26384 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
26385-};
26386+} __no_const;
26387
26388 struct nouveau_engine {
26389 struct nouveau_instmem_engine instmem;
26390@@ -640,7 +640,7 @@ struct drm_nouveau_private {
26391 struct drm_global_reference mem_global_ref;
26392 struct ttm_bo_global_ref bo_global_ref;
26393 struct ttm_bo_device bdev;
26394- atomic_t validate_sequence;
26395+ atomic_unchecked_t validate_sequence;
26396 } ttm;
26397
26398 struct {
26399diff -urNp linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_fence.c linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_fence.c
26400--- linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-07-21 22:17:23.000000000 -0400
26401+++ linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-08-23 21:47:55.000000000 -0400
26402@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
26403 if (USE_REFCNT(dev))
26404 sequence = nvchan_rd32(chan, 0x48);
26405 else
26406- sequence = atomic_read(&chan->fence.last_sequence_irq);
26407+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
26408
26409 if (chan->fence.sequence_ack == sequence)
26410 goto out;
26411@@ -544,7 +544,7 @@ nouveau_fence_channel_init(struct nouvea
26412
26413 INIT_LIST_HEAD(&chan->fence.pending);
26414 spin_lock_init(&chan->fence.lock);
26415- atomic_set(&chan->fence.last_sequence_irq, 0);
26416+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
26417 return 0;
26418 }
26419
26420diff -urNp linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_gem.c linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_gem.c
26421--- linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-07-21 22:17:23.000000000 -0400
26422+++ linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-08-23 21:47:55.000000000 -0400
26423@@ -249,7 +249,7 @@ validate_init(struct nouveau_channel *ch
26424 int trycnt = 0;
26425 int ret, i;
26426
26427- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
26428+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
26429 retry:
26430 if (++trycnt > 100000) {
26431 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
26432diff -urNp linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_state.c linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_state.c
26433--- linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_state.c 2011-07-21 22:17:23.000000000 -0400
26434+++ linux-3.0.7/drivers/gpu/drm/nouveau/nouveau_state.c 2011-08-23 21:47:55.000000000 -0400
26435@@ -488,7 +488,7 @@ static bool nouveau_switcheroo_can_switc
26436 bool can_switch;
26437
26438 spin_lock(&dev->count_lock);
26439- can_switch = (dev->open_count == 0);
26440+ can_switch = (local_read(&dev->open_count) == 0);
26441 spin_unlock(&dev->count_lock);
26442 return can_switch;
26443 }
26444diff -urNp linux-3.0.7/drivers/gpu/drm/nouveau/nv04_graph.c linux-3.0.7/drivers/gpu/drm/nouveau/nv04_graph.c
26445--- linux-3.0.7/drivers/gpu/drm/nouveau/nv04_graph.c 2011-07-21 22:17:23.000000000 -0400
26446+++ linux-3.0.7/drivers/gpu/drm/nouveau/nv04_graph.c 2011-08-23 21:47:55.000000000 -0400
26447@@ -560,7 +560,7 @@ static int
26448 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
26449 u32 class, u32 mthd, u32 data)
26450 {
26451- atomic_set(&chan->fence.last_sequence_irq, data);
26452+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
26453 return 0;
26454 }
26455
26456diff -urNp linux-3.0.7/drivers/gpu/drm/r128/r128_cce.c linux-3.0.7/drivers/gpu/drm/r128/r128_cce.c
26457--- linux-3.0.7/drivers/gpu/drm/r128/r128_cce.c 2011-07-21 22:17:23.000000000 -0400
26458+++ linux-3.0.7/drivers/gpu/drm/r128/r128_cce.c 2011-08-23 21:47:55.000000000 -0400
26459@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
26460
26461 /* GH: Simple idle check.
26462 */
26463- atomic_set(&dev_priv->idle_count, 0);
26464+ atomic_set_unchecked(&dev_priv->idle_count, 0);
26465
26466 /* We don't support anything other than bus-mastering ring mode,
26467 * but the ring can be in either AGP or PCI space for the ring
26468diff -urNp linux-3.0.7/drivers/gpu/drm/r128/r128_drv.h linux-3.0.7/drivers/gpu/drm/r128/r128_drv.h
26469--- linux-3.0.7/drivers/gpu/drm/r128/r128_drv.h 2011-07-21 22:17:23.000000000 -0400
26470+++ linux-3.0.7/drivers/gpu/drm/r128/r128_drv.h 2011-08-23 21:47:55.000000000 -0400
26471@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
26472 int is_pci;
26473 unsigned long cce_buffers_offset;
26474
26475- atomic_t idle_count;
26476+ atomic_unchecked_t idle_count;
26477
26478 int page_flipping;
26479 int current_page;
26480 u32 crtc_offset;
26481 u32 crtc_offset_cntl;
26482
26483- atomic_t vbl_received;
26484+ atomic_unchecked_t vbl_received;
26485
26486 u32 color_fmt;
26487 unsigned int front_offset;
26488diff -urNp linux-3.0.7/drivers/gpu/drm/r128/r128_irq.c linux-3.0.7/drivers/gpu/drm/r128/r128_irq.c
26489--- linux-3.0.7/drivers/gpu/drm/r128/r128_irq.c 2011-07-21 22:17:23.000000000 -0400
26490+++ linux-3.0.7/drivers/gpu/drm/r128/r128_irq.c 2011-08-23 21:47:55.000000000 -0400
26491@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
26492 if (crtc != 0)
26493 return 0;
26494
26495- return atomic_read(&dev_priv->vbl_received);
26496+ return atomic_read_unchecked(&dev_priv->vbl_received);
26497 }
26498
26499 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
26500@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
26501 /* VBLANK interrupt */
26502 if (status & R128_CRTC_VBLANK_INT) {
26503 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
26504- atomic_inc(&dev_priv->vbl_received);
26505+ atomic_inc_unchecked(&dev_priv->vbl_received);
26506 drm_handle_vblank(dev, 0);
26507 return IRQ_HANDLED;
26508 }
26509diff -urNp linux-3.0.7/drivers/gpu/drm/r128/r128_state.c linux-3.0.7/drivers/gpu/drm/r128/r128_state.c
26510--- linux-3.0.7/drivers/gpu/drm/r128/r128_state.c 2011-07-21 22:17:23.000000000 -0400
26511+++ linux-3.0.7/drivers/gpu/drm/r128/r128_state.c 2011-08-23 21:47:55.000000000 -0400
26512@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
26513
26514 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
26515 {
26516- if (atomic_read(&dev_priv->idle_count) == 0)
26517+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
26518 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
26519 else
26520- atomic_set(&dev_priv->idle_count, 0);
26521+ atomic_set_unchecked(&dev_priv->idle_count, 0);
26522 }
26523
26524 #endif
26525diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/atom.c linux-3.0.7/drivers/gpu/drm/radeon/atom.c
26526--- linux-3.0.7/drivers/gpu/drm/radeon/atom.c 2011-07-21 22:17:23.000000000 -0400
26527+++ linux-3.0.7/drivers/gpu/drm/radeon/atom.c 2011-08-23 21:48:14.000000000 -0400
26528@@ -1245,6 +1245,8 @@ struct atom_context *atom_parse(struct c
26529 char name[512];
26530 int i;
26531
26532+ pax_track_stack();
26533+
26534 ctx->card = card;
26535 ctx->bios = bios;
26536
26537diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/mkregtable.c linux-3.0.7/drivers/gpu/drm/radeon/mkregtable.c
26538--- linux-3.0.7/drivers/gpu/drm/radeon/mkregtable.c 2011-07-21 22:17:23.000000000 -0400
26539+++ linux-3.0.7/drivers/gpu/drm/radeon/mkregtable.c 2011-08-23 21:47:55.000000000 -0400
26540@@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
26541 regex_t mask_rex;
26542 regmatch_t match[4];
26543 char buf[1024];
26544- size_t end;
26545+ long end;
26546 int len;
26547 int done = 0;
26548 int r;
26549 unsigned o;
26550 struct offset *offset;
26551 char last_reg_s[10];
26552- int last_reg;
26553+ unsigned long last_reg;
26554
26555 if (regcomp
26556 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
26557diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_atombios.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_atombios.c
26558--- linux-3.0.7/drivers/gpu/drm/radeon/radeon_atombios.c 2011-07-21 22:17:23.000000000 -0400
26559+++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_atombios.c 2011-08-23 21:48:14.000000000 -0400
26560@@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
26561 struct radeon_gpio_rec gpio;
26562 struct radeon_hpd hpd;
26563
26564+ pax_track_stack();
26565+
26566 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
26567 return false;
26568
26569diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_device.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_device.c
26570--- linux-3.0.7/drivers/gpu/drm/radeon/radeon_device.c 2011-09-02 18:11:21.000000000 -0400
26571+++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_device.c 2011-08-23 21:47:55.000000000 -0400
26572@@ -678,7 +678,7 @@ static bool radeon_switcheroo_can_switch
26573 bool can_switch;
26574
26575 spin_lock(&dev->count_lock);
26576- can_switch = (dev->open_count == 0);
26577+ can_switch = (local_read(&dev->open_count) == 0);
26578 spin_unlock(&dev->count_lock);
26579 return can_switch;
26580 }
26581diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_display.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_display.c
26582--- linux-3.0.7/drivers/gpu/drm/radeon/radeon_display.c 2011-09-02 18:11:21.000000000 -0400
26583+++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_display.c 2011-08-23 21:48:14.000000000 -0400
26584@@ -946,6 +946,8 @@ void radeon_compute_pll_legacy(struct ra
26585 uint32_t post_div;
26586 u32 pll_out_min, pll_out_max;
26587
26588+ pax_track_stack();
26589+
26590 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
26591 freq = freq * 1000;
26592
26593diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_drv.h linux-3.0.7/drivers/gpu/drm/radeon/radeon_drv.h
26594--- linux-3.0.7/drivers/gpu/drm/radeon/radeon_drv.h 2011-07-21 22:17:23.000000000 -0400
26595+++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_drv.h 2011-08-23 21:47:55.000000000 -0400
26596@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
26597
26598 /* SW interrupt */
26599 wait_queue_head_t swi_queue;
26600- atomic_t swi_emitted;
26601+ atomic_unchecked_t swi_emitted;
26602 int vblank_crtc;
26603 uint32_t irq_enable_reg;
26604 uint32_t r500_disp_irq_reg;
26605diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_fence.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_fence.c
26606--- linux-3.0.7/drivers/gpu/drm/radeon/radeon_fence.c 2011-07-21 22:17:23.000000000 -0400
26607+++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_fence.c 2011-08-23 21:47:55.000000000 -0400
26608@@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_devi
26609 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
26610 return 0;
26611 }
26612- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
26613+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
26614 if (!rdev->cp.ready)
26615 /* FIXME: cp is not running assume everythings is done right
26616 * away
26617@@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct rade
26618 return r;
26619 }
26620 radeon_fence_write(rdev, 0);
26621- atomic_set(&rdev->fence_drv.seq, 0);
26622+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
26623 INIT_LIST_HEAD(&rdev->fence_drv.created);
26624 INIT_LIST_HEAD(&rdev->fence_drv.emited);
26625 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
26626diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon.h linux-3.0.7/drivers/gpu/drm/radeon/radeon.h
26627--- linux-3.0.7/drivers/gpu/drm/radeon/radeon.h 2011-10-16 21:54:53.000000000 -0400
26628+++ linux-3.0.7/drivers/gpu/drm/radeon/radeon.h 2011-10-16 21:55:27.000000000 -0400
26629@@ -191,7 +191,7 @@ extern int sumo_get_temp(struct radeon_d
26630 */
26631 struct radeon_fence_driver {
26632 uint32_t scratch_reg;
26633- atomic_t seq;
26634+ atomic_unchecked_t seq;
26635 uint32_t last_seq;
26636 unsigned long last_jiffies;
26637 unsigned long last_timeout;
26638@@ -961,7 +961,7 @@ struct radeon_asic {
26639 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
26640 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
26641 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
26642-};
26643+} __no_const;
26644
26645 /*
26646 * Asic structures
26647diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_ioc32.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_ioc32.c
26648--- linux-3.0.7/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-07-21 22:17:23.000000000 -0400
26649+++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-08-23 21:47:55.000000000 -0400
26650@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
26651 request = compat_alloc_user_space(sizeof(*request));
26652 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
26653 || __put_user(req32.param, &request->param)
26654- || __put_user((void __user *)(unsigned long)req32.value,
26655+ || __put_user((unsigned long)req32.value,
26656 &request->value))
26657 return -EFAULT;
26658
26659diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_irq.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_irq.c
26660--- linux-3.0.7/drivers/gpu/drm/radeon/radeon_irq.c 2011-07-21 22:17:23.000000000 -0400
26661+++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_irq.c 2011-08-23 21:47:55.000000000 -0400
26662@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
26663 unsigned int ret;
26664 RING_LOCALS;
26665
26666- atomic_inc(&dev_priv->swi_emitted);
26667- ret = atomic_read(&dev_priv->swi_emitted);
26668+ atomic_inc_unchecked(&dev_priv->swi_emitted);
26669+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
26670
26671 BEGIN_RING(4);
26672 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
26673@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
26674 drm_radeon_private_t *dev_priv =
26675 (drm_radeon_private_t *) dev->dev_private;
26676
26677- atomic_set(&dev_priv->swi_emitted, 0);
26678+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
26679 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
26680
26681 dev->max_vblank_count = 0x001fffff;
26682diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_state.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_state.c
26683--- linux-3.0.7/drivers/gpu/drm/radeon/radeon_state.c 2011-07-21 22:17:23.000000000 -0400
26684+++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_state.c 2011-08-23 21:47:55.000000000 -0400
26685@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
26686 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
26687 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
26688
26689- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
26690+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
26691 sarea_priv->nbox * sizeof(depth_boxes[0])))
26692 return -EFAULT;
26693
26694@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
26695 {
26696 drm_radeon_private_t *dev_priv = dev->dev_private;
26697 drm_radeon_getparam_t *param = data;
26698- int value;
26699+ int value = 0;
26700
26701 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
26702
26703diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/radeon_ttm.c linux-3.0.7/drivers/gpu/drm/radeon/radeon_ttm.c
26704--- linux-3.0.7/drivers/gpu/drm/radeon/radeon_ttm.c 2011-10-16 21:54:53.000000000 -0400
26705+++ linux-3.0.7/drivers/gpu/drm/radeon/radeon_ttm.c 2011-10-16 21:55:27.000000000 -0400
26706@@ -649,8 +649,10 @@ int radeon_mmap(struct file *filp, struc
26707 }
26708 if (unlikely(ttm_vm_ops == NULL)) {
26709 ttm_vm_ops = vma->vm_ops;
26710- radeon_ttm_vm_ops = *ttm_vm_ops;
26711- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
26712+ pax_open_kernel();
26713+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
26714+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
26715+ pax_close_kernel();
26716 }
26717 vma->vm_ops = &radeon_ttm_vm_ops;
26718 return 0;
26719diff -urNp linux-3.0.7/drivers/gpu/drm/radeon/rs690.c linux-3.0.7/drivers/gpu/drm/radeon/rs690.c
26720--- linux-3.0.7/drivers/gpu/drm/radeon/rs690.c 2011-07-21 22:17:23.000000000 -0400
26721+++ linux-3.0.7/drivers/gpu/drm/radeon/rs690.c 2011-08-23 21:47:55.000000000 -0400
26722@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
26723 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
26724 rdev->pm.sideport_bandwidth.full)
26725 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
26726- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
26727+ read_delay_latency.full = dfixed_const(800 * 1000);
26728 read_delay_latency.full = dfixed_div(read_delay_latency,
26729 rdev->pm.igp_sideport_mclk);
26730+ a.full = dfixed_const(370);
26731+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
26732 } else {
26733 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
26734 rdev->pm.k8_bandwidth.full)
26735diff -urNp linux-3.0.7/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-3.0.7/drivers/gpu/drm/ttm/ttm_page_alloc.c
26736--- linux-3.0.7/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-07-21 22:17:23.000000000 -0400
26737+++ linux-3.0.7/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-08-23 21:47:55.000000000 -0400
26738@@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages
26739 static int ttm_pool_mm_shrink(struct shrinker *shrink,
26740 struct shrink_control *sc)
26741 {
26742- static atomic_t start_pool = ATOMIC_INIT(0);
26743+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
26744 unsigned i;
26745- unsigned pool_offset = atomic_add_return(1, &start_pool);
26746+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
26747 struct ttm_page_pool *pool;
26748 int shrink_pages = sc->nr_to_scan;
26749
26750diff -urNp linux-3.0.7/drivers/gpu/drm/via/via_drv.h linux-3.0.7/drivers/gpu/drm/via/via_drv.h
26751--- linux-3.0.7/drivers/gpu/drm/via/via_drv.h 2011-07-21 22:17:23.000000000 -0400
26752+++ linux-3.0.7/drivers/gpu/drm/via/via_drv.h 2011-08-23 21:47:55.000000000 -0400
26753@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
26754 typedef uint32_t maskarray_t[5];
26755
26756 typedef struct drm_via_irq {
26757- atomic_t irq_received;
26758+ atomic_unchecked_t irq_received;
26759 uint32_t pending_mask;
26760 uint32_t enable_mask;
26761 wait_queue_head_t irq_queue;
26762@@ -75,7 +75,7 @@ typedef struct drm_via_private {
26763 struct timeval last_vblank;
26764 int last_vblank_valid;
26765 unsigned usec_per_vblank;
26766- atomic_t vbl_received;
26767+ atomic_unchecked_t vbl_received;
26768 drm_via_state_t hc_state;
26769 char pci_buf[VIA_PCI_BUF_SIZE];
26770 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
26771diff -urNp linux-3.0.7/drivers/gpu/drm/via/via_irq.c linux-3.0.7/drivers/gpu/drm/via/via_irq.c
26772--- linux-3.0.7/drivers/gpu/drm/via/via_irq.c 2011-07-21 22:17:23.000000000 -0400
26773+++ linux-3.0.7/drivers/gpu/drm/via/via_irq.c 2011-08-23 21:47:55.000000000 -0400
26774@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
26775 if (crtc != 0)
26776 return 0;
26777
26778- return atomic_read(&dev_priv->vbl_received);
26779+ return atomic_read_unchecked(&dev_priv->vbl_received);
26780 }
26781
26782 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
26783@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
26784
26785 status = VIA_READ(VIA_REG_INTERRUPT);
26786 if (status & VIA_IRQ_VBLANK_PENDING) {
26787- atomic_inc(&dev_priv->vbl_received);
26788- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
26789+ atomic_inc_unchecked(&dev_priv->vbl_received);
26790+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
26791 do_gettimeofday(&cur_vblank);
26792 if (dev_priv->last_vblank_valid) {
26793 dev_priv->usec_per_vblank =
26794@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
26795 dev_priv->last_vblank = cur_vblank;
26796 dev_priv->last_vblank_valid = 1;
26797 }
26798- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
26799+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
26800 DRM_DEBUG("US per vblank is: %u\n",
26801 dev_priv->usec_per_vblank);
26802 }
26803@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
26804
26805 for (i = 0; i < dev_priv->num_irqs; ++i) {
26806 if (status & cur_irq->pending_mask) {
26807- atomic_inc(&cur_irq->irq_received);
26808+ atomic_inc_unchecked(&cur_irq->irq_received);
26809 DRM_WAKEUP(&cur_irq->irq_queue);
26810 handled = 1;
26811 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
26812@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
26813 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
26814 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
26815 masks[irq][4]));
26816- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
26817+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
26818 } else {
26819 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
26820 (((cur_irq_sequence =
26821- atomic_read(&cur_irq->irq_received)) -
26822+ atomic_read_unchecked(&cur_irq->irq_received)) -
26823 *sequence) <= (1 << 23)));
26824 }
26825 *sequence = cur_irq_sequence;
26826@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
26827 }
26828
26829 for (i = 0; i < dev_priv->num_irqs; ++i) {
26830- atomic_set(&cur_irq->irq_received, 0);
26831+ atomic_set_unchecked(&cur_irq->irq_received, 0);
26832 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
26833 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
26834 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
26835@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
26836 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
26837 case VIA_IRQ_RELATIVE:
26838 irqwait->request.sequence +=
26839- atomic_read(&cur_irq->irq_received);
26840+ atomic_read_unchecked(&cur_irq->irq_received);
26841 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
26842 case VIA_IRQ_ABSOLUTE:
26843 break;
26844diff -urNp linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
26845--- linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-07-21 22:17:23.000000000 -0400
26846+++ linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-08-23 21:47:55.000000000 -0400
26847@@ -240,7 +240,7 @@ struct vmw_private {
26848 * Fencing and IRQs.
26849 */
26850
26851- atomic_t fence_seq;
26852+ atomic_unchecked_t fence_seq;
26853 wait_queue_head_t fence_queue;
26854 wait_queue_head_t fifo_queue;
26855 atomic_t fence_queue_waiters;
26856diff -urNp linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
26857--- linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c 2011-07-21 22:17:23.000000000 -0400
26858+++ linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c 2011-10-06 04:17:55.000000000 -0400
26859@@ -610,7 +610,7 @@ int vmw_execbuf_ioctl(struct drm_device
26860 struct drm_vmw_fence_rep fence_rep;
26861 struct drm_vmw_fence_rep __user *user_fence_rep;
26862 int ret;
26863- void *user_cmd;
26864+ void __user *user_cmd;
26865 void *cmd;
26866 uint32_t sequence;
26867 struct vmw_sw_context *sw_context = &dev_priv->ctx;
26868diff -urNp linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
26869--- linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-07-21 22:17:23.000000000 -0400
26870+++ linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-08-23 21:47:55.000000000 -0400
26871@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
26872 while (!vmw_lag_lt(queue, us)) {
26873 spin_lock(&queue->lock);
26874 if (list_empty(&queue->head))
26875- sequence = atomic_read(&dev_priv->fence_seq);
26876+ sequence = atomic_read_unchecked(&dev_priv->fence_seq);
26877 else {
26878 fence = list_first_entry(&queue->head,
26879 struct vmw_fence, head);
26880diff -urNp linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
26881--- linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-07-21 22:17:23.000000000 -0400
26882+++ linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-10-06 04:17:55.000000000 -0400
26883@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
26884 (unsigned int) min,
26885 (unsigned int) fifo->capabilities);
26886
26887- atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
26888+ atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
26889 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
26890 vmw_fence_queue_init(&fifo->fence_queue);
26891 return vmw_fifo_send_fence(dev_priv, &dummy);
26892@@ -356,7 +356,7 @@ void *vmw_fifo_reserve(struct vmw_privat
26893 if (reserveable)
26894 iowrite32(bytes, fifo_mem +
26895 SVGA_FIFO_RESERVED);
26896- return fifo_mem + (next_cmd >> 2);
26897+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
26898 } else {
26899 need_bounce = true;
26900 }
26901@@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
26902
26903 fm = vmw_fifo_reserve(dev_priv, bytes);
26904 if (unlikely(fm == NULL)) {
26905- *sequence = atomic_read(&dev_priv->fence_seq);
26906+ *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
26907 ret = -ENOMEM;
26908 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
26909 false, 3*HZ);
26910@@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
26911 }
26912
26913 do {
26914- *sequence = atomic_add_return(1, &dev_priv->fence_seq);
26915+ *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
26916 } while (*sequence == 0);
26917
26918 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
26919diff -urNp linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
26920--- linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-07-21 22:17:23.000000000 -0400
26921+++ linux-3.0.7/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-08-23 21:47:55.000000000 -0400
26922@@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
26923 * emitted. Then the fence is stale and signaled.
26924 */
26925
26926- ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
26927+ ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
26928 > VMW_FENCE_WRAP);
26929
26930 return ret;
26931@@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
26932
26933 if (fifo_idle)
26934 down_read(&fifo_state->rwsem);
26935- signal_seq = atomic_read(&dev_priv->fence_seq);
26936+ signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
26937 ret = 0;
26938
26939 for (;;) {
26940diff -urNp linux-3.0.7/drivers/hid/hid-core.c linux-3.0.7/drivers/hid/hid-core.c
26941--- linux-3.0.7/drivers/hid/hid-core.c 2011-07-21 22:17:23.000000000 -0400
26942+++ linux-3.0.7/drivers/hid/hid-core.c 2011-08-23 21:47:55.000000000 -0400
26943@@ -1923,7 +1923,7 @@ static bool hid_ignore(struct hid_device
26944
26945 int hid_add_device(struct hid_device *hdev)
26946 {
26947- static atomic_t id = ATOMIC_INIT(0);
26948+ static atomic_unchecked_t id = ATOMIC_INIT(0);
26949 int ret;
26950
26951 if (WARN_ON(hdev->status & HID_STAT_ADDED))
26952@@ -1938,7 +1938,7 @@ int hid_add_device(struct hid_device *hd
26953 /* XXX hack, any other cleaner solution after the driver core
26954 * is converted to allow more than 20 bytes as the device name? */
26955 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
26956- hdev->vendor, hdev->product, atomic_inc_return(&id));
26957+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
26958
26959 hid_debug_register(hdev, dev_name(&hdev->dev));
26960 ret = device_add(&hdev->dev);
26961diff -urNp linux-3.0.7/drivers/hid/usbhid/hiddev.c linux-3.0.7/drivers/hid/usbhid/hiddev.c
26962--- linux-3.0.7/drivers/hid/usbhid/hiddev.c 2011-07-21 22:17:23.000000000 -0400
26963+++ linux-3.0.7/drivers/hid/usbhid/hiddev.c 2011-08-23 21:47:55.000000000 -0400
26964@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *fi
26965 break;
26966
26967 case HIDIOCAPPLICATION:
26968- if (arg < 0 || arg >= hid->maxapplication)
26969+ if (arg >= hid->maxapplication)
26970 break;
26971
26972 for (i = 0; i < hid->maxcollection; i++)
26973diff -urNp linux-3.0.7/drivers/hwmon/acpi_power_meter.c linux-3.0.7/drivers/hwmon/acpi_power_meter.c
26974--- linux-3.0.7/drivers/hwmon/acpi_power_meter.c 2011-07-21 22:17:23.000000000 -0400
26975+++ linux-3.0.7/drivers/hwmon/acpi_power_meter.c 2011-08-23 21:47:55.000000000 -0400
26976@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
26977 return res;
26978
26979 temp /= 1000;
26980- if (temp < 0)
26981- return -EINVAL;
26982
26983 mutex_lock(&resource->lock);
26984 resource->trip[attr->index - 7] = temp;
26985diff -urNp linux-3.0.7/drivers/hwmon/sht15.c linux-3.0.7/drivers/hwmon/sht15.c
26986--- linux-3.0.7/drivers/hwmon/sht15.c 2011-07-21 22:17:23.000000000 -0400
26987+++ linux-3.0.7/drivers/hwmon/sht15.c 2011-08-23 21:47:55.000000000 -0400
26988@@ -166,7 +166,7 @@ struct sht15_data {
26989 int supply_uV;
26990 bool supply_uV_valid;
26991 struct work_struct update_supply_work;
26992- atomic_t interrupt_handled;
26993+ atomic_unchecked_t interrupt_handled;
26994 };
26995
26996 /**
26997@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht1
26998 return ret;
26999
27000 gpio_direction_input(data->pdata->gpio_data);
27001- atomic_set(&data->interrupt_handled, 0);
27002+ atomic_set_unchecked(&data->interrupt_handled, 0);
27003
27004 enable_irq(gpio_to_irq(data->pdata->gpio_data));
27005 if (gpio_get_value(data->pdata->gpio_data) == 0) {
27006 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
27007 /* Only relevant if the interrupt hasn't occurred. */
27008- if (!atomic_read(&data->interrupt_handled))
27009+ if (!atomic_read_unchecked(&data->interrupt_handled))
27010 schedule_work(&data->read_work);
27011 }
27012 ret = wait_event_timeout(data->wait_queue,
27013@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired
27014
27015 /* First disable the interrupt */
27016 disable_irq_nosync(irq);
27017- atomic_inc(&data->interrupt_handled);
27018+ atomic_inc_unchecked(&data->interrupt_handled);
27019 /* Then schedule a reading work struct */
27020 if (data->state != SHT15_READING_NOTHING)
27021 schedule_work(&data->read_work);
27022@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct wo
27023 * If not, then start the interrupt again - care here as could
27024 * have gone low in meantime so verify it hasn't!
27025 */
27026- atomic_set(&data->interrupt_handled, 0);
27027+ atomic_set_unchecked(&data->interrupt_handled, 0);
27028 enable_irq(gpio_to_irq(data->pdata->gpio_data));
27029 /* If still not occurred or another handler has been scheduled */
27030 if (gpio_get_value(data->pdata->gpio_data)
27031- || atomic_read(&data->interrupt_handled))
27032+ || atomic_read_unchecked(&data->interrupt_handled))
27033 return;
27034 }
27035
27036diff -urNp linux-3.0.7/drivers/hwmon/w83791d.c linux-3.0.7/drivers/hwmon/w83791d.c
27037--- linux-3.0.7/drivers/hwmon/w83791d.c 2011-07-21 22:17:23.000000000 -0400
27038+++ linux-3.0.7/drivers/hwmon/w83791d.c 2011-08-23 21:47:55.000000000 -0400
27039@@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_cli
27040 struct i2c_board_info *info);
27041 static int w83791d_remove(struct i2c_client *client);
27042
27043-static int w83791d_read(struct i2c_client *client, u8 register);
27044-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
27045+static int w83791d_read(struct i2c_client *client, u8 reg);
27046+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
27047 static struct w83791d_data *w83791d_update_device(struct device *dev);
27048
27049 #ifdef DEBUG
27050diff -urNp linux-3.0.7/drivers/i2c/busses/i2c-amd756-s4882.c linux-3.0.7/drivers/i2c/busses/i2c-amd756-s4882.c
27051--- linux-3.0.7/drivers/i2c/busses/i2c-amd756-s4882.c 2011-07-21 22:17:23.000000000 -0400
27052+++ linux-3.0.7/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-23 21:47:55.000000000 -0400
27053@@ -43,7 +43,7 @@
27054 extern struct i2c_adapter amd756_smbus;
27055
27056 static struct i2c_adapter *s4882_adapter;
27057-static struct i2c_algorithm *s4882_algo;
27058+static i2c_algorithm_no_const *s4882_algo;
27059
27060 /* Wrapper access functions for multiplexed SMBus */
27061 static DEFINE_MUTEX(amd756_lock);
27062diff -urNp linux-3.0.7/drivers/i2c/busses/i2c-nforce2-s4985.c linux-3.0.7/drivers/i2c/busses/i2c-nforce2-s4985.c
27063--- linux-3.0.7/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-07-21 22:17:23.000000000 -0400
27064+++ linux-3.0.7/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-23 21:47:55.000000000 -0400
27065@@ -41,7 +41,7 @@
27066 extern struct i2c_adapter *nforce2_smbus;
27067
27068 static struct i2c_adapter *s4985_adapter;
27069-static struct i2c_algorithm *s4985_algo;
27070+static i2c_algorithm_no_const *s4985_algo;
27071
27072 /* Wrapper access functions for multiplexed SMBus */
27073 static DEFINE_MUTEX(nforce2_lock);
27074diff -urNp linux-3.0.7/drivers/i2c/i2c-mux.c linux-3.0.7/drivers/i2c/i2c-mux.c
27075--- linux-3.0.7/drivers/i2c/i2c-mux.c 2011-07-21 22:17:23.000000000 -0400
27076+++ linux-3.0.7/drivers/i2c/i2c-mux.c 2011-08-23 21:47:55.000000000 -0400
27077@@ -28,7 +28,7 @@
27078 /* multiplexer per channel data */
27079 struct i2c_mux_priv {
27080 struct i2c_adapter adap;
27081- struct i2c_algorithm algo;
27082+ i2c_algorithm_no_const algo;
27083
27084 struct i2c_adapter *parent;
27085 void *mux_dev; /* the mux chip/device */
27086diff -urNp linux-3.0.7/drivers/ide/aec62xx.c linux-3.0.7/drivers/ide/aec62xx.c
27087--- linux-3.0.7/drivers/ide/aec62xx.c 2011-07-21 22:17:23.000000000 -0400
27088+++ linux-3.0.7/drivers/ide/aec62xx.c 2011-10-11 10:44:33.000000000 -0400
27089@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_
27090 .cable_detect = atp86x_cable_detect,
27091 };
27092
27093-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
27094+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
27095 { /* 0: AEC6210 */
27096 .name = DRV_NAME,
27097 .init_chipset = init_chipset_aec62xx,
27098diff -urNp linux-3.0.7/drivers/ide/alim15x3.c linux-3.0.7/drivers/ide/alim15x3.c
27099--- linux-3.0.7/drivers/ide/alim15x3.c 2011-07-21 22:17:23.000000000 -0400
27100+++ linux-3.0.7/drivers/ide/alim15x3.c 2011-10-11 10:44:33.000000000 -0400
27101@@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_
27102 .dma_sff_read_status = ide_dma_sff_read_status,
27103 };
27104
27105-static const struct ide_port_info ali15x3_chipset __devinitdata = {
27106+static const struct ide_port_info ali15x3_chipset __devinitconst = {
27107 .name = DRV_NAME,
27108 .init_chipset = init_chipset_ali15x3,
27109 .init_hwif = init_hwif_ali15x3,
27110diff -urNp linux-3.0.7/drivers/ide/amd74xx.c linux-3.0.7/drivers/ide/amd74xx.c
27111--- linux-3.0.7/drivers/ide/amd74xx.c 2011-07-21 22:17:23.000000000 -0400
27112+++ linux-3.0.7/drivers/ide/amd74xx.c 2011-10-11 10:44:33.000000000 -0400
27113@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_por
27114 .udma_mask = udma, \
27115 }
27116
27117-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
27118+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
27119 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
27120 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
27121 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
27122diff -urNp linux-3.0.7/drivers/ide/atiixp.c linux-3.0.7/drivers/ide/atiixp.c
27123--- linux-3.0.7/drivers/ide/atiixp.c 2011-07-21 22:17:23.000000000 -0400
27124+++ linux-3.0.7/drivers/ide/atiixp.c 2011-10-11 10:44:33.000000000 -0400
27125@@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_
27126 .cable_detect = atiixp_cable_detect,
27127 };
27128
27129-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
27130+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
27131 { /* 0: IXP200/300/400/700 */
27132 .name = DRV_NAME,
27133 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
27134diff -urNp linux-3.0.7/drivers/ide/cmd64x.c linux-3.0.7/drivers/ide/cmd64x.c
27135--- linux-3.0.7/drivers/ide/cmd64x.c 2011-07-21 22:17:23.000000000 -0400
27136+++ linux-3.0.7/drivers/ide/cmd64x.c 2011-10-11 10:44:33.000000000 -0400
27137@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_r
27138 .dma_sff_read_status = ide_dma_sff_read_status,
27139 };
27140
27141-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
27142+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
27143 { /* 0: CMD643 */
27144 .name = DRV_NAME,
27145 .init_chipset = init_chipset_cmd64x,
27146diff -urNp linux-3.0.7/drivers/ide/cs5520.c linux-3.0.7/drivers/ide/cs5520.c
27147--- linux-3.0.7/drivers/ide/cs5520.c 2011-07-21 22:17:23.000000000 -0400
27148+++ linux-3.0.7/drivers/ide/cs5520.c 2011-10-11 10:44:33.000000000 -0400
27149@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_
27150 .set_dma_mode = cs5520_set_dma_mode,
27151 };
27152
27153-static const struct ide_port_info cyrix_chipset __devinitdata = {
27154+static const struct ide_port_info cyrix_chipset __devinitconst = {
27155 .name = DRV_NAME,
27156 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
27157 .port_ops = &cs5520_port_ops,
27158diff -urNp linux-3.0.7/drivers/ide/cs5530.c linux-3.0.7/drivers/ide/cs5530.c
27159--- linux-3.0.7/drivers/ide/cs5530.c 2011-07-21 22:17:23.000000000 -0400
27160+++ linux-3.0.7/drivers/ide/cs5530.c 2011-10-11 10:44:33.000000000 -0400
27161@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_
27162 .udma_filter = cs5530_udma_filter,
27163 };
27164
27165-static const struct ide_port_info cs5530_chipset __devinitdata = {
27166+static const struct ide_port_info cs5530_chipset __devinitconst = {
27167 .name = DRV_NAME,
27168 .init_chipset = init_chipset_cs5530,
27169 .init_hwif = init_hwif_cs5530,
27170diff -urNp linux-3.0.7/drivers/ide/cs5535.c linux-3.0.7/drivers/ide/cs5535.c
27171--- linux-3.0.7/drivers/ide/cs5535.c 2011-07-21 22:17:23.000000000 -0400
27172+++ linux-3.0.7/drivers/ide/cs5535.c 2011-10-11 10:44:33.000000000 -0400
27173@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_
27174 .cable_detect = cs5535_cable_detect,
27175 };
27176
27177-static const struct ide_port_info cs5535_chipset __devinitdata = {
27178+static const struct ide_port_info cs5535_chipset __devinitconst = {
27179 .name = DRV_NAME,
27180 .port_ops = &cs5535_port_ops,
27181 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
27182diff -urNp linux-3.0.7/drivers/ide/cy82c693.c linux-3.0.7/drivers/ide/cy82c693.c
27183--- linux-3.0.7/drivers/ide/cy82c693.c 2011-07-21 22:17:23.000000000 -0400
27184+++ linux-3.0.7/drivers/ide/cy82c693.c 2011-10-11 10:44:33.000000000 -0400
27185@@ -161,7 +161,7 @@ static const struct ide_port_ops cy82c69
27186 .set_dma_mode = cy82c693_set_dma_mode,
27187 };
27188
27189-static const struct ide_port_info cy82c693_chipset __devinitdata = {
27190+static const struct ide_port_info cy82c693_chipset __devinitconst = {
27191 .name = DRV_NAME,
27192 .init_iops = init_iops_cy82c693,
27193 .port_ops = &cy82c693_port_ops,
27194diff -urNp linux-3.0.7/drivers/ide/hpt366.c linux-3.0.7/drivers/ide/hpt366.c
27195--- linux-3.0.7/drivers/ide/hpt366.c 2011-07-21 22:17:23.000000000 -0400
27196+++ linux-3.0.7/drivers/ide/hpt366.c 2011-10-11 10:44:33.000000000 -0400
27197@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings
27198 }
27199 };
27200
27201-static const struct hpt_info hpt36x __devinitdata = {
27202+static const struct hpt_info hpt36x __devinitconst = {
27203 .chip_name = "HPT36x",
27204 .chip_type = HPT36x,
27205 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
27206@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __de
27207 .timings = &hpt36x_timings
27208 };
27209
27210-static const struct hpt_info hpt370 __devinitdata = {
27211+static const struct hpt_info hpt370 __devinitconst = {
27212 .chip_name = "HPT370",
27213 .chip_type = HPT370,
27214 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
27215@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __de
27216 .timings = &hpt37x_timings
27217 };
27218
27219-static const struct hpt_info hpt370a __devinitdata = {
27220+static const struct hpt_info hpt370a __devinitconst = {
27221 .chip_name = "HPT370A",
27222 .chip_type = HPT370A,
27223 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
27224@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __d
27225 .timings = &hpt37x_timings
27226 };
27227
27228-static const struct hpt_info hpt374 __devinitdata = {
27229+static const struct hpt_info hpt374 __devinitconst = {
27230 .chip_name = "HPT374",
27231 .chip_type = HPT374,
27232 .udma_mask = ATA_UDMA5,
27233@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __de
27234 .timings = &hpt37x_timings
27235 };
27236
27237-static const struct hpt_info hpt372 __devinitdata = {
27238+static const struct hpt_info hpt372 __devinitconst = {
27239 .chip_name = "HPT372",
27240 .chip_type = HPT372,
27241 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27242@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __de
27243 .timings = &hpt37x_timings
27244 };
27245
27246-static const struct hpt_info hpt372a __devinitdata = {
27247+static const struct hpt_info hpt372a __devinitconst = {
27248 .chip_name = "HPT372A",
27249 .chip_type = HPT372A,
27250 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27251@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __d
27252 .timings = &hpt37x_timings
27253 };
27254
27255-static const struct hpt_info hpt302 __devinitdata = {
27256+static const struct hpt_info hpt302 __devinitconst = {
27257 .chip_name = "HPT302",
27258 .chip_type = HPT302,
27259 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27260@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __de
27261 .timings = &hpt37x_timings
27262 };
27263
27264-static const struct hpt_info hpt371 __devinitdata = {
27265+static const struct hpt_info hpt371 __devinitconst = {
27266 .chip_name = "HPT371",
27267 .chip_type = HPT371,
27268 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27269@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __de
27270 .timings = &hpt37x_timings
27271 };
27272
27273-static const struct hpt_info hpt372n __devinitdata = {
27274+static const struct hpt_info hpt372n __devinitconst = {
27275 .chip_name = "HPT372N",
27276 .chip_type = HPT372N,
27277 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27278@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __d
27279 .timings = &hpt37x_timings
27280 };
27281
27282-static const struct hpt_info hpt302n __devinitdata = {
27283+static const struct hpt_info hpt302n __devinitconst = {
27284 .chip_name = "HPT302N",
27285 .chip_type = HPT302N,
27286 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27287@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __d
27288 .timings = &hpt37x_timings
27289 };
27290
27291-static const struct hpt_info hpt371n __devinitdata = {
27292+static const struct hpt_info hpt371n __devinitconst = {
27293 .chip_name = "HPT371N",
27294 .chip_type = HPT371N,
27295 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27296@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_d
27297 .dma_sff_read_status = ide_dma_sff_read_status,
27298 };
27299
27300-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
27301+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
27302 { /* 0: HPT36x */
27303 .name = DRV_NAME,
27304 .init_chipset = init_chipset_hpt366,
27305diff -urNp linux-3.0.7/drivers/ide/ide-cd.c linux-3.0.7/drivers/ide/ide-cd.c
27306--- linux-3.0.7/drivers/ide/ide-cd.c 2011-07-21 22:17:23.000000000 -0400
27307+++ linux-3.0.7/drivers/ide/ide-cd.c 2011-08-23 21:47:55.000000000 -0400
27308@@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
27309 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
27310 if ((unsigned long)buf & alignment
27311 || blk_rq_bytes(rq) & q->dma_pad_mask
27312- || object_is_on_stack(buf))
27313+ || object_starts_on_stack(buf))
27314 drive->dma = 0;
27315 }
27316 }
27317diff -urNp linux-3.0.7/drivers/ide/ide-floppy.c linux-3.0.7/drivers/ide/ide-floppy.c
27318--- linux-3.0.7/drivers/ide/ide-floppy.c 2011-07-21 22:17:23.000000000 -0400
27319+++ linux-3.0.7/drivers/ide/ide-floppy.c 2011-08-23 21:48:14.000000000 -0400
27320@@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
27321 u8 pc_buf[256], header_len, desc_cnt;
27322 int i, rc = 1, blocks, length;
27323
27324+ pax_track_stack();
27325+
27326 ide_debug_log(IDE_DBG_FUNC, "enter");
27327
27328 drive->bios_cyl = 0;
27329diff -urNp linux-3.0.7/drivers/ide/ide-pci-generic.c linux-3.0.7/drivers/ide/ide-pci-generic.c
27330--- linux-3.0.7/drivers/ide/ide-pci-generic.c 2011-07-21 22:17:23.000000000 -0400
27331+++ linux-3.0.7/drivers/ide/ide-pci-generic.c 2011-10-11 10:44:33.000000000 -0400
27332@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell
27333 .udma_mask = ATA_UDMA6, \
27334 }
27335
27336-static const struct ide_port_info generic_chipsets[] __devinitdata = {
27337+static const struct ide_port_info generic_chipsets[] __devinitconst = {
27338 /* 0: Unknown */
27339 DECLARE_GENERIC_PCI_DEV(0),
27340
27341diff -urNp linux-3.0.7/drivers/ide/it8172.c linux-3.0.7/drivers/ide/it8172.c
27342--- linux-3.0.7/drivers/ide/it8172.c 2011-07-21 22:17:23.000000000 -0400
27343+++ linux-3.0.7/drivers/ide/it8172.c 2011-10-11 10:44:33.000000000 -0400
27344@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_
27345 .set_dma_mode = it8172_set_dma_mode,
27346 };
27347
27348-static const struct ide_port_info it8172_port_info __devinitdata = {
27349+static const struct ide_port_info it8172_port_info __devinitconst = {
27350 .name = DRV_NAME,
27351 .port_ops = &it8172_port_ops,
27352 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
27353diff -urNp linux-3.0.7/drivers/ide/it8213.c linux-3.0.7/drivers/ide/it8213.c
27354--- linux-3.0.7/drivers/ide/it8213.c 2011-07-21 22:17:23.000000000 -0400
27355+++ linux-3.0.7/drivers/ide/it8213.c 2011-10-11 10:44:33.000000000 -0400
27356@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_
27357 .cable_detect = it8213_cable_detect,
27358 };
27359
27360-static const struct ide_port_info it8213_chipset __devinitdata = {
27361+static const struct ide_port_info it8213_chipset __devinitconst = {
27362 .name = DRV_NAME,
27363 .enablebits = { {0x41, 0x80, 0x80} },
27364 .port_ops = &it8213_port_ops,
27365diff -urNp linux-3.0.7/drivers/ide/it821x.c linux-3.0.7/drivers/ide/it821x.c
27366--- linux-3.0.7/drivers/ide/it821x.c 2011-07-21 22:17:23.000000000 -0400
27367+++ linux-3.0.7/drivers/ide/it821x.c 2011-10-11 10:44:33.000000000 -0400
27368@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_
27369 .cable_detect = it821x_cable_detect,
27370 };
27371
27372-static const struct ide_port_info it821x_chipset __devinitdata = {
27373+static const struct ide_port_info it821x_chipset __devinitconst = {
27374 .name = DRV_NAME,
27375 .init_chipset = init_chipset_it821x,
27376 .init_hwif = init_hwif_it821x,
27377diff -urNp linux-3.0.7/drivers/ide/jmicron.c linux-3.0.7/drivers/ide/jmicron.c
27378--- linux-3.0.7/drivers/ide/jmicron.c 2011-07-21 22:17:23.000000000 -0400
27379+++ linux-3.0.7/drivers/ide/jmicron.c 2011-10-11 10:44:33.000000000 -0400
27380@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron
27381 .cable_detect = jmicron_cable_detect,
27382 };
27383
27384-static const struct ide_port_info jmicron_chipset __devinitdata = {
27385+static const struct ide_port_info jmicron_chipset __devinitconst = {
27386 .name = DRV_NAME,
27387 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
27388 .port_ops = &jmicron_port_ops,
27389diff -urNp linux-3.0.7/drivers/ide/ns87415.c linux-3.0.7/drivers/ide/ns87415.c
27390--- linux-3.0.7/drivers/ide/ns87415.c 2011-07-21 22:17:23.000000000 -0400
27391+++ linux-3.0.7/drivers/ide/ns87415.c 2011-10-11 10:44:33.000000000 -0400
27392@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_
27393 .dma_sff_read_status = superio_dma_sff_read_status,
27394 };
27395
27396-static const struct ide_port_info ns87415_chipset __devinitdata = {
27397+static const struct ide_port_info ns87415_chipset __devinitconst = {
27398 .name = DRV_NAME,
27399 .init_hwif = init_hwif_ns87415,
27400 .tp_ops = &ns87415_tp_ops,
27401diff -urNp linux-3.0.7/drivers/ide/opti621.c linux-3.0.7/drivers/ide/opti621.c
27402--- linux-3.0.7/drivers/ide/opti621.c 2011-07-21 22:17:23.000000000 -0400
27403+++ linux-3.0.7/drivers/ide/opti621.c 2011-10-11 10:44:33.000000000 -0400
27404@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621
27405 .set_pio_mode = opti621_set_pio_mode,
27406 };
27407
27408-static const struct ide_port_info opti621_chipset __devinitdata = {
27409+static const struct ide_port_info opti621_chipset __devinitconst = {
27410 .name = DRV_NAME,
27411 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
27412 .port_ops = &opti621_port_ops,
27413diff -urNp linux-3.0.7/drivers/ide/pdc202xx_new.c linux-3.0.7/drivers/ide/pdc202xx_new.c
27414--- linux-3.0.7/drivers/ide/pdc202xx_new.c 2011-07-21 22:17:23.000000000 -0400
27415+++ linux-3.0.7/drivers/ide/pdc202xx_new.c 2011-10-11 10:44:33.000000000 -0400
27416@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_
27417 .udma_mask = udma, \
27418 }
27419
27420-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
27421+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
27422 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
27423 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
27424 };
27425diff -urNp linux-3.0.7/drivers/ide/pdc202xx_old.c linux-3.0.7/drivers/ide/pdc202xx_old.c
27426--- linux-3.0.7/drivers/ide/pdc202xx_old.c 2011-07-21 22:17:23.000000000 -0400
27427+++ linux-3.0.7/drivers/ide/pdc202xx_old.c 2011-10-11 10:44:33.000000000 -0400
27428@@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x
27429 .max_sectors = sectors, \
27430 }
27431
27432-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
27433+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
27434 { /* 0: PDC20246 */
27435 .name = DRV_NAME,
27436 .init_chipset = init_chipset_pdc202xx,
27437diff -urNp linux-3.0.7/drivers/ide/piix.c linux-3.0.7/drivers/ide/piix.c
27438--- linux-3.0.7/drivers/ide/piix.c 2011-07-21 22:17:23.000000000 -0400
27439+++ linux-3.0.7/drivers/ide/piix.c 2011-10-11 10:44:33.000000000 -0400
27440@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_por
27441 .udma_mask = udma, \
27442 }
27443
27444-static const struct ide_port_info piix_pci_info[] __devinitdata = {
27445+static const struct ide_port_info piix_pci_info[] __devinitconst = {
27446 /* 0: MPIIX */
27447 { /*
27448 * MPIIX actually has only a single IDE channel mapped to
27449diff -urNp linux-3.0.7/drivers/ide/rz1000.c linux-3.0.7/drivers/ide/rz1000.c
27450--- linux-3.0.7/drivers/ide/rz1000.c 2011-07-21 22:17:23.000000000 -0400
27451+++ linux-3.0.7/drivers/ide/rz1000.c 2011-10-11 10:44:33.000000000 -0400
27452@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_read
27453 }
27454 }
27455
27456-static const struct ide_port_info rz1000_chipset __devinitdata = {
27457+static const struct ide_port_info rz1000_chipset __devinitconst = {
27458 .name = DRV_NAME,
27459 .host_flags = IDE_HFLAG_NO_DMA,
27460 };
27461diff -urNp linux-3.0.7/drivers/ide/sc1200.c linux-3.0.7/drivers/ide/sc1200.c
27462--- linux-3.0.7/drivers/ide/sc1200.c 2011-07-21 22:17:23.000000000 -0400
27463+++ linux-3.0.7/drivers/ide/sc1200.c 2011-10-11 10:44:33.000000000 -0400
27464@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_d
27465 .dma_sff_read_status = ide_dma_sff_read_status,
27466 };
27467
27468-static const struct ide_port_info sc1200_chipset __devinitdata = {
27469+static const struct ide_port_info sc1200_chipset __devinitconst = {
27470 .name = DRV_NAME,
27471 .port_ops = &sc1200_port_ops,
27472 .dma_ops = &sc1200_dma_ops,
27473diff -urNp linux-3.0.7/drivers/ide/scc_pata.c linux-3.0.7/drivers/ide/scc_pata.c
27474--- linux-3.0.7/drivers/ide/scc_pata.c 2011-07-21 22:17:23.000000000 -0400
27475+++ linux-3.0.7/drivers/ide/scc_pata.c 2011-10-11 10:44:33.000000000 -0400
27476@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_
27477 .dma_sff_read_status = scc_dma_sff_read_status,
27478 };
27479
27480-static const struct ide_port_info scc_chipset __devinitdata = {
27481+static const struct ide_port_info scc_chipset __devinitconst = {
27482 .name = "sccIDE",
27483 .init_iops = init_iops_scc,
27484 .init_dma = scc_init_dma,
27485diff -urNp linux-3.0.7/drivers/ide/serverworks.c linux-3.0.7/drivers/ide/serverworks.c
27486--- linux-3.0.7/drivers/ide/serverworks.c 2011-07-21 22:17:23.000000000 -0400
27487+++ linux-3.0.7/drivers/ide/serverworks.c 2011-10-11 10:44:33.000000000 -0400
27488@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_p
27489 .cable_detect = svwks_cable_detect,
27490 };
27491
27492-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
27493+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
27494 { /* 0: OSB4 */
27495 .name = DRV_NAME,
27496 .init_chipset = init_chipset_svwks,
27497diff -urNp linux-3.0.7/drivers/ide/setup-pci.c linux-3.0.7/drivers/ide/setup-pci.c
27498--- linux-3.0.7/drivers/ide/setup-pci.c 2011-07-21 22:17:23.000000000 -0400
27499+++ linux-3.0.7/drivers/ide/setup-pci.c 2011-08-23 21:48:14.000000000 -0400
27500@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
27501 int ret, i, n_ports = dev2 ? 4 : 2;
27502 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
27503
27504+ pax_track_stack();
27505+
27506 for (i = 0; i < n_ports / 2; i++) {
27507 ret = ide_setup_pci_controller(pdev[i], d, !i);
27508 if (ret < 0)
27509diff -urNp linux-3.0.7/drivers/ide/siimage.c linux-3.0.7/drivers/ide/siimage.c
27510--- linux-3.0.7/drivers/ide/siimage.c 2011-07-21 22:17:23.000000000 -0400
27511+++ linux-3.0.7/drivers/ide/siimage.c 2011-10-11 10:44:33.000000000 -0400
27512@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_
27513 .udma_mask = ATA_UDMA6, \
27514 }
27515
27516-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
27517+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
27518 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
27519 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
27520 };
27521diff -urNp linux-3.0.7/drivers/ide/sis5513.c linux-3.0.7/drivers/ide/sis5513.c
27522--- linux-3.0.7/drivers/ide/sis5513.c 2011-07-21 22:17:23.000000000 -0400
27523+++ linux-3.0.7/drivers/ide/sis5513.c 2011-10-11 10:44:33.000000000 -0400
27524@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata
27525 .cable_detect = sis_cable_detect,
27526 };
27527
27528-static const struct ide_port_info sis5513_chipset __devinitdata = {
27529+static const struct ide_port_info sis5513_chipset __devinitconst = {
27530 .name = DRV_NAME,
27531 .init_chipset = init_chipset_sis5513,
27532 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
27533diff -urNp linux-3.0.7/drivers/ide/sl82c105.c linux-3.0.7/drivers/ide/sl82c105.c
27534--- linux-3.0.7/drivers/ide/sl82c105.c 2011-07-21 22:17:23.000000000 -0400
27535+++ linux-3.0.7/drivers/ide/sl82c105.c 2011-10-11 10:44:33.000000000 -0400
27536@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105
27537 .dma_sff_read_status = ide_dma_sff_read_status,
27538 };
27539
27540-static const struct ide_port_info sl82c105_chipset __devinitdata = {
27541+static const struct ide_port_info sl82c105_chipset __devinitconst = {
27542 .name = DRV_NAME,
27543 .init_chipset = init_chipset_sl82c105,
27544 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
27545diff -urNp linux-3.0.7/drivers/ide/slc90e66.c linux-3.0.7/drivers/ide/slc90e66.c
27546--- linux-3.0.7/drivers/ide/slc90e66.c 2011-07-21 22:17:23.000000000 -0400
27547+++ linux-3.0.7/drivers/ide/slc90e66.c 2011-10-11 10:44:33.000000000 -0400
27548@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e6
27549 .cable_detect = slc90e66_cable_detect,
27550 };
27551
27552-static const struct ide_port_info slc90e66_chipset __devinitdata = {
27553+static const struct ide_port_info slc90e66_chipset __devinitconst = {
27554 .name = DRV_NAME,
27555 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
27556 .port_ops = &slc90e66_port_ops,
27557diff -urNp linux-3.0.7/drivers/ide/tc86c001.c linux-3.0.7/drivers/ide/tc86c001.c
27558--- linux-3.0.7/drivers/ide/tc86c001.c 2011-07-21 22:17:23.000000000 -0400
27559+++ linux-3.0.7/drivers/ide/tc86c001.c 2011-10-11 10:44:33.000000000 -0400
27560@@ -191,7 +191,7 @@ static const struct ide_dma_ops tc86c001
27561 .dma_sff_read_status = ide_dma_sff_read_status,
27562 };
27563
27564-static const struct ide_port_info tc86c001_chipset __devinitdata = {
27565+static const struct ide_port_info tc86c001_chipset __devinitconst = {
27566 .name = DRV_NAME,
27567 .init_hwif = init_hwif_tc86c001,
27568 .port_ops = &tc86c001_port_ops,
27569diff -urNp linux-3.0.7/drivers/ide/triflex.c linux-3.0.7/drivers/ide/triflex.c
27570--- linux-3.0.7/drivers/ide/triflex.c 2011-07-21 22:17:23.000000000 -0400
27571+++ linux-3.0.7/drivers/ide/triflex.c 2011-10-11 10:44:33.000000000 -0400
27572@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex
27573 .set_dma_mode = triflex_set_mode,
27574 };
27575
27576-static const struct ide_port_info triflex_device __devinitdata = {
27577+static const struct ide_port_info triflex_device __devinitconst = {
27578 .name = DRV_NAME,
27579 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
27580 .port_ops = &triflex_port_ops,
27581diff -urNp linux-3.0.7/drivers/ide/trm290.c linux-3.0.7/drivers/ide/trm290.c
27582--- linux-3.0.7/drivers/ide/trm290.c 2011-07-21 22:17:23.000000000 -0400
27583+++ linux-3.0.7/drivers/ide/trm290.c 2011-10-11 10:44:33.000000000 -0400
27584@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops
27585 .dma_check = trm290_dma_check,
27586 };
27587
27588-static const struct ide_port_info trm290_chipset __devinitdata = {
27589+static const struct ide_port_info trm290_chipset __devinitconst = {
27590 .name = DRV_NAME,
27591 .init_hwif = init_hwif_trm290,
27592 .tp_ops = &trm290_tp_ops,
27593diff -urNp linux-3.0.7/drivers/ide/via82cxxx.c linux-3.0.7/drivers/ide/via82cxxx.c
27594--- linux-3.0.7/drivers/ide/via82cxxx.c 2011-07-21 22:17:23.000000000 -0400
27595+++ linux-3.0.7/drivers/ide/via82cxxx.c 2011-10-11 10:44:33.000000000 -0400
27596@@ -403,7 +403,7 @@ static const struct ide_port_ops via_por
27597 .cable_detect = via82cxxx_cable_detect,
27598 };
27599
27600-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
27601+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
27602 .name = DRV_NAME,
27603 .init_chipset = init_chipset_via82cxxx,
27604 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
27605diff -urNp linux-3.0.7/drivers/infiniband/core/cm.c linux-3.0.7/drivers/infiniband/core/cm.c
27606--- linux-3.0.7/drivers/infiniband/core/cm.c 2011-07-21 22:17:23.000000000 -0400
27607+++ linux-3.0.7/drivers/infiniband/core/cm.c 2011-08-23 21:47:55.000000000 -0400
27608@@ -113,7 +113,7 @@ static char const counter_group_names[CM
27609
27610 struct cm_counter_group {
27611 struct kobject obj;
27612- atomic_long_t counter[CM_ATTR_COUNT];
27613+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
27614 };
27615
27616 struct cm_counter_attribute {
27617@@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
27618 struct ib_mad_send_buf *msg = NULL;
27619 int ret;
27620
27621- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27622+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27623 counter[CM_REQ_COUNTER]);
27624
27625 /* Quick state check to discard duplicate REQs. */
27626@@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
27627 if (!cm_id_priv)
27628 return;
27629
27630- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27631+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27632 counter[CM_REP_COUNTER]);
27633 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
27634 if (ret)
27635@@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
27636 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
27637 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
27638 spin_unlock_irq(&cm_id_priv->lock);
27639- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27640+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27641 counter[CM_RTU_COUNTER]);
27642 goto out;
27643 }
27644@@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
27645 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
27646 dreq_msg->local_comm_id);
27647 if (!cm_id_priv) {
27648- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27649+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27650 counter[CM_DREQ_COUNTER]);
27651 cm_issue_drep(work->port, work->mad_recv_wc);
27652 return -EINVAL;
27653@@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
27654 case IB_CM_MRA_REP_RCVD:
27655 break;
27656 case IB_CM_TIMEWAIT:
27657- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27658+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27659 counter[CM_DREQ_COUNTER]);
27660 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
27661 goto unlock;
27662@@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
27663 cm_free_msg(msg);
27664 goto deref;
27665 case IB_CM_DREQ_RCVD:
27666- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27667+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27668 counter[CM_DREQ_COUNTER]);
27669 goto unlock;
27670 default:
27671@@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
27672 ib_modify_mad(cm_id_priv->av.port->mad_agent,
27673 cm_id_priv->msg, timeout)) {
27674 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
27675- atomic_long_inc(&work->port->
27676+ atomic_long_inc_unchecked(&work->port->
27677 counter_group[CM_RECV_DUPLICATES].
27678 counter[CM_MRA_COUNTER]);
27679 goto out;
27680@@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
27681 break;
27682 case IB_CM_MRA_REQ_RCVD:
27683 case IB_CM_MRA_REP_RCVD:
27684- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27685+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27686 counter[CM_MRA_COUNTER]);
27687 /* fall through */
27688 default:
27689@@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
27690 case IB_CM_LAP_IDLE:
27691 break;
27692 case IB_CM_MRA_LAP_SENT:
27693- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27694+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27695 counter[CM_LAP_COUNTER]);
27696 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
27697 goto unlock;
27698@@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
27699 cm_free_msg(msg);
27700 goto deref;
27701 case IB_CM_LAP_RCVD:
27702- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27703+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27704 counter[CM_LAP_COUNTER]);
27705 goto unlock;
27706 default:
27707@@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
27708 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
27709 if (cur_cm_id_priv) {
27710 spin_unlock_irq(&cm.lock);
27711- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
27712+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
27713 counter[CM_SIDR_REQ_COUNTER]);
27714 goto out; /* Duplicate message. */
27715 }
27716@@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
27717 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
27718 msg->retries = 1;
27719
27720- atomic_long_add(1 + msg->retries,
27721+ atomic_long_add_unchecked(1 + msg->retries,
27722 &port->counter_group[CM_XMIT].counter[attr_index]);
27723 if (msg->retries)
27724- atomic_long_add(msg->retries,
27725+ atomic_long_add_unchecked(msg->retries,
27726 &port->counter_group[CM_XMIT_RETRIES].
27727 counter[attr_index]);
27728
27729@@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
27730 }
27731
27732 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
27733- atomic_long_inc(&port->counter_group[CM_RECV].
27734+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
27735 counter[attr_id - CM_ATTR_ID_OFFSET]);
27736
27737 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
27738@@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
27739 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
27740
27741 return sprintf(buf, "%ld\n",
27742- atomic_long_read(&group->counter[cm_attr->index]));
27743+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
27744 }
27745
27746 static const struct sysfs_ops cm_counter_ops = {
27747diff -urNp linux-3.0.7/drivers/infiniband/core/fmr_pool.c linux-3.0.7/drivers/infiniband/core/fmr_pool.c
27748--- linux-3.0.7/drivers/infiniband/core/fmr_pool.c 2011-07-21 22:17:23.000000000 -0400
27749+++ linux-3.0.7/drivers/infiniband/core/fmr_pool.c 2011-08-23 21:47:55.000000000 -0400
27750@@ -97,8 +97,8 @@ struct ib_fmr_pool {
27751
27752 struct task_struct *thread;
27753
27754- atomic_t req_ser;
27755- atomic_t flush_ser;
27756+ atomic_unchecked_t req_ser;
27757+ atomic_unchecked_t flush_ser;
27758
27759 wait_queue_head_t force_wait;
27760 };
27761@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
27762 struct ib_fmr_pool *pool = pool_ptr;
27763
27764 do {
27765- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
27766+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
27767 ib_fmr_batch_release(pool);
27768
27769- atomic_inc(&pool->flush_ser);
27770+ atomic_inc_unchecked(&pool->flush_ser);
27771 wake_up_interruptible(&pool->force_wait);
27772
27773 if (pool->flush_function)
27774@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
27775 }
27776
27777 set_current_state(TASK_INTERRUPTIBLE);
27778- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
27779+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
27780 !kthread_should_stop())
27781 schedule();
27782 __set_current_state(TASK_RUNNING);
27783@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
27784 pool->dirty_watermark = params->dirty_watermark;
27785 pool->dirty_len = 0;
27786 spin_lock_init(&pool->pool_lock);
27787- atomic_set(&pool->req_ser, 0);
27788- atomic_set(&pool->flush_ser, 0);
27789+ atomic_set_unchecked(&pool->req_ser, 0);
27790+ atomic_set_unchecked(&pool->flush_ser, 0);
27791 init_waitqueue_head(&pool->force_wait);
27792
27793 pool->thread = kthread_run(ib_fmr_cleanup_thread,
27794@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
27795 }
27796 spin_unlock_irq(&pool->pool_lock);
27797
27798- serial = atomic_inc_return(&pool->req_ser);
27799+ serial = atomic_inc_return_unchecked(&pool->req_ser);
27800 wake_up_process(pool->thread);
27801
27802 if (wait_event_interruptible(pool->force_wait,
27803- atomic_read(&pool->flush_ser) - serial >= 0))
27804+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
27805 return -EINTR;
27806
27807 return 0;
27808@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
27809 } else {
27810 list_add_tail(&fmr->list, &pool->dirty_list);
27811 if (++pool->dirty_len >= pool->dirty_watermark) {
27812- atomic_inc(&pool->req_ser);
27813+ atomic_inc_unchecked(&pool->req_ser);
27814 wake_up_process(pool->thread);
27815 }
27816 }
27817diff -urNp linux-3.0.7/drivers/infiniband/hw/cxgb4/mem.c linux-3.0.7/drivers/infiniband/hw/cxgb4/mem.c
27818--- linux-3.0.7/drivers/infiniband/hw/cxgb4/mem.c 2011-07-21 22:17:23.000000000 -0400
27819+++ linux-3.0.7/drivers/infiniband/hw/cxgb4/mem.c 2011-08-23 21:47:55.000000000 -0400
27820@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
27821 int err;
27822 struct fw_ri_tpte tpt;
27823 u32 stag_idx;
27824- static atomic_t key;
27825+ static atomic_unchecked_t key;
27826
27827 if (c4iw_fatal_error(rdev))
27828 return -EIO;
27829@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
27830 &rdev->resource.tpt_fifo_lock);
27831 if (!stag_idx)
27832 return -ENOMEM;
27833- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
27834+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
27835 }
27836 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
27837 __func__, stag_state, type, pdid, stag_idx);
27838diff -urNp linux-3.0.7/drivers/infiniband/hw/ipath/ipath_fs.c linux-3.0.7/drivers/infiniband/hw/ipath/ipath_fs.c
27839--- linux-3.0.7/drivers/infiniband/hw/ipath/ipath_fs.c 2011-07-21 22:17:23.000000000 -0400
27840+++ linux-3.0.7/drivers/infiniband/hw/ipath/ipath_fs.c 2011-08-23 21:48:14.000000000 -0400
27841@@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
27842 struct infinipath_counters counters;
27843 struct ipath_devdata *dd;
27844
27845+ pax_track_stack();
27846+
27847 dd = file->f_path.dentry->d_inode->i_private;
27848 dd->ipath_f_read_counters(dd, &counters);
27849
27850diff -urNp linux-3.0.7/drivers/infiniband/hw/ipath/ipath_rc.c linux-3.0.7/drivers/infiniband/hw/ipath/ipath_rc.c
27851--- linux-3.0.7/drivers/infiniband/hw/ipath/ipath_rc.c 2011-07-21 22:17:23.000000000 -0400
27852+++ linux-3.0.7/drivers/infiniband/hw/ipath/ipath_rc.c 2011-08-23 21:47:55.000000000 -0400
27853@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
27854 struct ib_atomic_eth *ateth;
27855 struct ipath_ack_entry *e;
27856 u64 vaddr;
27857- atomic64_t *maddr;
27858+ atomic64_unchecked_t *maddr;
27859 u64 sdata;
27860 u32 rkey;
27861 u8 next;
27862@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
27863 IB_ACCESS_REMOTE_ATOMIC)))
27864 goto nack_acc_unlck;
27865 /* Perform atomic OP and save result. */
27866- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
27867+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
27868 sdata = be64_to_cpu(ateth->swap_data);
27869 e = &qp->s_ack_queue[qp->r_head_ack_queue];
27870 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
27871- (u64) atomic64_add_return(sdata, maddr) - sdata :
27872+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
27873 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
27874 be64_to_cpu(ateth->compare_data),
27875 sdata);
27876diff -urNp linux-3.0.7/drivers/infiniband/hw/ipath/ipath_ruc.c linux-3.0.7/drivers/infiniband/hw/ipath/ipath_ruc.c
27877--- linux-3.0.7/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-07-21 22:17:23.000000000 -0400
27878+++ linux-3.0.7/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-08-23 21:47:55.000000000 -0400
27879@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
27880 unsigned long flags;
27881 struct ib_wc wc;
27882 u64 sdata;
27883- atomic64_t *maddr;
27884+ atomic64_unchecked_t *maddr;
27885 enum ib_wc_status send_status;
27886
27887 /*
27888@@ -382,11 +382,11 @@ again:
27889 IB_ACCESS_REMOTE_ATOMIC)))
27890 goto acc_err;
27891 /* Perform atomic OP and save result. */
27892- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
27893+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
27894 sdata = wqe->wr.wr.atomic.compare_add;
27895 *(u64 *) sqp->s_sge.sge.vaddr =
27896 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
27897- (u64) atomic64_add_return(sdata, maddr) - sdata :
27898+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
27899 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
27900 sdata, wqe->wr.wr.atomic.swap);
27901 goto send_comp;
27902diff -urNp linux-3.0.7/drivers/infiniband/hw/nes/nes.c linux-3.0.7/drivers/infiniband/hw/nes/nes.c
27903--- linux-3.0.7/drivers/infiniband/hw/nes/nes.c 2011-07-21 22:17:23.000000000 -0400
27904+++ linux-3.0.7/drivers/infiniband/hw/nes/nes.c 2011-08-23 21:47:55.000000000 -0400
27905@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
27906 LIST_HEAD(nes_adapter_list);
27907 static LIST_HEAD(nes_dev_list);
27908
27909-atomic_t qps_destroyed;
27910+atomic_unchecked_t qps_destroyed;
27911
27912 static unsigned int ee_flsh_adapter;
27913 static unsigned int sysfs_nonidx_addr;
27914@@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
27915 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
27916 struct nes_adapter *nesadapter = nesdev->nesadapter;
27917
27918- atomic_inc(&qps_destroyed);
27919+ atomic_inc_unchecked(&qps_destroyed);
27920
27921 /* Free the control structures */
27922
27923diff -urNp linux-3.0.7/drivers/infiniband/hw/nes/nes_cm.c linux-3.0.7/drivers/infiniband/hw/nes/nes_cm.c
27924--- linux-3.0.7/drivers/infiniband/hw/nes/nes_cm.c 2011-07-21 22:17:23.000000000 -0400
27925+++ linux-3.0.7/drivers/infiniband/hw/nes/nes_cm.c 2011-08-23 21:47:55.000000000 -0400
27926@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
27927 u32 cm_packets_retrans;
27928 u32 cm_packets_created;
27929 u32 cm_packets_received;
27930-atomic_t cm_listens_created;
27931-atomic_t cm_listens_destroyed;
27932+atomic_unchecked_t cm_listens_created;
27933+atomic_unchecked_t cm_listens_destroyed;
27934 u32 cm_backlog_drops;
27935-atomic_t cm_loopbacks;
27936-atomic_t cm_nodes_created;
27937-atomic_t cm_nodes_destroyed;
27938-atomic_t cm_accel_dropped_pkts;
27939-atomic_t cm_resets_recvd;
27940+atomic_unchecked_t cm_loopbacks;
27941+atomic_unchecked_t cm_nodes_created;
27942+atomic_unchecked_t cm_nodes_destroyed;
27943+atomic_unchecked_t cm_accel_dropped_pkts;
27944+atomic_unchecked_t cm_resets_recvd;
27945
27946 static inline int mini_cm_accelerated(struct nes_cm_core *,
27947 struct nes_cm_node *);
27948@@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
27949
27950 static struct nes_cm_core *g_cm_core;
27951
27952-atomic_t cm_connects;
27953-atomic_t cm_accepts;
27954-atomic_t cm_disconnects;
27955-atomic_t cm_closes;
27956-atomic_t cm_connecteds;
27957-atomic_t cm_connect_reqs;
27958-atomic_t cm_rejects;
27959+atomic_unchecked_t cm_connects;
27960+atomic_unchecked_t cm_accepts;
27961+atomic_unchecked_t cm_disconnects;
27962+atomic_unchecked_t cm_closes;
27963+atomic_unchecked_t cm_connecteds;
27964+atomic_unchecked_t cm_connect_reqs;
27965+atomic_unchecked_t cm_rejects;
27966
27967
27968 /**
27969@@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
27970 kfree(listener);
27971 listener = NULL;
27972 ret = 0;
27973- atomic_inc(&cm_listens_destroyed);
27974+ atomic_inc_unchecked(&cm_listens_destroyed);
27975 } else {
27976 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
27977 }
27978@@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
27979 cm_node->rem_mac);
27980
27981 add_hte_node(cm_core, cm_node);
27982- atomic_inc(&cm_nodes_created);
27983+ atomic_inc_unchecked(&cm_nodes_created);
27984
27985 return cm_node;
27986 }
27987@@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
27988 }
27989
27990 atomic_dec(&cm_core->node_cnt);
27991- atomic_inc(&cm_nodes_destroyed);
27992+ atomic_inc_unchecked(&cm_nodes_destroyed);
27993 nesqp = cm_node->nesqp;
27994 if (nesqp) {
27995 nesqp->cm_node = NULL;
27996@@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
27997
27998 static void drop_packet(struct sk_buff *skb)
27999 {
28000- atomic_inc(&cm_accel_dropped_pkts);
28001+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
28002 dev_kfree_skb_any(skb);
28003 }
28004
28005@@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
28006 {
28007
28008 int reset = 0; /* whether to send reset in case of err.. */
28009- atomic_inc(&cm_resets_recvd);
28010+ atomic_inc_unchecked(&cm_resets_recvd);
28011 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
28012 " refcnt=%d\n", cm_node, cm_node->state,
28013 atomic_read(&cm_node->ref_count));
28014@@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
28015 rem_ref_cm_node(cm_node->cm_core, cm_node);
28016 return NULL;
28017 }
28018- atomic_inc(&cm_loopbacks);
28019+ atomic_inc_unchecked(&cm_loopbacks);
28020 loopbackremotenode->loopbackpartner = cm_node;
28021 loopbackremotenode->tcp_cntxt.rcv_wscale =
28022 NES_CM_DEFAULT_RCV_WND_SCALE;
28023@@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
28024 add_ref_cm_node(cm_node);
28025 } else if (cm_node->state == NES_CM_STATE_TSA) {
28026 rem_ref_cm_node(cm_core, cm_node);
28027- atomic_inc(&cm_accel_dropped_pkts);
28028+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
28029 dev_kfree_skb_any(skb);
28030 break;
28031 }
28032@@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
28033
28034 if ((cm_id) && (cm_id->event_handler)) {
28035 if (issue_disconn) {
28036- atomic_inc(&cm_disconnects);
28037+ atomic_inc_unchecked(&cm_disconnects);
28038 cm_event.event = IW_CM_EVENT_DISCONNECT;
28039 cm_event.status = disconn_status;
28040 cm_event.local_addr = cm_id->local_addr;
28041@@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
28042 }
28043
28044 if (issue_close) {
28045- atomic_inc(&cm_closes);
28046+ atomic_inc_unchecked(&cm_closes);
28047 nes_disconnect(nesqp, 1);
28048
28049 cm_id->provider_data = nesqp;
28050@@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
28051
28052 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
28053 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
28054- atomic_inc(&cm_accepts);
28055+ atomic_inc_unchecked(&cm_accepts);
28056
28057 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
28058 netdev_refcnt_read(nesvnic->netdev));
28059@@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
28060
28061 struct nes_cm_core *cm_core;
28062
28063- atomic_inc(&cm_rejects);
28064+ atomic_inc_unchecked(&cm_rejects);
28065 cm_node = (struct nes_cm_node *) cm_id->provider_data;
28066 loopback = cm_node->loopbackpartner;
28067 cm_core = cm_node->cm_core;
28068@@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id,
28069 ntohl(cm_id->local_addr.sin_addr.s_addr),
28070 ntohs(cm_id->local_addr.sin_port));
28071
28072- atomic_inc(&cm_connects);
28073+ atomic_inc_unchecked(&cm_connects);
28074 nesqp->active_conn = 1;
28075
28076 /* cache the cm_id in the qp */
28077@@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
28078 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
28079 return err;
28080 }
28081- atomic_inc(&cm_listens_created);
28082+ atomic_inc_unchecked(&cm_listens_created);
28083 }
28084
28085 cm_id->add_ref(cm_id);
28086@@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
28087 if (nesqp->destroyed) {
28088 return;
28089 }
28090- atomic_inc(&cm_connecteds);
28091+ atomic_inc_unchecked(&cm_connecteds);
28092 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
28093 " local port 0x%04X. jiffies = %lu.\n",
28094 nesqp->hwqp.qp_id,
28095@@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
28096
28097 cm_id->add_ref(cm_id);
28098 ret = cm_id->event_handler(cm_id, &cm_event);
28099- atomic_inc(&cm_closes);
28100+ atomic_inc_unchecked(&cm_closes);
28101 cm_event.event = IW_CM_EVENT_CLOSE;
28102 cm_event.status = 0;
28103 cm_event.provider_data = cm_id->provider_data;
28104@@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
28105 return;
28106 cm_id = cm_node->cm_id;
28107
28108- atomic_inc(&cm_connect_reqs);
28109+ atomic_inc_unchecked(&cm_connect_reqs);
28110 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
28111 cm_node, cm_id, jiffies);
28112
28113@@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
28114 return;
28115 cm_id = cm_node->cm_id;
28116
28117- atomic_inc(&cm_connect_reqs);
28118+ atomic_inc_unchecked(&cm_connect_reqs);
28119 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
28120 cm_node, cm_id, jiffies);
28121
28122diff -urNp linux-3.0.7/drivers/infiniband/hw/nes/nes.h linux-3.0.7/drivers/infiniband/hw/nes/nes.h
28123--- linux-3.0.7/drivers/infiniband/hw/nes/nes.h 2011-07-21 22:17:23.000000000 -0400
28124+++ linux-3.0.7/drivers/infiniband/hw/nes/nes.h 2011-08-23 21:47:55.000000000 -0400
28125@@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
28126 extern unsigned int wqm_quanta;
28127 extern struct list_head nes_adapter_list;
28128
28129-extern atomic_t cm_connects;
28130-extern atomic_t cm_accepts;
28131-extern atomic_t cm_disconnects;
28132-extern atomic_t cm_closes;
28133-extern atomic_t cm_connecteds;
28134-extern atomic_t cm_connect_reqs;
28135-extern atomic_t cm_rejects;
28136-extern atomic_t mod_qp_timouts;
28137-extern atomic_t qps_created;
28138-extern atomic_t qps_destroyed;
28139-extern atomic_t sw_qps_destroyed;
28140+extern atomic_unchecked_t cm_connects;
28141+extern atomic_unchecked_t cm_accepts;
28142+extern atomic_unchecked_t cm_disconnects;
28143+extern atomic_unchecked_t cm_closes;
28144+extern atomic_unchecked_t cm_connecteds;
28145+extern atomic_unchecked_t cm_connect_reqs;
28146+extern atomic_unchecked_t cm_rejects;
28147+extern atomic_unchecked_t mod_qp_timouts;
28148+extern atomic_unchecked_t qps_created;
28149+extern atomic_unchecked_t qps_destroyed;
28150+extern atomic_unchecked_t sw_qps_destroyed;
28151 extern u32 mh_detected;
28152 extern u32 mh_pauses_sent;
28153 extern u32 cm_packets_sent;
28154@@ -194,14 +194,14 @@ extern u32 cm_packets_created;
28155 extern u32 cm_packets_received;
28156 extern u32 cm_packets_dropped;
28157 extern u32 cm_packets_retrans;
28158-extern atomic_t cm_listens_created;
28159-extern atomic_t cm_listens_destroyed;
28160+extern atomic_unchecked_t cm_listens_created;
28161+extern atomic_unchecked_t cm_listens_destroyed;
28162 extern u32 cm_backlog_drops;
28163-extern atomic_t cm_loopbacks;
28164-extern atomic_t cm_nodes_created;
28165-extern atomic_t cm_nodes_destroyed;
28166-extern atomic_t cm_accel_dropped_pkts;
28167-extern atomic_t cm_resets_recvd;
28168+extern atomic_unchecked_t cm_loopbacks;
28169+extern atomic_unchecked_t cm_nodes_created;
28170+extern atomic_unchecked_t cm_nodes_destroyed;
28171+extern atomic_unchecked_t cm_accel_dropped_pkts;
28172+extern atomic_unchecked_t cm_resets_recvd;
28173
28174 extern u32 int_mod_timer_init;
28175 extern u32 int_mod_cq_depth_256;
28176diff -urNp linux-3.0.7/drivers/infiniband/hw/nes/nes_nic.c linux-3.0.7/drivers/infiniband/hw/nes/nes_nic.c
28177--- linux-3.0.7/drivers/infiniband/hw/nes/nes_nic.c 2011-07-21 22:17:23.000000000 -0400
28178+++ linux-3.0.7/drivers/infiniband/hw/nes/nes_nic.c 2011-08-23 21:47:55.000000000 -0400
28179@@ -1274,31 +1274,31 @@ static void nes_netdev_get_ethtool_stats
28180 target_stat_values[++index] = mh_detected;
28181 target_stat_values[++index] = mh_pauses_sent;
28182 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
28183- target_stat_values[++index] = atomic_read(&cm_connects);
28184- target_stat_values[++index] = atomic_read(&cm_accepts);
28185- target_stat_values[++index] = atomic_read(&cm_disconnects);
28186- target_stat_values[++index] = atomic_read(&cm_connecteds);
28187- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
28188- target_stat_values[++index] = atomic_read(&cm_rejects);
28189- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
28190- target_stat_values[++index] = atomic_read(&qps_created);
28191- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
28192- target_stat_values[++index] = atomic_read(&qps_destroyed);
28193- target_stat_values[++index] = atomic_read(&cm_closes);
28194+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
28195+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
28196+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
28197+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
28198+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
28199+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
28200+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
28201+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
28202+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
28203+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
28204+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
28205 target_stat_values[++index] = cm_packets_sent;
28206 target_stat_values[++index] = cm_packets_bounced;
28207 target_stat_values[++index] = cm_packets_created;
28208 target_stat_values[++index] = cm_packets_received;
28209 target_stat_values[++index] = cm_packets_dropped;
28210 target_stat_values[++index] = cm_packets_retrans;
28211- target_stat_values[++index] = atomic_read(&cm_listens_created);
28212- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
28213+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
28214+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
28215 target_stat_values[++index] = cm_backlog_drops;
28216- target_stat_values[++index] = atomic_read(&cm_loopbacks);
28217- target_stat_values[++index] = atomic_read(&cm_nodes_created);
28218- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
28219- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
28220- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
28221+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
28222+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
28223+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
28224+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
28225+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
28226 target_stat_values[++index] = nesadapter->free_4kpbl;
28227 target_stat_values[++index] = nesadapter->free_256pbl;
28228 target_stat_values[++index] = int_mod_timer_init;
28229diff -urNp linux-3.0.7/drivers/infiniband/hw/nes/nes_verbs.c linux-3.0.7/drivers/infiniband/hw/nes/nes_verbs.c
28230--- linux-3.0.7/drivers/infiniband/hw/nes/nes_verbs.c 2011-07-21 22:17:23.000000000 -0400
28231+++ linux-3.0.7/drivers/infiniband/hw/nes/nes_verbs.c 2011-08-23 21:47:55.000000000 -0400
28232@@ -46,9 +46,9 @@
28233
28234 #include <rdma/ib_umem.h>
28235
28236-atomic_t mod_qp_timouts;
28237-atomic_t qps_created;
28238-atomic_t sw_qps_destroyed;
28239+atomic_unchecked_t mod_qp_timouts;
28240+atomic_unchecked_t qps_created;
28241+atomic_unchecked_t sw_qps_destroyed;
28242
28243 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
28244
28245@@ -1141,7 +1141,7 @@ static struct ib_qp *nes_create_qp(struc
28246 if (init_attr->create_flags)
28247 return ERR_PTR(-EINVAL);
28248
28249- atomic_inc(&qps_created);
28250+ atomic_inc_unchecked(&qps_created);
28251 switch (init_attr->qp_type) {
28252 case IB_QPT_RC:
28253 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
28254@@ -1470,7 +1470,7 @@ static int nes_destroy_qp(struct ib_qp *
28255 struct iw_cm_event cm_event;
28256 int ret;
28257
28258- atomic_inc(&sw_qps_destroyed);
28259+ atomic_inc_unchecked(&sw_qps_destroyed);
28260 nesqp->destroyed = 1;
28261
28262 /* Blow away the connection if it exists. */
28263diff -urNp linux-3.0.7/drivers/infiniband/hw/qib/qib.h linux-3.0.7/drivers/infiniband/hw/qib/qib.h
28264--- linux-3.0.7/drivers/infiniband/hw/qib/qib.h 2011-07-21 22:17:23.000000000 -0400
28265+++ linux-3.0.7/drivers/infiniband/hw/qib/qib.h 2011-08-23 21:47:55.000000000 -0400
28266@@ -51,6 +51,7 @@
28267 #include <linux/completion.h>
28268 #include <linux/kref.h>
28269 #include <linux/sched.h>
28270+#include <linux/slab.h>
28271
28272 #include "qib_common.h"
28273 #include "qib_verbs.h"
28274diff -urNp linux-3.0.7/drivers/input/gameport/gameport.c linux-3.0.7/drivers/input/gameport/gameport.c
28275--- linux-3.0.7/drivers/input/gameport/gameport.c 2011-07-21 22:17:23.000000000 -0400
28276+++ linux-3.0.7/drivers/input/gameport/gameport.c 2011-08-23 21:47:55.000000000 -0400
28277@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
28278 */
28279 static void gameport_init_port(struct gameport *gameport)
28280 {
28281- static atomic_t gameport_no = ATOMIC_INIT(0);
28282+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
28283
28284 __module_get(THIS_MODULE);
28285
28286 mutex_init(&gameport->drv_mutex);
28287 device_initialize(&gameport->dev);
28288 dev_set_name(&gameport->dev, "gameport%lu",
28289- (unsigned long)atomic_inc_return(&gameport_no) - 1);
28290+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
28291 gameport->dev.bus = &gameport_bus;
28292 gameport->dev.release = gameport_release_port;
28293 if (gameport->parent)
28294diff -urNp linux-3.0.7/drivers/input/input.c linux-3.0.7/drivers/input/input.c
28295--- linux-3.0.7/drivers/input/input.c 2011-07-21 22:17:23.000000000 -0400
28296+++ linux-3.0.7/drivers/input/input.c 2011-08-23 21:47:55.000000000 -0400
28297@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struc
28298 */
28299 int input_register_device(struct input_dev *dev)
28300 {
28301- static atomic_t input_no = ATOMIC_INIT(0);
28302+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
28303 struct input_handler *handler;
28304 const char *path;
28305 int error;
28306@@ -1851,7 +1851,7 @@ int input_register_device(struct input_d
28307 dev->setkeycode = input_default_setkeycode;
28308
28309 dev_set_name(&dev->dev, "input%ld",
28310- (unsigned long) atomic_inc_return(&input_no) - 1);
28311+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
28312
28313 error = device_add(&dev->dev);
28314 if (error)
28315diff -urNp linux-3.0.7/drivers/input/joystick/sidewinder.c linux-3.0.7/drivers/input/joystick/sidewinder.c
28316--- linux-3.0.7/drivers/input/joystick/sidewinder.c 2011-07-21 22:17:23.000000000 -0400
28317+++ linux-3.0.7/drivers/input/joystick/sidewinder.c 2011-08-23 21:48:14.000000000 -0400
28318@@ -30,6 +30,7 @@
28319 #include <linux/kernel.h>
28320 #include <linux/module.h>
28321 #include <linux/slab.h>
28322+#include <linux/sched.h>
28323 #include <linux/init.h>
28324 #include <linux/input.h>
28325 #include <linux/gameport.h>
28326@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
28327 unsigned char buf[SW_LENGTH];
28328 int i;
28329
28330+ pax_track_stack();
28331+
28332 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
28333
28334 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
28335diff -urNp linux-3.0.7/drivers/input/joystick/xpad.c linux-3.0.7/drivers/input/joystick/xpad.c
28336--- linux-3.0.7/drivers/input/joystick/xpad.c 2011-07-21 22:17:23.000000000 -0400
28337+++ linux-3.0.7/drivers/input/joystick/xpad.c 2011-08-23 21:47:55.000000000 -0400
28338@@ -689,7 +689,7 @@ static void xpad_led_set(struct led_clas
28339
28340 static int xpad_led_probe(struct usb_xpad *xpad)
28341 {
28342- static atomic_t led_seq = ATOMIC_INIT(0);
28343+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
28344 long led_no;
28345 struct xpad_led *led;
28346 struct led_classdev *led_cdev;
28347@@ -702,7 +702,7 @@ static int xpad_led_probe(struct usb_xpa
28348 if (!led)
28349 return -ENOMEM;
28350
28351- led_no = (long)atomic_inc_return(&led_seq) - 1;
28352+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
28353
28354 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
28355 led->xpad = xpad;
28356diff -urNp linux-3.0.7/drivers/input/mousedev.c linux-3.0.7/drivers/input/mousedev.c
28357--- linux-3.0.7/drivers/input/mousedev.c 2011-07-21 22:17:23.000000000 -0400
28358+++ linux-3.0.7/drivers/input/mousedev.c 2011-08-23 21:47:55.000000000 -0400
28359@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file
28360
28361 spin_unlock_irq(&client->packet_lock);
28362
28363- if (copy_to_user(buffer, data, count))
28364+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
28365 return -EFAULT;
28366
28367 return count;
28368diff -urNp linux-3.0.7/drivers/input/serio/serio.c linux-3.0.7/drivers/input/serio/serio.c
28369--- linux-3.0.7/drivers/input/serio/serio.c 2011-07-21 22:17:23.000000000 -0400
28370+++ linux-3.0.7/drivers/input/serio/serio.c 2011-08-23 21:47:55.000000000 -0400
28371@@ -497,7 +497,7 @@ static void serio_release_port(struct de
28372 */
28373 static void serio_init_port(struct serio *serio)
28374 {
28375- static atomic_t serio_no = ATOMIC_INIT(0);
28376+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
28377
28378 __module_get(THIS_MODULE);
28379
28380@@ -508,7 +508,7 @@ static void serio_init_port(struct serio
28381 mutex_init(&serio->drv_mutex);
28382 device_initialize(&serio->dev);
28383 dev_set_name(&serio->dev, "serio%ld",
28384- (long)atomic_inc_return(&serio_no) - 1);
28385+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
28386 serio->dev.bus = &serio_bus;
28387 serio->dev.release = serio_release_port;
28388 serio->dev.groups = serio_device_attr_groups;
28389diff -urNp linux-3.0.7/drivers/isdn/capi/capi.c linux-3.0.7/drivers/isdn/capi/capi.c
28390--- linux-3.0.7/drivers/isdn/capi/capi.c 2011-07-21 22:17:23.000000000 -0400
28391+++ linux-3.0.7/drivers/isdn/capi/capi.c 2011-08-23 21:47:55.000000000 -0400
28392@@ -83,8 +83,8 @@ struct capiminor {
28393
28394 struct capi20_appl *ap;
28395 u32 ncci;
28396- atomic_t datahandle;
28397- atomic_t msgid;
28398+ atomic_unchecked_t datahandle;
28399+ atomic_unchecked_t msgid;
28400
28401 struct tty_port port;
28402 int ttyinstop;
28403@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *m
28404 capimsg_setu16(s, 2, mp->ap->applid);
28405 capimsg_setu8 (s, 4, CAPI_DATA_B3);
28406 capimsg_setu8 (s, 5, CAPI_RESP);
28407- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
28408+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
28409 capimsg_setu32(s, 8, mp->ncci);
28410 capimsg_setu16(s, 12, datahandle);
28411 }
28412@@ -518,14 +518,14 @@ static void handle_minor_send(struct cap
28413 mp->outbytes -= len;
28414 spin_unlock_bh(&mp->outlock);
28415
28416- datahandle = atomic_inc_return(&mp->datahandle);
28417+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
28418 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
28419 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
28420 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
28421 capimsg_setu16(skb->data, 2, mp->ap->applid);
28422 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
28423 capimsg_setu8 (skb->data, 5, CAPI_REQ);
28424- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
28425+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
28426 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
28427 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
28428 capimsg_setu16(skb->data, 16, len); /* Data length */
28429diff -urNp linux-3.0.7/drivers/isdn/gigaset/common.c linux-3.0.7/drivers/isdn/gigaset/common.c
28430--- linux-3.0.7/drivers/isdn/gigaset/common.c 2011-07-21 22:17:23.000000000 -0400
28431+++ linux-3.0.7/drivers/isdn/gigaset/common.c 2011-08-23 21:47:55.000000000 -0400
28432@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
28433 cs->commands_pending = 0;
28434 cs->cur_at_seq = 0;
28435 cs->gotfwver = -1;
28436- cs->open_count = 0;
28437+ local_set(&cs->open_count, 0);
28438 cs->dev = NULL;
28439 cs->tty = NULL;
28440 cs->tty_dev = NULL;
28441diff -urNp linux-3.0.7/drivers/isdn/gigaset/gigaset.h linux-3.0.7/drivers/isdn/gigaset/gigaset.h
28442--- linux-3.0.7/drivers/isdn/gigaset/gigaset.h 2011-07-21 22:17:23.000000000 -0400
28443+++ linux-3.0.7/drivers/isdn/gigaset/gigaset.h 2011-08-23 21:47:55.000000000 -0400
28444@@ -35,6 +35,7 @@
28445 #include <linux/tty_driver.h>
28446 #include <linux/list.h>
28447 #include <asm/atomic.h>
28448+#include <asm/local.h>
28449
28450 #define GIG_VERSION {0, 5, 0, 0}
28451 #define GIG_COMPAT {0, 4, 0, 0}
28452@@ -433,7 +434,7 @@ struct cardstate {
28453 spinlock_t cmdlock;
28454 unsigned curlen, cmdbytes;
28455
28456- unsigned open_count;
28457+ local_t open_count;
28458 struct tty_struct *tty;
28459 struct tasklet_struct if_wake_tasklet;
28460 unsigned control_state;
28461diff -urNp linux-3.0.7/drivers/isdn/gigaset/interface.c linux-3.0.7/drivers/isdn/gigaset/interface.c
28462--- linux-3.0.7/drivers/isdn/gigaset/interface.c 2011-07-21 22:17:23.000000000 -0400
28463+++ linux-3.0.7/drivers/isdn/gigaset/interface.c 2011-08-23 21:47:55.000000000 -0400
28464@@ -162,9 +162,7 @@ static int if_open(struct tty_struct *tt
28465 }
28466 tty->driver_data = cs;
28467
28468- ++cs->open_count;
28469-
28470- if (cs->open_count == 1) {
28471+ if (local_inc_return(&cs->open_count) == 1) {
28472 spin_lock_irqsave(&cs->lock, flags);
28473 cs->tty = tty;
28474 spin_unlock_irqrestore(&cs->lock, flags);
28475@@ -192,10 +190,10 @@ static void if_close(struct tty_struct *
28476
28477 if (!cs->connected)
28478 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
28479- else if (!cs->open_count)
28480+ else if (!local_read(&cs->open_count))
28481 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28482 else {
28483- if (!--cs->open_count) {
28484+ if (!local_dec_return(&cs->open_count)) {
28485 spin_lock_irqsave(&cs->lock, flags);
28486 cs->tty = NULL;
28487 spin_unlock_irqrestore(&cs->lock, flags);
28488@@ -230,7 +228,7 @@ static int if_ioctl(struct tty_struct *t
28489 if (!cs->connected) {
28490 gig_dbg(DEBUG_IF, "not connected");
28491 retval = -ENODEV;
28492- } else if (!cs->open_count)
28493+ } else if (!local_read(&cs->open_count))
28494 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28495 else {
28496 retval = 0;
28497@@ -360,7 +358,7 @@ static int if_write(struct tty_struct *t
28498 retval = -ENODEV;
28499 goto done;
28500 }
28501- if (!cs->open_count) {
28502+ if (!local_read(&cs->open_count)) {
28503 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28504 retval = -ENODEV;
28505 goto done;
28506@@ -413,7 +411,7 @@ static int if_write_room(struct tty_stru
28507 if (!cs->connected) {
28508 gig_dbg(DEBUG_IF, "not connected");
28509 retval = -ENODEV;
28510- } else if (!cs->open_count)
28511+ } else if (!local_read(&cs->open_count))
28512 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28513 else if (cs->mstate != MS_LOCKED) {
28514 dev_warn(cs->dev, "can't write to unlocked device\n");
28515@@ -443,7 +441,7 @@ static int if_chars_in_buffer(struct tty
28516
28517 if (!cs->connected)
28518 gig_dbg(DEBUG_IF, "not connected");
28519- else if (!cs->open_count)
28520+ else if (!local_read(&cs->open_count))
28521 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28522 else if (cs->mstate != MS_LOCKED)
28523 dev_warn(cs->dev, "can't write to unlocked device\n");
28524@@ -471,7 +469,7 @@ static void if_throttle(struct tty_struc
28525
28526 if (!cs->connected)
28527 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
28528- else if (!cs->open_count)
28529+ else if (!local_read(&cs->open_count))
28530 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28531 else
28532 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
28533@@ -495,7 +493,7 @@ static void if_unthrottle(struct tty_str
28534
28535 if (!cs->connected)
28536 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
28537- else if (!cs->open_count)
28538+ else if (!local_read(&cs->open_count))
28539 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28540 else
28541 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
28542@@ -526,7 +524,7 @@ static void if_set_termios(struct tty_st
28543 goto out;
28544 }
28545
28546- if (!cs->open_count) {
28547+ if (!local_read(&cs->open_count)) {
28548 dev_warn(cs->dev, "%s: device not opened\n", __func__);
28549 goto out;
28550 }
28551diff -urNp linux-3.0.7/drivers/isdn/hardware/avm/b1.c linux-3.0.7/drivers/isdn/hardware/avm/b1.c
28552--- linux-3.0.7/drivers/isdn/hardware/avm/b1.c 2011-07-21 22:17:23.000000000 -0400
28553+++ linux-3.0.7/drivers/isdn/hardware/avm/b1.c 2011-08-23 21:47:55.000000000 -0400
28554@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
28555 }
28556 if (left) {
28557 if (t4file->user) {
28558- if (copy_from_user(buf, dp, left))
28559+ if (left > sizeof buf || copy_from_user(buf, dp, left))
28560 return -EFAULT;
28561 } else {
28562 memcpy(buf, dp, left);
28563@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
28564 }
28565 if (left) {
28566 if (config->user) {
28567- if (copy_from_user(buf, dp, left))
28568+ if (left > sizeof buf || copy_from_user(buf, dp, left))
28569 return -EFAULT;
28570 } else {
28571 memcpy(buf, dp, left);
28572diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/capidtmf.c linux-3.0.7/drivers/isdn/hardware/eicon/capidtmf.c
28573--- linux-3.0.7/drivers/isdn/hardware/eicon/capidtmf.c 2011-07-21 22:17:23.000000000 -0400
28574+++ linux-3.0.7/drivers/isdn/hardware/eicon/capidtmf.c 2011-08-23 21:48:14.000000000 -0400
28575@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
28576 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
28577 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
28578
28579+ pax_track_stack();
28580
28581 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
28582 {
28583diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/capifunc.c linux-3.0.7/drivers/isdn/hardware/eicon/capifunc.c
28584--- linux-3.0.7/drivers/isdn/hardware/eicon/capifunc.c 2011-07-21 22:17:23.000000000 -0400
28585+++ linux-3.0.7/drivers/isdn/hardware/eicon/capifunc.c 2011-08-23 21:48:14.000000000 -0400
28586@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
28587 IDI_SYNC_REQ req;
28588 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
28589
28590+ pax_track_stack();
28591+
28592 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
28593
28594 for (x = 0; x < MAX_DESCRIPTORS; x++) {
28595diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/diddfunc.c linux-3.0.7/drivers/isdn/hardware/eicon/diddfunc.c
28596--- linux-3.0.7/drivers/isdn/hardware/eicon/diddfunc.c 2011-07-21 22:17:23.000000000 -0400
28597+++ linux-3.0.7/drivers/isdn/hardware/eicon/diddfunc.c 2011-08-23 21:48:14.000000000 -0400
28598@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
28599 IDI_SYNC_REQ req;
28600 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
28601
28602+ pax_track_stack();
28603+
28604 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
28605
28606 for (x = 0; x < MAX_DESCRIPTORS; x++) {
28607diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/divasfunc.c linux-3.0.7/drivers/isdn/hardware/eicon/divasfunc.c
28608--- linux-3.0.7/drivers/isdn/hardware/eicon/divasfunc.c 2011-07-21 22:17:23.000000000 -0400
28609+++ linux-3.0.7/drivers/isdn/hardware/eicon/divasfunc.c 2011-08-23 21:48:14.000000000 -0400
28610@@ -160,6 +160,8 @@ static int DIVA_INIT_FUNCTION connect_di
28611 IDI_SYNC_REQ req;
28612 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
28613
28614+ pax_track_stack();
28615+
28616 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
28617
28618 for (x = 0; x < MAX_DESCRIPTORS; x++) {
28619diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/divasync.h linux-3.0.7/drivers/isdn/hardware/eicon/divasync.h
28620--- linux-3.0.7/drivers/isdn/hardware/eicon/divasync.h 2011-07-21 22:17:23.000000000 -0400
28621+++ linux-3.0.7/drivers/isdn/hardware/eicon/divasync.h 2011-08-23 21:47:55.000000000 -0400
28622@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
28623 } diva_didd_add_adapter_t;
28624 typedef struct _diva_didd_remove_adapter {
28625 IDI_CALL p_request;
28626-} diva_didd_remove_adapter_t;
28627+} __no_const diva_didd_remove_adapter_t;
28628 typedef struct _diva_didd_read_adapter_array {
28629 void * buffer;
28630 dword length;
28631diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/idifunc.c linux-3.0.7/drivers/isdn/hardware/eicon/idifunc.c
28632--- linux-3.0.7/drivers/isdn/hardware/eicon/idifunc.c 2011-07-21 22:17:23.000000000 -0400
28633+++ linux-3.0.7/drivers/isdn/hardware/eicon/idifunc.c 2011-08-23 21:48:14.000000000 -0400
28634@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
28635 IDI_SYNC_REQ req;
28636 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
28637
28638+ pax_track_stack();
28639+
28640 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
28641
28642 for (x = 0; x < MAX_DESCRIPTORS; x++) {
28643diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/message.c linux-3.0.7/drivers/isdn/hardware/eicon/message.c
28644--- linux-3.0.7/drivers/isdn/hardware/eicon/message.c 2011-07-21 22:17:23.000000000 -0400
28645+++ linux-3.0.7/drivers/isdn/hardware/eicon/message.c 2011-08-23 21:48:14.000000000 -0400
28646@@ -4886,6 +4886,8 @@ static void sig_ind(PLCI *plci)
28647 dword d;
28648 word w;
28649
28650+ pax_track_stack();
28651+
28652 a = plci->adapter;
28653 Id = ((word)plci->Id<<8)|a->Id;
28654 PUT_WORD(&SS_Ind[4],0x0000);
28655@@ -7480,6 +7482,8 @@ static word add_b1(PLCI *plci, API_PARSE
28656 word j, n, w;
28657 dword d;
28658
28659+ pax_track_stack();
28660+
28661
28662 for(i=0;i<8;i++) bp_parms[i].length = 0;
28663 for(i=0;i<2;i++) global_config[i].length = 0;
28664@@ -7954,6 +7958,8 @@ static word add_b23(PLCI *plci, API_PARS
28665 const byte llc3[] = {4,3,2,2,6,6,0};
28666 const byte header[] = {0,2,3,3,0,0,0};
28667
28668+ pax_track_stack();
28669+
28670 for(i=0;i<8;i++) bp_parms[i].length = 0;
28671 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
28672 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
28673@@ -14741,6 +14747,8 @@ static void group_optimization(DIVA_CAPI
28674 word appl_number_group_type[MAX_APPL];
28675 PLCI *auxplci;
28676
28677+ pax_track_stack();
28678+
28679 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
28680
28681 if(!a->group_optimization_enabled)
28682diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/mntfunc.c linux-3.0.7/drivers/isdn/hardware/eicon/mntfunc.c
28683--- linux-3.0.7/drivers/isdn/hardware/eicon/mntfunc.c 2011-07-21 22:17:23.000000000 -0400
28684+++ linux-3.0.7/drivers/isdn/hardware/eicon/mntfunc.c 2011-08-23 21:48:14.000000000 -0400
28685@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
28686 IDI_SYNC_REQ req;
28687 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
28688
28689+ pax_track_stack();
28690+
28691 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
28692
28693 for (x = 0; x < MAX_DESCRIPTORS; x++) {
28694diff -urNp linux-3.0.7/drivers/isdn/hardware/eicon/xdi_adapter.h linux-3.0.7/drivers/isdn/hardware/eicon/xdi_adapter.h
28695--- linux-3.0.7/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-07-21 22:17:23.000000000 -0400
28696+++ linux-3.0.7/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-23 21:47:55.000000000 -0400
28697@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
28698 typedef struct _diva_os_idi_adapter_interface {
28699 diva_init_card_proc_t cleanup_adapter_proc;
28700 diva_cmd_card_proc_t cmd_proc;
28701-} diva_os_idi_adapter_interface_t;
28702+} __no_const diva_os_idi_adapter_interface_t;
28703
28704 typedef struct _diva_os_xdi_adapter {
28705 struct list_head link;
28706diff -urNp linux-3.0.7/drivers/isdn/i4l/isdn_common.c linux-3.0.7/drivers/isdn/i4l/isdn_common.c
28707--- linux-3.0.7/drivers/isdn/i4l/isdn_common.c 2011-07-21 22:17:23.000000000 -0400
28708+++ linux-3.0.7/drivers/isdn/i4l/isdn_common.c 2011-08-23 21:48:14.000000000 -0400
28709@@ -1286,6 +1286,8 @@ isdn_ioctl(struct file *file, uint cmd,
28710 } iocpar;
28711 void __user *argp = (void __user *)arg;
28712
28713+ pax_track_stack();
28714+
28715 #define name iocpar.name
28716 #define bname iocpar.bname
28717 #define iocts iocpar.iocts
28718diff -urNp linux-3.0.7/drivers/isdn/icn/icn.c linux-3.0.7/drivers/isdn/icn/icn.c
28719--- linux-3.0.7/drivers/isdn/icn/icn.c 2011-07-21 22:17:23.000000000 -0400
28720+++ linux-3.0.7/drivers/isdn/icn/icn.c 2011-08-23 21:47:55.000000000 -0400
28721@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
28722 if (count > len)
28723 count = len;
28724 if (user) {
28725- if (copy_from_user(msg, buf, count))
28726+ if (count > sizeof msg || copy_from_user(msg, buf, count))
28727 return -EFAULT;
28728 } else
28729 memcpy(msg, buf, count);
28730diff -urNp linux-3.0.7/drivers/lguest/core.c linux-3.0.7/drivers/lguest/core.c
28731--- linux-3.0.7/drivers/lguest/core.c 2011-07-21 22:17:23.000000000 -0400
28732+++ linux-3.0.7/drivers/lguest/core.c 2011-08-23 21:47:55.000000000 -0400
28733@@ -92,9 +92,17 @@ static __init int map_switcher(void)
28734 * it's worked so far. The end address needs +1 because __get_vm_area
28735 * allocates an extra guard page, so we need space for that.
28736 */
28737+
28738+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28739+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
28740+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
28741+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
28742+#else
28743 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
28744 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
28745 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
28746+#endif
28747+
28748 if (!switcher_vma) {
28749 err = -ENOMEM;
28750 printk("lguest: could not map switcher pages high\n");
28751@@ -119,7 +127,7 @@ static __init int map_switcher(void)
28752 * Now the Switcher is mapped at the right address, we can't fail!
28753 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
28754 */
28755- memcpy(switcher_vma->addr, start_switcher_text,
28756+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
28757 end_switcher_text - start_switcher_text);
28758
28759 printk(KERN_INFO "lguest: mapped switcher at %p\n",
28760diff -urNp linux-3.0.7/drivers/lguest/x86/core.c linux-3.0.7/drivers/lguest/x86/core.c
28761--- linux-3.0.7/drivers/lguest/x86/core.c 2011-07-21 22:17:23.000000000 -0400
28762+++ linux-3.0.7/drivers/lguest/x86/core.c 2011-08-23 21:47:55.000000000 -0400
28763@@ -59,7 +59,7 @@ static struct {
28764 /* Offset from where switcher.S was compiled to where we've copied it */
28765 static unsigned long switcher_offset(void)
28766 {
28767- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
28768+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
28769 }
28770
28771 /* This cpu's struct lguest_pages. */
28772@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
28773 * These copies are pretty cheap, so we do them unconditionally: */
28774 /* Save the current Host top-level page directory.
28775 */
28776+
28777+#ifdef CONFIG_PAX_PER_CPU_PGD
28778+ pages->state.host_cr3 = read_cr3();
28779+#else
28780 pages->state.host_cr3 = __pa(current->mm->pgd);
28781+#endif
28782+
28783 /*
28784 * Set up the Guest's page tables to see this CPU's pages (and no
28785 * other CPU's pages).
28786@@ -547,7 +553,7 @@ void __init lguest_arch_host_init(void)
28787 * compiled-in switcher code and the high-mapped copy we just made.
28788 */
28789 for (i = 0; i < IDT_ENTRIES; i++)
28790- default_idt_entries[i] += switcher_offset();
28791+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
28792
28793 /*
28794 * Set up the Switcher's per-cpu areas.
28795@@ -630,7 +636,7 @@ void __init lguest_arch_host_init(void)
28796 * it will be undisturbed when we switch. To change %cs and jump we
28797 * need this structure to feed to Intel's "lcall" instruction.
28798 */
28799- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
28800+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
28801 lguest_entry.segment = LGUEST_CS;
28802
28803 /*
28804diff -urNp linux-3.0.7/drivers/lguest/x86/switcher_32.S linux-3.0.7/drivers/lguest/x86/switcher_32.S
28805--- linux-3.0.7/drivers/lguest/x86/switcher_32.S 2011-07-21 22:17:23.000000000 -0400
28806+++ linux-3.0.7/drivers/lguest/x86/switcher_32.S 2011-08-23 21:47:55.000000000 -0400
28807@@ -87,6 +87,7 @@
28808 #include <asm/page.h>
28809 #include <asm/segment.h>
28810 #include <asm/lguest.h>
28811+#include <asm/processor-flags.h>
28812
28813 // We mark the start of the code to copy
28814 // It's placed in .text tho it's never run here
28815@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
28816 // Changes type when we load it: damn Intel!
28817 // For after we switch over our page tables
28818 // That entry will be read-only: we'd crash.
28819+
28820+#ifdef CONFIG_PAX_KERNEXEC
28821+ mov %cr0, %edx
28822+ xor $X86_CR0_WP, %edx
28823+ mov %edx, %cr0
28824+#endif
28825+
28826 movl $(GDT_ENTRY_TSS*8), %edx
28827 ltr %dx
28828
28829@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
28830 // Let's clear it again for our return.
28831 // The GDT descriptor of the Host
28832 // Points to the table after two "size" bytes
28833- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
28834+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
28835 // Clear "used" from type field (byte 5, bit 2)
28836- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
28837+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
28838+
28839+#ifdef CONFIG_PAX_KERNEXEC
28840+ mov %cr0, %eax
28841+ xor $X86_CR0_WP, %eax
28842+ mov %eax, %cr0
28843+#endif
28844
28845 // Once our page table's switched, the Guest is live!
28846 // The Host fades as we run this final step.
28847@@ -295,13 +309,12 @@ deliver_to_host:
28848 // I consulted gcc, and it gave
28849 // These instructions, which I gladly credit:
28850 leal (%edx,%ebx,8), %eax
28851- movzwl (%eax),%edx
28852- movl 4(%eax), %eax
28853- xorw %ax, %ax
28854- orl %eax, %edx
28855+ movl 4(%eax), %edx
28856+ movw (%eax), %dx
28857 // Now the address of the handler's in %edx
28858 // We call it now: its "iret" drops us home.
28859- jmp *%edx
28860+ ljmp $__KERNEL_CS, $1f
28861+1: jmp *%edx
28862
28863 // Every interrupt can come to us here
28864 // But we must truly tell each apart.
28865diff -urNp linux-3.0.7/drivers/macintosh/macio_asic.c linux-3.0.7/drivers/macintosh/macio_asic.c
28866--- linux-3.0.7/drivers/macintosh/macio_asic.c 2011-07-21 22:17:23.000000000 -0400
28867+++ linux-3.0.7/drivers/macintosh/macio_asic.c 2011-10-11 10:44:33.000000000 -0400
28868@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(s
28869 * MacIO is matched against any Apple ID, it's probe() function
28870 * will then decide wether it applies or not
28871 */
28872-static const struct pci_device_id __devinitdata pci_ids [] = { {
28873+static const struct pci_device_id __devinitconst pci_ids [] = { {
28874 .vendor = PCI_VENDOR_ID_APPLE,
28875 .device = PCI_ANY_ID,
28876 .subvendor = PCI_ANY_ID,
28877diff -urNp linux-3.0.7/drivers/md/dm.c linux-3.0.7/drivers/md/dm.c
28878--- linux-3.0.7/drivers/md/dm.c 2011-09-02 18:11:21.000000000 -0400
28879+++ linux-3.0.7/drivers/md/dm.c 2011-08-23 21:47:55.000000000 -0400
28880@@ -164,9 +164,9 @@ struct mapped_device {
28881 /*
28882 * Event handling.
28883 */
28884- atomic_t event_nr;
28885+ atomic_unchecked_t event_nr;
28886 wait_queue_head_t eventq;
28887- atomic_t uevent_seq;
28888+ atomic_unchecked_t uevent_seq;
28889 struct list_head uevent_list;
28890 spinlock_t uevent_lock; /* Protect access to uevent_list */
28891
28892@@ -1842,8 +1842,8 @@ static struct mapped_device *alloc_dev(i
28893 rwlock_init(&md->map_lock);
28894 atomic_set(&md->holders, 1);
28895 atomic_set(&md->open_count, 0);
28896- atomic_set(&md->event_nr, 0);
28897- atomic_set(&md->uevent_seq, 0);
28898+ atomic_set_unchecked(&md->event_nr, 0);
28899+ atomic_set_unchecked(&md->uevent_seq, 0);
28900 INIT_LIST_HEAD(&md->uevent_list);
28901 spin_lock_init(&md->uevent_lock);
28902
28903@@ -1977,7 +1977,7 @@ static void event_callback(void *context
28904
28905 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
28906
28907- atomic_inc(&md->event_nr);
28908+ atomic_inc_unchecked(&md->event_nr);
28909 wake_up(&md->eventq);
28910 }
28911
28912@@ -2553,18 +2553,18 @@ int dm_kobject_uevent(struct mapped_devi
28913
28914 uint32_t dm_next_uevent_seq(struct mapped_device *md)
28915 {
28916- return atomic_add_return(1, &md->uevent_seq);
28917+ return atomic_add_return_unchecked(1, &md->uevent_seq);
28918 }
28919
28920 uint32_t dm_get_event_nr(struct mapped_device *md)
28921 {
28922- return atomic_read(&md->event_nr);
28923+ return atomic_read_unchecked(&md->event_nr);
28924 }
28925
28926 int dm_wait_event(struct mapped_device *md, int event_nr)
28927 {
28928 return wait_event_interruptible(md->eventq,
28929- (event_nr != atomic_read(&md->event_nr)));
28930+ (event_nr != atomic_read_unchecked(&md->event_nr)));
28931 }
28932
28933 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
28934diff -urNp linux-3.0.7/drivers/md/dm-ioctl.c linux-3.0.7/drivers/md/dm-ioctl.c
28935--- linux-3.0.7/drivers/md/dm-ioctl.c 2011-07-21 22:17:23.000000000 -0400
28936+++ linux-3.0.7/drivers/md/dm-ioctl.c 2011-08-23 21:47:55.000000000 -0400
28937@@ -1551,7 +1551,7 @@ static int validate_params(uint cmd, str
28938 cmd == DM_LIST_VERSIONS_CMD)
28939 return 0;
28940
28941- if ((cmd == DM_DEV_CREATE_CMD)) {
28942+ if (cmd == DM_DEV_CREATE_CMD) {
28943 if (!*param->name) {
28944 DMWARN("name not supplied when creating device");
28945 return -EINVAL;
28946diff -urNp linux-3.0.7/drivers/md/dm-raid1.c linux-3.0.7/drivers/md/dm-raid1.c
28947--- linux-3.0.7/drivers/md/dm-raid1.c 2011-07-21 22:17:23.000000000 -0400
28948+++ linux-3.0.7/drivers/md/dm-raid1.c 2011-08-23 21:47:55.000000000 -0400
28949@@ -40,7 +40,7 @@ enum dm_raid1_error {
28950
28951 struct mirror {
28952 struct mirror_set *ms;
28953- atomic_t error_count;
28954+ atomic_unchecked_t error_count;
28955 unsigned long error_type;
28956 struct dm_dev *dev;
28957 sector_t offset;
28958@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(s
28959 struct mirror *m;
28960
28961 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
28962- if (!atomic_read(&m->error_count))
28963+ if (!atomic_read_unchecked(&m->error_count))
28964 return m;
28965
28966 return NULL;
28967@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m
28968 * simple way to tell if a device has encountered
28969 * errors.
28970 */
28971- atomic_inc(&m->error_count);
28972+ atomic_inc_unchecked(&m->error_count);
28973
28974 if (test_and_set_bit(error_type, &m->error_type))
28975 return;
28976@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(stru
28977 struct mirror *m = get_default_mirror(ms);
28978
28979 do {
28980- if (likely(!atomic_read(&m->error_count)))
28981+ if (likely(!atomic_read_unchecked(&m->error_count)))
28982 return m;
28983
28984 if (m-- == ms->mirror)
28985@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
28986 {
28987 struct mirror *default_mirror = get_default_mirror(m->ms);
28988
28989- return !atomic_read(&default_mirror->error_count);
28990+ return !atomic_read_unchecked(&default_mirror->error_count);
28991 }
28992
28993 static int mirror_available(struct mirror_set *ms, struct bio *bio)
28994@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *
28995 */
28996 if (likely(region_in_sync(ms, region, 1)))
28997 m = choose_mirror(ms, bio->bi_sector);
28998- else if (m && atomic_read(&m->error_count))
28999+ else if (m && atomic_read_unchecked(&m->error_count))
29000 m = NULL;
29001
29002 if (likely(m))
29003@@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set
29004 }
29005
29006 ms->mirror[mirror].ms = ms;
29007- atomic_set(&(ms->mirror[mirror].error_count), 0);
29008+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
29009 ms->mirror[mirror].error_type = 0;
29010 ms->mirror[mirror].offset = offset;
29011
29012@@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
29013 */
29014 static char device_status_char(struct mirror *m)
29015 {
29016- if (!atomic_read(&(m->error_count)))
29017+ if (!atomic_read_unchecked(&(m->error_count)))
29018 return 'A';
29019
29020 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
29021diff -urNp linux-3.0.7/drivers/md/dm-stripe.c linux-3.0.7/drivers/md/dm-stripe.c
29022--- linux-3.0.7/drivers/md/dm-stripe.c 2011-07-21 22:17:23.000000000 -0400
29023+++ linux-3.0.7/drivers/md/dm-stripe.c 2011-08-23 21:47:55.000000000 -0400
29024@@ -20,7 +20,7 @@ struct stripe {
29025 struct dm_dev *dev;
29026 sector_t physical_start;
29027
29028- atomic_t error_count;
29029+ atomic_unchecked_t error_count;
29030 };
29031
29032 struct stripe_c {
29033@@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
29034 kfree(sc);
29035 return r;
29036 }
29037- atomic_set(&(sc->stripe[i].error_count), 0);
29038+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
29039 }
29040
29041 ti->private = sc;
29042@@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
29043 DMEMIT("%d ", sc->stripes);
29044 for (i = 0; i < sc->stripes; i++) {
29045 DMEMIT("%s ", sc->stripe[i].dev->name);
29046- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
29047+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
29048 'D' : 'A';
29049 }
29050 buffer[i] = '\0';
29051@@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
29052 */
29053 for (i = 0; i < sc->stripes; i++)
29054 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
29055- atomic_inc(&(sc->stripe[i].error_count));
29056- if (atomic_read(&(sc->stripe[i].error_count)) <
29057+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
29058+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
29059 DM_IO_ERROR_THRESHOLD)
29060 schedule_work(&sc->trigger_event);
29061 }
29062diff -urNp linux-3.0.7/drivers/md/dm-table.c linux-3.0.7/drivers/md/dm-table.c
29063--- linux-3.0.7/drivers/md/dm-table.c 2011-10-17 23:17:09.000000000 -0400
29064+++ linux-3.0.7/drivers/md/dm-table.c 2011-10-17 23:17:19.000000000 -0400
29065@@ -390,7 +390,7 @@ static int device_area_is_invalid(struct
29066 if (!dev_size)
29067 return 0;
29068
29069- if ((start >= dev_size) || (start + len > dev_size)) {
29070+ if ((start >= dev_size) || (len > dev_size - start)) {
29071 DMWARN("%s: %s too small for target: "
29072 "start=%llu, len=%llu, dev_size=%llu",
29073 dm_device_name(ti->table->md), bdevname(bdev, b),
29074diff -urNp linux-3.0.7/drivers/md/md.c linux-3.0.7/drivers/md/md.c
29075--- linux-3.0.7/drivers/md/md.c 2011-10-17 23:17:09.000000000 -0400
29076+++ linux-3.0.7/drivers/md/md.c 2011-10-17 23:17:19.000000000 -0400
29077@@ -231,10 +231,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
29078 * start build, activate spare
29079 */
29080 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
29081-static atomic_t md_event_count;
29082+static atomic_unchecked_t md_event_count;
29083 void md_new_event(mddev_t *mddev)
29084 {
29085- atomic_inc(&md_event_count);
29086+ atomic_inc_unchecked(&md_event_count);
29087 wake_up(&md_event_waiters);
29088 }
29089 EXPORT_SYMBOL_GPL(md_new_event);
29090@@ -244,7 +244,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
29091 */
29092 static void md_new_event_inintr(mddev_t *mddev)
29093 {
29094- atomic_inc(&md_event_count);
29095+ atomic_inc_unchecked(&md_event_count);
29096 wake_up(&md_event_waiters);
29097 }
29098
29099@@ -1475,7 +1475,7 @@ static int super_1_load(mdk_rdev_t *rdev
29100
29101 rdev->preferred_minor = 0xffff;
29102 rdev->data_offset = le64_to_cpu(sb->data_offset);
29103- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
29104+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
29105
29106 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
29107 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
29108@@ -1653,7 +1653,7 @@ static void super_1_sync(mddev_t *mddev,
29109 else
29110 sb->resync_offset = cpu_to_le64(0);
29111
29112- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
29113+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
29114
29115 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
29116 sb->size = cpu_to_le64(mddev->dev_sectors);
29117@@ -2446,7 +2446,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
29118 static ssize_t
29119 errors_show(mdk_rdev_t *rdev, char *page)
29120 {
29121- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
29122+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
29123 }
29124
29125 static ssize_t
29126@@ -2455,7 +2455,7 @@ errors_store(mdk_rdev_t *rdev, const cha
29127 char *e;
29128 unsigned long n = simple_strtoul(buf, &e, 10);
29129 if (*buf && (*e == 0 || *e == '\n')) {
29130- atomic_set(&rdev->corrected_errors, n);
29131+ atomic_set_unchecked(&rdev->corrected_errors, n);
29132 return len;
29133 }
29134 return -EINVAL;
29135@@ -2811,8 +2811,8 @@ void md_rdev_init(mdk_rdev_t *rdev)
29136 rdev->last_read_error.tv_sec = 0;
29137 rdev->last_read_error.tv_nsec = 0;
29138 atomic_set(&rdev->nr_pending, 0);
29139- atomic_set(&rdev->read_errors, 0);
29140- atomic_set(&rdev->corrected_errors, 0);
29141+ atomic_set_unchecked(&rdev->read_errors, 0);
29142+ atomic_set_unchecked(&rdev->corrected_errors, 0);
29143
29144 INIT_LIST_HEAD(&rdev->same_set);
29145 init_waitqueue_head(&rdev->blocked_wait);
29146@@ -6440,7 +6440,7 @@ static int md_seq_show(struct seq_file *
29147
29148 spin_unlock(&pers_lock);
29149 seq_printf(seq, "\n");
29150- mi->event = atomic_read(&md_event_count);
29151+ mi->event = atomic_read_unchecked(&md_event_count);
29152 return 0;
29153 }
29154 if (v == (void*)2) {
29155@@ -6529,7 +6529,7 @@ static int md_seq_show(struct seq_file *
29156 chunk_kb ? "KB" : "B");
29157 if (bitmap->file) {
29158 seq_printf(seq, ", file: ");
29159- seq_path(seq, &bitmap->file->f_path, " \t\n");
29160+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
29161 }
29162
29163 seq_printf(seq, "\n");
29164@@ -6563,7 +6563,7 @@ static int md_seq_open(struct inode *ino
29165 else {
29166 struct seq_file *p = file->private_data;
29167 p->private = mi;
29168- mi->event = atomic_read(&md_event_count);
29169+ mi->event = atomic_read_unchecked(&md_event_count);
29170 }
29171 return error;
29172 }
29173@@ -6579,7 +6579,7 @@ static unsigned int mdstat_poll(struct f
29174 /* always allow read */
29175 mask = POLLIN | POLLRDNORM;
29176
29177- if (mi->event != atomic_read(&md_event_count))
29178+ if (mi->event != atomic_read_unchecked(&md_event_count))
29179 mask |= POLLERR | POLLPRI;
29180 return mask;
29181 }
29182@@ -6623,7 +6623,7 @@ static int is_mddev_idle(mddev_t *mddev,
29183 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
29184 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
29185 (int)part_stat_read(&disk->part0, sectors[1]) -
29186- atomic_read(&disk->sync_io);
29187+ atomic_read_unchecked(&disk->sync_io);
29188 /* sync IO will cause sync_io to increase before the disk_stats
29189 * as sync_io is counted when a request starts, and
29190 * disk_stats is counted when it completes.
29191diff -urNp linux-3.0.7/drivers/md/md.h linux-3.0.7/drivers/md/md.h
29192--- linux-3.0.7/drivers/md/md.h 2011-10-17 23:17:09.000000000 -0400
29193+++ linux-3.0.7/drivers/md/md.h 2011-10-17 23:17:19.000000000 -0400
29194@@ -97,13 +97,13 @@ struct mdk_rdev_s
29195 * only maintained for arrays that
29196 * support hot removal
29197 */
29198- atomic_t read_errors; /* number of consecutive read errors that
29199+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
29200 * we have tried to ignore.
29201 */
29202 struct timespec last_read_error; /* monotonic time since our
29203 * last read error
29204 */
29205- atomic_t corrected_errors; /* number of corrected read errors,
29206+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
29207 * for reporting to userspace and storing
29208 * in superblock.
29209 */
29210@@ -344,7 +344,7 @@ static inline void rdev_dec_pending(mdk_
29211
29212 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
29213 {
29214- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
29215+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
29216 }
29217
29218 struct mdk_personality
29219diff -urNp linux-3.0.7/drivers/md/raid10.c linux-3.0.7/drivers/md/raid10.c
29220--- linux-3.0.7/drivers/md/raid10.c 2011-10-17 23:17:09.000000000 -0400
29221+++ linux-3.0.7/drivers/md/raid10.c 2011-10-17 23:17:19.000000000 -0400
29222@@ -1186,7 +1186,7 @@ static void end_sync_read(struct bio *bi
29223 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
29224 set_bit(R10BIO_Uptodate, &r10_bio->state);
29225 else {
29226- atomic_add(r10_bio->sectors,
29227+ atomic_add_unchecked(r10_bio->sectors,
29228 &conf->mirrors[d].rdev->corrected_errors);
29229 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
29230 md_error(r10_bio->mddev,
29231@@ -1394,7 +1394,7 @@ static void check_decay_read_errors(mdde
29232 {
29233 struct timespec cur_time_mon;
29234 unsigned long hours_since_last;
29235- unsigned int read_errors = atomic_read(&rdev->read_errors);
29236+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
29237
29238 ktime_get_ts(&cur_time_mon);
29239
29240@@ -1416,9 +1416,9 @@ static void check_decay_read_errors(mdde
29241 * overflowing the shift of read_errors by hours_since_last.
29242 */
29243 if (hours_since_last >= 8 * sizeof(read_errors))
29244- atomic_set(&rdev->read_errors, 0);
29245+ atomic_set_unchecked(&rdev->read_errors, 0);
29246 else
29247- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
29248+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
29249 }
29250
29251 /*
29252@@ -1448,8 +1448,8 @@ static void fix_read_error(conf_t *conf,
29253 return;
29254
29255 check_decay_read_errors(mddev, rdev);
29256- atomic_inc(&rdev->read_errors);
29257- if (atomic_read(&rdev->read_errors) > max_read_errors) {
29258+ atomic_inc_unchecked(&rdev->read_errors);
29259+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
29260 char b[BDEVNAME_SIZE];
29261 bdevname(rdev->bdev, b);
29262
29263@@ -1457,7 +1457,7 @@ static void fix_read_error(conf_t *conf,
29264 "md/raid10:%s: %s: Raid device exceeded "
29265 "read_error threshold [cur %d:max %d]\n",
29266 mdname(mddev), b,
29267- atomic_read(&rdev->read_errors), max_read_errors);
29268+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
29269 printk(KERN_NOTICE
29270 "md/raid10:%s: %s: Failing raid device\n",
29271 mdname(mddev), b);
29272@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
29273 test_bit(In_sync, &rdev->flags)) {
29274 atomic_inc(&rdev->nr_pending);
29275 rcu_read_unlock();
29276- atomic_add(s, &rdev->corrected_errors);
29277+ atomic_add_unchecked(s, &rdev->corrected_errors);
29278 if (sync_page_io(rdev,
29279 r10_bio->devs[sl].addr +
29280 sect,
29281diff -urNp linux-3.0.7/drivers/md/raid1.c linux-3.0.7/drivers/md/raid1.c
29282--- linux-3.0.7/drivers/md/raid1.c 2011-10-17 23:17:09.000000000 -0400
29283+++ linux-3.0.7/drivers/md/raid1.c 2011-10-17 23:17:19.000000000 -0400
29284@@ -1263,7 +1263,7 @@ static int fix_sync_read_error(r1bio_t *
29285 rdev_dec_pending(rdev, mddev);
29286 md_error(mddev, rdev);
29287 } else
29288- atomic_add(s, &rdev->corrected_errors);
29289+ atomic_add_unchecked(s, &rdev->corrected_errors);
29290 }
29291 d = start;
29292 while (d != r1_bio->read_disk) {
29293@@ -1492,7 +1492,7 @@ static void fix_read_error(conf_t *conf,
29294 /* Well, this device is dead */
29295 md_error(mddev, rdev);
29296 else {
29297- atomic_add(s, &rdev->corrected_errors);
29298+ atomic_add_unchecked(s, &rdev->corrected_errors);
29299 printk(KERN_INFO
29300 "md/raid1:%s: read error corrected "
29301 "(%d sectors at %llu on %s)\n",
29302diff -urNp linux-3.0.7/drivers/md/raid5.c linux-3.0.7/drivers/md/raid5.c
29303--- linux-3.0.7/drivers/md/raid5.c 2011-10-17 23:17:09.000000000 -0400
29304+++ linux-3.0.7/drivers/md/raid5.c 2011-10-17 23:17:19.000000000 -0400
29305@@ -550,7 +550,7 @@ static void ops_run_io(struct stripe_hea
29306 bi->bi_next = NULL;
29307 if ((rw & WRITE) &&
29308 test_bit(R5_ReWrite, &sh->dev[i].flags))
29309- atomic_add(STRIPE_SECTORS,
29310+ atomic_add_unchecked(STRIPE_SECTORS,
29311 &rdev->corrected_errors);
29312 generic_make_request(bi);
29313 } else {
29314@@ -1596,15 +1596,15 @@ static void raid5_end_read_request(struc
29315 clear_bit(R5_ReadError, &sh->dev[i].flags);
29316 clear_bit(R5_ReWrite, &sh->dev[i].flags);
29317 }
29318- if (atomic_read(&conf->disks[i].rdev->read_errors))
29319- atomic_set(&conf->disks[i].rdev->read_errors, 0);
29320+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
29321+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
29322 } else {
29323 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
29324 int retry = 0;
29325 rdev = conf->disks[i].rdev;
29326
29327 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
29328- atomic_inc(&rdev->read_errors);
29329+ atomic_inc_unchecked(&rdev->read_errors);
29330 if (conf->mddev->degraded >= conf->max_degraded)
29331 printk_rl(KERN_WARNING
29332 "md/raid:%s: read error not correctable "
29333@@ -1622,7 +1622,7 @@ static void raid5_end_read_request(struc
29334 (unsigned long long)(sh->sector
29335 + rdev->data_offset),
29336 bdn);
29337- else if (atomic_read(&rdev->read_errors)
29338+ else if (atomic_read_unchecked(&rdev->read_errors)
29339 > conf->max_nr_stripes)
29340 printk(KERN_WARNING
29341 "md/raid:%s: Too many read errors, failing device %s.\n",
29342@@ -1945,6 +1945,7 @@ static sector_t compute_blocknr(struct s
29343 sector_t r_sector;
29344 struct stripe_head sh2;
29345
29346+ pax_track_stack();
29347
29348 chunk_offset = sector_div(new_sector, sectors_per_chunk);
29349 stripe = new_sector;
29350diff -urNp linux-3.0.7/drivers/media/common/saa7146_hlp.c linux-3.0.7/drivers/media/common/saa7146_hlp.c
29351--- linux-3.0.7/drivers/media/common/saa7146_hlp.c 2011-07-21 22:17:23.000000000 -0400
29352+++ linux-3.0.7/drivers/media/common/saa7146_hlp.c 2011-08-23 21:48:14.000000000 -0400
29353@@ -353,6 +353,8 @@ static void calculate_clipping_registers
29354
29355 int x[32], y[32], w[32], h[32];
29356
29357+ pax_track_stack();
29358+
29359 /* clear out memory */
29360 memset(&line_list[0], 0x00, sizeof(u32)*32);
29361 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
29362diff -urNp linux-3.0.7/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-3.0.7/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
29363--- linux-3.0.7/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-07-21 22:17:23.000000000 -0400
29364+++ linux-3.0.7/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-08-23 21:48:14.000000000 -0400
29365@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
29366 u8 buf[HOST_LINK_BUF_SIZE];
29367 int i;
29368
29369+ pax_track_stack();
29370+
29371 dprintk("%s\n", __func__);
29372
29373 /* check if we have space for a link buf in the rx_buffer */
29374@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
29375 unsigned long timeout;
29376 int written;
29377
29378+ pax_track_stack();
29379+
29380 dprintk("%s\n", __func__);
29381
29382 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
29383diff -urNp linux-3.0.7/drivers/media/dvb/dvb-core/dvb_demux.h linux-3.0.7/drivers/media/dvb/dvb-core/dvb_demux.h
29384--- linux-3.0.7/drivers/media/dvb/dvb-core/dvb_demux.h 2011-07-21 22:17:23.000000000 -0400
29385+++ linux-3.0.7/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-24 18:24:40.000000000 -0400
29386@@ -68,12 +68,12 @@ struct dvb_demux_feed {
29387 union {
29388 struct dmx_ts_feed ts;
29389 struct dmx_section_feed sec;
29390- } feed;
29391+ } __no_const feed;
29392
29393 union {
29394 dmx_ts_cb ts;
29395 dmx_section_cb sec;
29396- } cb;
29397+ } __no_const cb;
29398
29399 struct dvb_demux *demux;
29400 void *priv;
29401diff -urNp linux-3.0.7/drivers/media/dvb/dvb-core/dvbdev.c linux-3.0.7/drivers/media/dvb/dvb-core/dvbdev.c
29402--- linux-3.0.7/drivers/media/dvb/dvb-core/dvbdev.c 2011-07-21 22:17:23.000000000 -0400
29403+++ linux-3.0.7/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-24 18:24:19.000000000 -0400
29404@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
29405 const struct dvb_device *template, void *priv, int type)
29406 {
29407 struct dvb_device *dvbdev;
29408- struct file_operations *dvbdevfops;
29409+ file_operations_no_const *dvbdevfops;
29410 struct device *clsdev;
29411 int minor;
29412 int id;
29413diff -urNp linux-3.0.7/drivers/media/dvb/dvb-usb/cxusb.c linux-3.0.7/drivers/media/dvb/dvb-usb/cxusb.c
29414--- linux-3.0.7/drivers/media/dvb/dvb-usb/cxusb.c 2011-07-21 22:17:23.000000000 -0400
29415+++ linux-3.0.7/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-24 18:26:33.000000000 -0400
29416@@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_di
29417 struct dib0700_adapter_state {
29418 int (*set_param_save) (struct dvb_frontend *,
29419 struct dvb_frontend_parameters *);
29420-};
29421+} __no_const;
29422
29423 static int dib7070_set_param_override(struct dvb_frontend *fe,
29424 struct dvb_frontend_parameters *fep)
29425diff -urNp linux-3.0.7/drivers/media/dvb/dvb-usb/dib0700_core.c linux-3.0.7/drivers/media/dvb/dvb-usb/dib0700_core.c
29426--- linux-3.0.7/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-07-21 22:17:23.000000000 -0400
29427+++ linux-3.0.7/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-08-23 21:48:14.000000000 -0400
29428@@ -434,6 +434,8 @@ int dib0700_download_firmware(struct usb
29429 if (!buf)
29430 return -ENOMEM;
29431
29432+ pax_track_stack();
29433+
29434 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
29435 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
29436 hx.addr, hx.len, hx.chk);
29437diff -urNp linux-3.0.7/drivers/media/dvb/dvb-usb/dibusb.h linux-3.0.7/drivers/media/dvb/dvb-usb/dibusb.h
29438--- linux-3.0.7/drivers/media/dvb/dvb-usb/dibusb.h 2011-07-21 22:17:23.000000000 -0400
29439+++ linux-3.0.7/drivers/media/dvb/dvb-usb/dibusb.h 2011-08-24 18:27:27.000000000 -0400
29440@@ -97,7 +97,7 @@
29441 #define DIBUSB_IOCTL_CMD_DISABLE_STREAM 0x02
29442
29443 struct dibusb_state {
29444- struct dib_fe_xfer_ops ops;
29445+ dib_fe_xfer_ops_no_const ops;
29446 int mt2060_present;
29447 u8 tuner_addr;
29448 };
29449diff -urNp linux-3.0.7/drivers/media/dvb/dvb-usb/dw2102.c linux-3.0.7/drivers/media/dvb/dvb-usb/dw2102.c
29450--- linux-3.0.7/drivers/media/dvb/dvb-usb/dw2102.c 2011-07-21 22:17:23.000000000 -0400
29451+++ linux-3.0.7/drivers/media/dvb/dvb-usb/dw2102.c 2011-08-24 18:27:45.000000000 -0400
29452@@ -95,7 +95,7 @@ struct su3000_state {
29453
29454 struct s6x0_state {
29455 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
29456-};
29457+} __no_const;
29458
29459 /* debug */
29460 static int dvb_usb_dw2102_debug;
29461diff -urNp linux-3.0.7/drivers/media/dvb/dvb-usb/lmedm04.c linux-3.0.7/drivers/media/dvb/dvb-usb/lmedm04.c
29462--- linux-3.0.7/drivers/media/dvb/dvb-usb/lmedm04.c 2011-07-21 22:17:23.000000000 -0400
29463+++ linux-3.0.7/drivers/media/dvb/dvb-usb/lmedm04.c 2011-08-23 21:48:14.000000000 -0400
29464@@ -742,6 +742,7 @@ static int lme2510_download_firmware(str
29465 usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
29466 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);
29467
29468+ pax_track_stack();
29469
29470 data[0] = 0x8a;
29471 len_in = 1;
29472@@ -764,6 +765,8 @@ static void lme_coldreset(struct usb_dev
29473 int ret = 0, len_in;
29474 u8 data[512] = {0};
29475
29476+ pax_track_stack();
29477+
29478 data[0] = 0x0a;
29479 len_in = 1;
29480 info("FRM Firmware Cold Reset");
29481diff -urNp linux-3.0.7/drivers/media/dvb/frontends/dib3000.h linux-3.0.7/drivers/media/dvb/frontends/dib3000.h
29482--- linux-3.0.7/drivers/media/dvb/frontends/dib3000.h 2011-07-21 22:17:23.000000000 -0400
29483+++ linux-3.0.7/drivers/media/dvb/frontends/dib3000.h 2011-10-07 19:07:39.000000000 -0400
29484@@ -40,10 +40,11 @@ struct dib_fe_xfer_ops
29485 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
29486 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
29487 };
29488+typedef struct dib_fe_xfer_ops __no_const dib_fe_xfer_ops_no_const;
29489
29490 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
29491 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
29492- struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops);
29493+ struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops);
29494 #else
29495 static inline struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
29496 struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
29497diff -urNp linux-3.0.7/drivers/media/dvb/frontends/dib3000mb.c linux-3.0.7/drivers/media/dvb/frontends/dib3000mb.c
29498--- linux-3.0.7/drivers/media/dvb/frontends/dib3000mb.c 2011-07-21 22:17:23.000000000 -0400
29499+++ linux-3.0.7/drivers/media/dvb/frontends/dib3000mb.c 2011-08-24 18:28:42.000000000 -0400
29500@@ -756,7 +756,7 @@ static int dib3000mb_tuner_pass_ctrl(str
29501 static struct dvb_frontend_ops dib3000mb_ops;
29502
29503 struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
29504- struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
29505+ struct i2c_adapter* i2c, dib_fe_xfer_ops_no_const *xfer_ops)
29506 {
29507 struct dib3000_state* state = NULL;
29508
29509diff -urNp linux-3.0.7/drivers/media/dvb/frontends/mb86a16.c linux-3.0.7/drivers/media/dvb/frontends/mb86a16.c
29510--- linux-3.0.7/drivers/media/dvb/frontends/mb86a16.c 2011-07-21 22:17:23.000000000 -0400
29511+++ linux-3.0.7/drivers/media/dvb/frontends/mb86a16.c 2011-08-23 21:48:14.000000000 -0400
29512@@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
29513 int ret = -1;
29514 int sync;
29515
29516+ pax_track_stack();
29517+
29518 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
29519
29520 fcp = 3000;
29521diff -urNp linux-3.0.7/drivers/media/dvb/frontends/or51211.c linux-3.0.7/drivers/media/dvb/frontends/or51211.c
29522--- linux-3.0.7/drivers/media/dvb/frontends/or51211.c 2011-07-21 22:17:23.000000000 -0400
29523+++ linux-3.0.7/drivers/media/dvb/frontends/or51211.c 2011-08-23 21:48:14.000000000 -0400
29524@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
29525 u8 tudata[585];
29526 int i;
29527
29528+ pax_track_stack();
29529+
29530 dprintk("Firmware is %zd bytes\n",fw->size);
29531
29532 /* Get eprom data */
29533diff -urNp linux-3.0.7/drivers/media/dvb/ngene/ngene-cards.c linux-3.0.7/drivers/media/dvb/ngene/ngene-cards.c
29534--- linux-3.0.7/drivers/media/dvb/ngene/ngene-cards.c 2011-07-21 22:17:23.000000000 -0400
29535+++ linux-3.0.7/drivers/media/dvb/ngene/ngene-cards.c 2011-10-11 10:44:33.000000000 -0400
29536@@ -379,7 +379,7 @@ static struct ngene_info ngene_info_m780
29537
29538 /****************************************************************************/
29539
29540-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
29541+static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
29542 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
29543 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
29544 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
29545diff -urNp linux-3.0.7/drivers/media/video/cx18/cx18-driver.c linux-3.0.7/drivers/media/video/cx18/cx18-driver.c
29546--- linux-3.0.7/drivers/media/video/cx18/cx18-driver.c 2011-07-21 22:17:23.000000000 -0400
29547+++ linux-3.0.7/drivers/media/video/cx18/cx18-driver.c 2011-08-23 21:48:14.000000000 -0400
29548@@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
29549 struct i2c_client c;
29550 u8 eedata[256];
29551
29552+ pax_track_stack();
29553+
29554 memset(&c, 0, sizeof(c));
29555 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
29556 c.adapter = &cx->i2c_adap[0];
29557diff -urNp linux-3.0.7/drivers/media/video/cx23885/cx23885-input.c linux-3.0.7/drivers/media/video/cx23885/cx23885-input.c
29558--- linux-3.0.7/drivers/media/video/cx23885/cx23885-input.c 2011-07-21 22:17:23.000000000 -0400
29559+++ linux-3.0.7/drivers/media/video/cx23885/cx23885-input.c 2011-08-23 21:48:14.000000000 -0400
29560@@ -53,6 +53,8 @@ static void cx23885_input_process_measur
29561 bool handle = false;
29562 struct ir_raw_event ir_core_event[64];
29563
29564+ pax_track_stack();
29565+
29566 do {
29567 num = 0;
29568 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
29569diff -urNp linux-3.0.7/drivers/media/video/cx88/cx88-alsa.c linux-3.0.7/drivers/media/video/cx88/cx88-alsa.c
29570--- linux-3.0.7/drivers/media/video/cx88/cx88-alsa.c 2011-07-21 22:17:23.000000000 -0400
29571+++ linux-3.0.7/drivers/media/video/cx88/cx88-alsa.c 2011-10-11 10:44:33.000000000 -0400
29572@@ -764,7 +764,7 @@ static struct snd_kcontrol_new snd_cx88_
29573 * Only boards with eeprom and byte 1 at eeprom=1 have it
29574 */
29575
29576-static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
29577+static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
29578 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
29579 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
29580 {0, }
29581diff -urNp linux-3.0.7/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-3.0.7/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
29582--- linux-3.0.7/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-07-21 22:17:23.000000000 -0400
29583+++ linux-3.0.7/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-08-23 21:48:14.000000000 -0400
29584@@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
29585 u8 *eeprom;
29586 struct tveeprom tvdata;
29587
29588+ pax_track_stack();
29589+
29590 memset(&tvdata,0,sizeof(tvdata));
29591
29592 eeprom = pvr2_eeprom_fetch(hdw);
29593diff -urNp linux-3.0.7/drivers/media/video/saa7134/saa6752hs.c linux-3.0.7/drivers/media/video/saa7134/saa6752hs.c
29594--- linux-3.0.7/drivers/media/video/saa7134/saa6752hs.c 2011-07-21 22:17:23.000000000 -0400
29595+++ linux-3.0.7/drivers/media/video/saa7134/saa6752hs.c 2011-08-23 21:48:14.000000000 -0400
29596@@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
29597 unsigned char localPAT[256];
29598 unsigned char localPMT[256];
29599
29600+ pax_track_stack();
29601+
29602 /* Set video format - must be done first as it resets other settings */
29603 set_reg8(client, 0x41, h->video_format);
29604
29605diff -urNp linux-3.0.7/drivers/media/video/saa7164/saa7164-cmd.c linux-3.0.7/drivers/media/video/saa7164/saa7164-cmd.c
29606--- linux-3.0.7/drivers/media/video/saa7164/saa7164-cmd.c 2011-07-21 22:17:23.000000000 -0400
29607+++ linux-3.0.7/drivers/media/video/saa7164/saa7164-cmd.c 2011-08-23 21:48:14.000000000 -0400
29608@@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
29609 u8 tmp[512];
29610 dprintk(DBGLVL_CMD, "%s()\n", __func__);
29611
29612+ pax_track_stack();
29613+
29614 /* While any outstand message on the bus exists... */
29615 do {
29616
29617@@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
29618 u8 tmp[512];
29619 dprintk(DBGLVL_CMD, "%s()\n", __func__);
29620
29621+ pax_track_stack();
29622+
29623 while (loop) {
29624
29625 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
29626diff -urNp linux-3.0.7/drivers/media/video/timblogiw.c linux-3.0.7/drivers/media/video/timblogiw.c
29627--- linux-3.0.7/drivers/media/video/timblogiw.c 2011-07-21 22:17:23.000000000 -0400
29628+++ linux-3.0.7/drivers/media/video/timblogiw.c 2011-08-24 18:29:20.000000000 -0400
29629@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *f
29630
29631 /* Platform device functions */
29632
29633-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
29634+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
29635 .vidioc_querycap = timblogiw_querycap,
29636 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
29637 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
29638diff -urNp linux-3.0.7/drivers/media/video/usbvision/usbvision-core.c linux-3.0.7/drivers/media/video/usbvision/usbvision-core.c
29639--- linux-3.0.7/drivers/media/video/usbvision/usbvision-core.c 2011-07-21 22:17:23.000000000 -0400
29640+++ linux-3.0.7/drivers/media/video/usbvision/usbvision-core.c 2011-08-23 21:48:14.000000000 -0400
29641@@ -707,6 +707,8 @@ static enum parse_state usbvision_parse_
29642 unsigned char rv, gv, bv;
29643 static unsigned char *Y, *U, *V;
29644
29645+ pax_track_stack();
29646+
29647 frame = usbvision->cur_frame;
29648 image_size = frame->frmwidth * frame->frmheight;
29649 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
29650diff -urNp linux-3.0.7/drivers/media/video/videobuf-dma-sg.c linux-3.0.7/drivers/media/video/videobuf-dma-sg.c
29651--- linux-3.0.7/drivers/media/video/videobuf-dma-sg.c 2011-07-21 22:17:23.000000000 -0400
29652+++ linux-3.0.7/drivers/media/video/videobuf-dma-sg.c 2011-08-23 21:48:14.000000000 -0400
29653@@ -606,6 +606,8 @@ void *videobuf_sg_alloc(size_t size)
29654 {
29655 struct videobuf_queue q;
29656
29657+ pax_track_stack();
29658+
29659 /* Required to make generic handler to call __videobuf_alloc */
29660 q.int_ops = &sg_ops;
29661
29662diff -urNp linux-3.0.7/drivers/message/fusion/mptbase.c linux-3.0.7/drivers/message/fusion/mptbase.c
29663--- linux-3.0.7/drivers/message/fusion/mptbase.c 2011-07-21 22:17:23.000000000 -0400
29664+++ linux-3.0.7/drivers/message/fusion/mptbase.c 2011-08-23 21:48:14.000000000 -0400
29665@@ -6681,8 +6681,13 @@ static int mpt_iocinfo_proc_show(struct
29666 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
29667 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
29668
29669+#ifdef CONFIG_GRKERNSEC_HIDESYM
29670+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
29671+#else
29672 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
29673 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
29674+#endif
29675+
29676 /*
29677 * Rounding UP to nearest 4-kB boundary here...
29678 */
29679diff -urNp linux-3.0.7/drivers/message/fusion/mptsas.c linux-3.0.7/drivers/message/fusion/mptsas.c
29680--- linux-3.0.7/drivers/message/fusion/mptsas.c 2011-07-21 22:17:23.000000000 -0400
29681+++ linux-3.0.7/drivers/message/fusion/mptsas.c 2011-08-23 21:47:55.000000000 -0400
29682@@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
29683 return 0;
29684 }
29685
29686+static inline void
29687+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
29688+{
29689+ if (phy_info->port_details) {
29690+ phy_info->port_details->rphy = rphy;
29691+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
29692+ ioc->name, rphy));
29693+ }
29694+
29695+ if (rphy) {
29696+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
29697+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
29698+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
29699+ ioc->name, rphy, rphy->dev.release));
29700+ }
29701+}
29702+
29703 /* no mutex */
29704 static void
29705 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
29706@@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
29707 return NULL;
29708 }
29709
29710-static inline void
29711-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
29712-{
29713- if (phy_info->port_details) {
29714- phy_info->port_details->rphy = rphy;
29715- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
29716- ioc->name, rphy));
29717- }
29718-
29719- if (rphy) {
29720- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
29721- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
29722- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
29723- ioc->name, rphy, rphy->dev.release));
29724- }
29725-}
29726-
29727 static inline struct sas_port *
29728 mptsas_get_port(struct mptsas_phyinfo *phy_info)
29729 {
29730diff -urNp linux-3.0.7/drivers/message/fusion/mptscsih.c linux-3.0.7/drivers/message/fusion/mptscsih.c
29731--- linux-3.0.7/drivers/message/fusion/mptscsih.c 2011-07-21 22:17:23.000000000 -0400
29732+++ linux-3.0.7/drivers/message/fusion/mptscsih.c 2011-08-23 21:47:55.000000000 -0400
29733@@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
29734
29735 h = shost_priv(SChost);
29736
29737- if (h) {
29738- if (h->info_kbuf == NULL)
29739- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
29740- return h->info_kbuf;
29741- h->info_kbuf[0] = '\0';
29742+ if (!h)
29743+ return NULL;
29744
29745- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
29746- h->info_kbuf[size-1] = '\0';
29747- }
29748+ if (h->info_kbuf == NULL)
29749+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
29750+ return h->info_kbuf;
29751+ h->info_kbuf[0] = '\0';
29752+
29753+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
29754+ h->info_kbuf[size-1] = '\0';
29755
29756 return h->info_kbuf;
29757 }
29758diff -urNp linux-3.0.7/drivers/message/i2o/i2o_config.c linux-3.0.7/drivers/message/i2o/i2o_config.c
29759--- linux-3.0.7/drivers/message/i2o/i2o_config.c 2011-07-21 22:17:23.000000000 -0400
29760+++ linux-3.0.7/drivers/message/i2o/i2o_config.c 2011-08-23 21:48:14.000000000 -0400
29761@@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
29762 struct i2o_message *msg;
29763 unsigned int iop;
29764
29765+ pax_track_stack();
29766+
29767 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
29768 return -EFAULT;
29769
29770diff -urNp linux-3.0.7/drivers/message/i2o/i2o_proc.c linux-3.0.7/drivers/message/i2o/i2o_proc.c
29771--- linux-3.0.7/drivers/message/i2o/i2o_proc.c 2011-07-21 22:17:23.000000000 -0400
29772+++ linux-3.0.7/drivers/message/i2o/i2o_proc.c 2011-08-23 21:47:55.000000000 -0400
29773@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
29774 "Array Controller Device"
29775 };
29776
29777-static char *chtostr(u8 * chars, int n)
29778-{
29779- char tmp[256];
29780- tmp[0] = 0;
29781- return strncat(tmp, (char *)chars, n);
29782-}
29783-
29784 static int i2o_report_query_status(struct seq_file *seq, int block_status,
29785 char *group)
29786 {
29787@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
29788
29789 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
29790 seq_printf(seq, "%-#8x", ddm_table.module_id);
29791- seq_printf(seq, "%-29s",
29792- chtostr(ddm_table.module_name_version, 28));
29793+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
29794 seq_printf(seq, "%9d ", ddm_table.data_size);
29795 seq_printf(seq, "%8d", ddm_table.code_size);
29796
29797@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
29798
29799 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
29800 seq_printf(seq, "%-#8x", dst->module_id);
29801- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
29802- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
29803+ seq_printf(seq, "%-.28s", dst->module_name_version);
29804+ seq_printf(seq, "%-.8s", dst->date);
29805 seq_printf(seq, "%8d ", dst->module_size);
29806 seq_printf(seq, "%8d ", dst->mpb_size);
29807 seq_printf(seq, "0x%04x", dst->module_flags);
29808@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
29809 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
29810 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
29811 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
29812- seq_printf(seq, "Vendor info : %s\n",
29813- chtostr((u8 *) (work32 + 2), 16));
29814- seq_printf(seq, "Product info : %s\n",
29815- chtostr((u8 *) (work32 + 6), 16));
29816- seq_printf(seq, "Description : %s\n",
29817- chtostr((u8 *) (work32 + 10), 16));
29818- seq_printf(seq, "Product rev. : %s\n",
29819- chtostr((u8 *) (work32 + 14), 8));
29820+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
29821+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
29822+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
29823+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
29824
29825 seq_printf(seq, "Serial number : ");
29826 print_serial_number(seq, (u8 *) (work32 + 16),
29827@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
29828 }
29829
29830 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
29831- seq_printf(seq, "Module name : %s\n",
29832- chtostr(result.module_name, 24));
29833- seq_printf(seq, "Module revision : %s\n",
29834- chtostr(result.module_rev, 8));
29835+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
29836+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
29837
29838 seq_printf(seq, "Serial number : ");
29839 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
29840@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
29841 return 0;
29842 }
29843
29844- seq_printf(seq, "Device name : %s\n",
29845- chtostr(result.device_name, 64));
29846- seq_printf(seq, "Service name : %s\n",
29847- chtostr(result.service_name, 64));
29848- seq_printf(seq, "Physical name : %s\n",
29849- chtostr(result.physical_location, 64));
29850- seq_printf(seq, "Instance number : %s\n",
29851- chtostr(result.instance_number, 4));
29852+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
29853+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
29854+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
29855+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
29856
29857 return 0;
29858 }
29859diff -urNp linux-3.0.7/drivers/message/i2o/iop.c linux-3.0.7/drivers/message/i2o/iop.c
29860--- linux-3.0.7/drivers/message/i2o/iop.c 2011-07-21 22:17:23.000000000 -0400
29861+++ linux-3.0.7/drivers/message/i2o/iop.c 2011-08-23 21:47:55.000000000 -0400
29862@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
29863
29864 spin_lock_irqsave(&c->context_list_lock, flags);
29865
29866- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
29867- atomic_inc(&c->context_list_counter);
29868+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
29869+ atomic_inc_unchecked(&c->context_list_counter);
29870
29871- entry->context = atomic_read(&c->context_list_counter);
29872+ entry->context = atomic_read_unchecked(&c->context_list_counter);
29873
29874 list_add(&entry->list, &c->context_list);
29875
29876@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
29877
29878 #if BITS_PER_LONG == 64
29879 spin_lock_init(&c->context_list_lock);
29880- atomic_set(&c->context_list_counter, 0);
29881+ atomic_set_unchecked(&c->context_list_counter, 0);
29882 INIT_LIST_HEAD(&c->context_list);
29883 #endif
29884
29885diff -urNp linux-3.0.7/drivers/mfd/ab3100-core.c linux-3.0.7/drivers/mfd/ab3100-core.c
29886--- linux-3.0.7/drivers/mfd/ab3100-core.c 2011-07-21 22:17:23.000000000 -0400
29887+++ linux-3.0.7/drivers/mfd/ab3100-core.c 2011-10-11 10:44:33.000000000 -0400
29888@@ -809,7 +809,7 @@ struct ab_family_id {
29889 char *name;
29890 };
29891
29892-static const struct ab_family_id ids[] __devinitdata = {
29893+static const struct ab_family_id ids[] __devinitconst = {
29894 /* AB3100 */
29895 {
29896 .id = 0xc0,
29897diff -urNp linux-3.0.7/drivers/mfd/abx500-core.c linux-3.0.7/drivers/mfd/abx500-core.c
29898--- linux-3.0.7/drivers/mfd/abx500-core.c 2011-07-21 22:17:23.000000000 -0400
29899+++ linux-3.0.7/drivers/mfd/abx500-core.c 2011-08-23 21:47:55.000000000 -0400
29900@@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
29901
29902 struct abx500_device_entry {
29903 struct list_head list;
29904- struct abx500_ops ops;
29905+ abx500_ops_no_const ops;
29906 struct device *dev;
29907 };
29908
29909diff -urNp linux-3.0.7/drivers/mfd/janz-cmodio.c linux-3.0.7/drivers/mfd/janz-cmodio.c
29910--- linux-3.0.7/drivers/mfd/janz-cmodio.c 2011-07-21 22:17:23.000000000 -0400
29911+++ linux-3.0.7/drivers/mfd/janz-cmodio.c 2011-08-23 21:47:55.000000000 -0400
29912@@ -13,6 +13,7 @@
29913
29914 #include <linux/kernel.h>
29915 #include <linux/module.h>
29916+#include <linux/slab.h>
29917 #include <linux/init.h>
29918 #include <linux/pci.h>
29919 #include <linux/interrupt.h>
29920diff -urNp linux-3.0.7/drivers/mfd/wm8350-i2c.c linux-3.0.7/drivers/mfd/wm8350-i2c.c
29921--- linux-3.0.7/drivers/mfd/wm8350-i2c.c 2011-07-21 22:17:23.000000000 -0400
29922+++ linux-3.0.7/drivers/mfd/wm8350-i2c.c 2011-08-23 21:48:14.000000000 -0400
29923@@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
29924 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
29925 int ret;
29926
29927+ pax_track_stack();
29928+
29929 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
29930 return -EINVAL;
29931
29932diff -urNp linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.c linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.c
29933--- linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.c 2011-10-17 23:17:09.000000000 -0400
29934+++ linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.c 2011-10-17 23:17:19.000000000 -0400
29935@@ -437,7 +437,7 @@ static irqreturn_t lis302dl_interrupt(in
29936 * the lid is closed. This leads to interrupts as soon as a little move
29937 * is done.
29938 */
29939- atomic_inc(&lis3_dev.count);
29940+ atomic_inc_unchecked(&lis3_dev.count);
29941
29942 wake_up_interruptible(&lis3_dev.misc_wait);
29943 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
29944@@ -520,7 +520,7 @@ static int lis3lv02d_misc_open(struct in
29945 if (lis3_dev.pm_dev)
29946 pm_runtime_get_sync(lis3_dev.pm_dev);
29947
29948- atomic_set(&lis3_dev.count, 0);
29949+ atomic_set_unchecked(&lis3_dev.count, 0);
29950 return 0;
29951 }
29952
29953@@ -547,7 +547,7 @@ static ssize_t lis3lv02d_misc_read(struc
29954 add_wait_queue(&lis3_dev.misc_wait, &wait);
29955 while (true) {
29956 set_current_state(TASK_INTERRUPTIBLE);
29957- data = atomic_xchg(&lis3_dev.count, 0);
29958+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
29959 if (data)
29960 break;
29961
29962@@ -585,7 +585,7 @@ out:
29963 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
29964 {
29965 poll_wait(file, &lis3_dev.misc_wait, wait);
29966- if (atomic_read(&lis3_dev.count))
29967+ if (atomic_read_unchecked(&lis3_dev.count))
29968 return POLLIN | POLLRDNORM;
29969 return 0;
29970 }
29971diff -urNp linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.h linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.h
29972--- linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.h 2011-07-21 22:17:23.000000000 -0400
29973+++ linux-3.0.7/drivers/misc/lis3lv02d/lis3lv02d.h 2011-08-23 21:47:55.000000000 -0400
29974@@ -265,7 +265,7 @@ struct lis3lv02d {
29975 struct input_polled_dev *idev; /* input device */
29976 struct platform_device *pdev; /* platform device */
29977 struct regulator_bulk_data regulators[2];
29978- atomic_t count; /* interrupt count after last read */
29979+ atomic_unchecked_t count; /* interrupt count after last read */
29980 union axis_conversion ac; /* hw -> logical axis */
29981 int mapped_btns[3];
29982
29983diff -urNp linux-3.0.7/drivers/misc/sgi-gru/gruhandles.c linux-3.0.7/drivers/misc/sgi-gru/gruhandles.c
29984--- linux-3.0.7/drivers/misc/sgi-gru/gruhandles.c 2011-07-21 22:17:23.000000000 -0400
29985+++ linux-3.0.7/drivers/misc/sgi-gru/gruhandles.c 2011-08-23 21:47:55.000000000 -0400
29986@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
29987 unsigned long nsec;
29988
29989 nsec = CLKS2NSEC(clks);
29990- atomic_long_inc(&mcs_op_statistics[op].count);
29991- atomic_long_add(nsec, &mcs_op_statistics[op].total);
29992+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
29993+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
29994 if (mcs_op_statistics[op].max < nsec)
29995 mcs_op_statistics[op].max = nsec;
29996 }
29997diff -urNp linux-3.0.7/drivers/misc/sgi-gru/gruprocfs.c linux-3.0.7/drivers/misc/sgi-gru/gruprocfs.c
29998--- linux-3.0.7/drivers/misc/sgi-gru/gruprocfs.c 2011-07-21 22:17:23.000000000 -0400
29999+++ linux-3.0.7/drivers/misc/sgi-gru/gruprocfs.c 2011-08-23 21:47:55.000000000 -0400
30000@@ -32,9 +32,9 @@
30001
30002 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
30003
30004-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
30005+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
30006 {
30007- unsigned long val = atomic_long_read(v);
30008+ unsigned long val = atomic_long_read_unchecked(v);
30009
30010 seq_printf(s, "%16lu %s\n", val, id);
30011 }
30012@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
30013
30014 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
30015 for (op = 0; op < mcsop_last; op++) {
30016- count = atomic_long_read(&mcs_op_statistics[op].count);
30017- total = atomic_long_read(&mcs_op_statistics[op].total);
30018+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
30019+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
30020 max = mcs_op_statistics[op].max;
30021 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
30022 count ? total / count : 0, max);
30023diff -urNp linux-3.0.7/drivers/misc/sgi-gru/grutables.h linux-3.0.7/drivers/misc/sgi-gru/grutables.h
30024--- linux-3.0.7/drivers/misc/sgi-gru/grutables.h 2011-07-21 22:17:23.000000000 -0400
30025+++ linux-3.0.7/drivers/misc/sgi-gru/grutables.h 2011-08-23 21:47:55.000000000 -0400
30026@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
30027 * GRU statistics.
30028 */
30029 struct gru_stats_s {
30030- atomic_long_t vdata_alloc;
30031- atomic_long_t vdata_free;
30032- atomic_long_t gts_alloc;
30033- atomic_long_t gts_free;
30034- atomic_long_t gms_alloc;
30035- atomic_long_t gms_free;
30036- atomic_long_t gts_double_allocate;
30037- atomic_long_t assign_context;
30038- atomic_long_t assign_context_failed;
30039- atomic_long_t free_context;
30040- atomic_long_t load_user_context;
30041- atomic_long_t load_kernel_context;
30042- atomic_long_t lock_kernel_context;
30043- atomic_long_t unlock_kernel_context;
30044- atomic_long_t steal_user_context;
30045- atomic_long_t steal_kernel_context;
30046- atomic_long_t steal_context_failed;
30047- atomic_long_t nopfn;
30048- atomic_long_t asid_new;
30049- atomic_long_t asid_next;
30050- atomic_long_t asid_wrap;
30051- atomic_long_t asid_reuse;
30052- atomic_long_t intr;
30053- atomic_long_t intr_cbr;
30054- atomic_long_t intr_tfh;
30055- atomic_long_t intr_spurious;
30056- atomic_long_t intr_mm_lock_failed;
30057- atomic_long_t call_os;
30058- atomic_long_t call_os_wait_queue;
30059- atomic_long_t user_flush_tlb;
30060- atomic_long_t user_unload_context;
30061- atomic_long_t user_exception;
30062- atomic_long_t set_context_option;
30063- atomic_long_t check_context_retarget_intr;
30064- atomic_long_t check_context_unload;
30065- atomic_long_t tlb_dropin;
30066- atomic_long_t tlb_preload_page;
30067- atomic_long_t tlb_dropin_fail_no_asid;
30068- atomic_long_t tlb_dropin_fail_upm;
30069- atomic_long_t tlb_dropin_fail_invalid;
30070- atomic_long_t tlb_dropin_fail_range_active;
30071- atomic_long_t tlb_dropin_fail_idle;
30072- atomic_long_t tlb_dropin_fail_fmm;
30073- atomic_long_t tlb_dropin_fail_no_exception;
30074- atomic_long_t tfh_stale_on_fault;
30075- atomic_long_t mmu_invalidate_range;
30076- atomic_long_t mmu_invalidate_page;
30077- atomic_long_t flush_tlb;
30078- atomic_long_t flush_tlb_gru;
30079- atomic_long_t flush_tlb_gru_tgh;
30080- atomic_long_t flush_tlb_gru_zero_asid;
30081-
30082- atomic_long_t copy_gpa;
30083- atomic_long_t read_gpa;
30084-
30085- atomic_long_t mesq_receive;
30086- atomic_long_t mesq_receive_none;
30087- atomic_long_t mesq_send;
30088- atomic_long_t mesq_send_failed;
30089- atomic_long_t mesq_noop;
30090- atomic_long_t mesq_send_unexpected_error;
30091- atomic_long_t mesq_send_lb_overflow;
30092- atomic_long_t mesq_send_qlimit_reached;
30093- atomic_long_t mesq_send_amo_nacked;
30094- atomic_long_t mesq_send_put_nacked;
30095- atomic_long_t mesq_page_overflow;
30096- atomic_long_t mesq_qf_locked;
30097- atomic_long_t mesq_qf_noop_not_full;
30098- atomic_long_t mesq_qf_switch_head_failed;
30099- atomic_long_t mesq_qf_unexpected_error;
30100- atomic_long_t mesq_noop_unexpected_error;
30101- atomic_long_t mesq_noop_lb_overflow;
30102- atomic_long_t mesq_noop_qlimit_reached;
30103- atomic_long_t mesq_noop_amo_nacked;
30104- atomic_long_t mesq_noop_put_nacked;
30105- atomic_long_t mesq_noop_page_overflow;
30106+ atomic_long_unchecked_t vdata_alloc;
30107+ atomic_long_unchecked_t vdata_free;
30108+ atomic_long_unchecked_t gts_alloc;
30109+ atomic_long_unchecked_t gts_free;
30110+ atomic_long_unchecked_t gms_alloc;
30111+ atomic_long_unchecked_t gms_free;
30112+ atomic_long_unchecked_t gts_double_allocate;
30113+ atomic_long_unchecked_t assign_context;
30114+ atomic_long_unchecked_t assign_context_failed;
30115+ atomic_long_unchecked_t free_context;
30116+ atomic_long_unchecked_t load_user_context;
30117+ atomic_long_unchecked_t load_kernel_context;
30118+ atomic_long_unchecked_t lock_kernel_context;
30119+ atomic_long_unchecked_t unlock_kernel_context;
30120+ atomic_long_unchecked_t steal_user_context;
30121+ atomic_long_unchecked_t steal_kernel_context;
30122+ atomic_long_unchecked_t steal_context_failed;
30123+ atomic_long_unchecked_t nopfn;
30124+ atomic_long_unchecked_t asid_new;
30125+ atomic_long_unchecked_t asid_next;
30126+ atomic_long_unchecked_t asid_wrap;
30127+ atomic_long_unchecked_t asid_reuse;
30128+ atomic_long_unchecked_t intr;
30129+ atomic_long_unchecked_t intr_cbr;
30130+ atomic_long_unchecked_t intr_tfh;
30131+ atomic_long_unchecked_t intr_spurious;
30132+ atomic_long_unchecked_t intr_mm_lock_failed;
30133+ atomic_long_unchecked_t call_os;
30134+ atomic_long_unchecked_t call_os_wait_queue;
30135+ atomic_long_unchecked_t user_flush_tlb;
30136+ atomic_long_unchecked_t user_unload_context;
30137+ atomic_long_unchecked_t user_exception;
30138+ atomic_long_unchecked_t set_context_option;
30139+ atomic_long_unchecked_t check_context_retarget_intr;
30140+ atomic_long_unchecked_t check_context_unload;
30141+ atomic_long_unchecked_t tlb_dropin;
30142+ atomic_long_unchecked_t tlb_preload_page;
30143+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
30144+ atomic_long_unchecked_t tlb_dropin_fail_upm;
30145+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
30146+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
30147+ atomic_long_unchecked_t tlb_dropin_fail_idle;
30148+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
30149+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
30150+ atomic_long_unchecked_t tfh_stale_on_fault;
30151+ atomic_long_unchecked_t mmu_invalidate_range;
30152+ atomic_long_unchecked_t mmu_invalidate_page;
30153+ atomic_long_unchecked_t flush_tlb;
30154+ atomic_long_unchecked_t flush_tlb_gru;
30155+ atomic_long_unchecked_t flush_tlb_gru_tgh;
30156+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
30157+
30158+ atomic_long_unchecked_t copy_gpa;
30159+ atomic_long_unchecked_t read_gpa;
30160+
30161+ atomic_long_unchecked_t mesq_receive;
30162+ atomic_long_unchecked_t mesq_receive_none;
30163+ atomic_long_unchecked_t mesq_send;
30164+ atomic_long_unchecked_t mesq_send_failed;
30165+ atomic_long_unchecked_t mesq_noop;
30166+ atomic_long_unchecked_t mesq_send_unexpected_error;
30167+ atomic_long_unchecked_t mesq_send_lb_overflow;
30168+ atomic_long_unchecked_t mesq_send_qlimit_reached;
30169+ atomic_long_unchecked_t mesq_send_amo_nacked;
30170+ atomic_long_unchecked_t mesq_send_put_nacked;
30171+ atomic_long_unchecked_t mesq_page_overflow;
30172+ atomic_long_unchecked_t mesq_qf_locked;
30173+ atomic_long_unchecked_t mesq_qf_noop_not_full;
30174+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
30175+ atomic_long_unchecked_t mesq_qf_unexpected_error;
30176+ atomic_long_unchecked_t mesq_noop_unexpected_error;
30177+ atomic_long_unchecked_t mesq_noop_lb_overflow;
30178+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
30179+ atomic_long_unchecked_t mesq_noop_amo_nacked;
30180+ atomic_long_unchecked_t mesq_noop_put_nacked;
30181+ atomic_long_unchecked_t mesq_noop_page_overflow;
30182
30183 };
30184
30185@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
30186 tghop_invalidate, mcsop_last};
30187
30188 struct mcs_op_statistic {
30189- atomic_long_t count;
30190- atomic_long_t total;
30191+ atomic_long_unchecked_t count;
30192+ atomic_long_unchecked_t total;
30193 unsigned long max;
30194 };
30195
30196@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
30197
30198 #define STAT(id) do { \
30199 if (gru_options & OPT_STATS) \
30200- atomic_long_inc(&gru_stats.id); \
30201+ atomic_long_inc_unchecked(&gru_stats.id); \
30202 } while (0)
30203
30204 #ifdef CONFIG_SGI_GRU_DEBUG
30205diff -urNp linux-3.0.7/drivers/misc/sgi-xp/xpc.h linux-3.0.7/drivers/misc/sgi-xp/xpc.h
30206--- linux-3.0.7/drivers/misc/sgi-xp/xpc.h 2011-07-21 22:17:23.000000000 -0400
30207+++ linux-3.0.7/drivers/misc/sgi-xp/xpc.h 2011-10-11 10:44:33.000000000 -0400
30208@@ -835,6 +835,7 @@ struct xpc_arch_operations {
30209 void (*received_payload) (struct xpc_channel *, void *);
30210 void (*notify_senders_of_disconnect) (struct xpc_channel *);
30211 };
30212+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
30213
30214 /* struct xpc_partition act_state values (for XPC HB) */
30215
30216@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_regis
30217 /* found in xpc_main.c */
30218 extern struct device *xpc_part;
30219 extern struct device *xpc_chan;
30220-extern struct xpc_arch_operations xpc_arch_ops;
30221+extern xpc_arch_operations_no_const xpc_arch_ops;
30222 extern int xpc_disengage_timelimit;
30223 extern int xpc_disengage_timedout;
30224 extern int xpc_activate_IRQ_rcvd;
30225diff -urNp linux-3.0.7/drivers/misc/sgi-xp/xpc_main.c linux-3.0.7/drivers/misc/sgi-xp/xpc_main.c
30226--- linux-3.0.7/drivers/misc/sgi-xp/xpc_main.c 2011-07-21 22:17:23.000000000 -0400
30227+++ linux-3.0.7/drivers/misc/sgi-xp/xpc_main.c 2011-10-11 10:44:33.000000000 -0400
30228@@ -162,7 +162,7 @@ static struct notifier_block xpc_die_not
30229 .notifier_call = xpc_system_die,
30230 };
30231
30232-struct xpc_arch_operations xpc_arch_ops;
30233+xpc_arch_operations_no_const xpc_arch_ops;
30234
30235 /*
30236 * Timer function to enforce the timelimit on the partition disengage.
30237diff -urNp linux-3.0.7/drivers/misc/sgi-xp/xp.h linux-3.0.7/drivers/misc/sgi-xp/xp.h
30238--- linux-3.0.7/drivers/misc/sgi-xp/xp.h 2011-07-21 22:17:23.000000000 -0400
30239+++ linux-3.0.7/drivers/misc/sgi-xp/xp.h 2011-08-23 21:47:55.000000000 -0400
30240@@ -289,7 +289,7 @@ struct xpc_interface {
30241 xpc_notify_func, void *);
30242 void (*received) (short, int, void *);
30243 enum xp_retval (*partid_to_nasids) (short, void *);
30244-};
30245+} __no_const;
30246
30247 extern struct xpc_interface xpc_interface;
30248
30249diff -urNp linux-3.0.7/drivers/mmc/host/sdhci-pci.c linux-3.0.7/drivers/mmc/host/sdhci-pci.c
30250--- linux-3.0.7/drivers/mmc/host/sdhci-pci.c 2011-07-21 22:17:23.000000000 -0400
30251+++ linux-3.0.7/drivers/mmc/host/sdhci-pci.c 2011-10-11 10:44:33.000000000 -0400
30252@@ -524,7 +524,7 @@ static const struct sdhci_pci_fixes sdhc
30253 .probe = via_probe,
30254 };
30255
30256-static const struct pci_device_id pci_ids[] __devinitdata = {
30257+static const struct pci_device_id pci_ids[] __devinitconst = {
30258 {
30259 .vendor = PCI_VENDOR_ID_RICOH,
30260 .device = PCI_DEVICE_ID_RICOH_R5C822,
30261diff -urNp linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0001.c linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0001.c
30262--- linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0001.c 2011-07-21 22:17:23.000000000 -0400
30263+++ linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0001.c 2011-08-23 21:48:14.000000000 -0400
30264@@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
30265 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
30266 unsigned long timeo = jiffies + HZ;
30267
30268+ pax_track_stack();
30269+
30270 /* Prevent setting state FL_SYNCING for chip in suspended state. */
30271 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
30272 goto sleep;
30273@@ -1653,6 +1655,8 @@ static int __xipram do_write_buffer(stru
30274 unsigned long initial_adr;
30275 int initial_len = len;
30276
30277+ pax_track_stack();
30278+
30279 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
30280 adr += chip->start;
30281 initial_adr = adr;
30282@@ -1871,6 +1875,8 @@ static int __xipram do_erase_oneblock(st
30283 int retries = 3;
30284 int ret;
30285
30286+ pax_track_stack();
30287+
30288 adr += chip->start;
30289
30290 retry:
30291diff -urNp linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0020.c linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0020.c
30292--- linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0020.c 2011-07-21 22:17:23.000000000 -0400
30293+++ linux-3.0.7/drivers/mtd/chips/cfi_cmdset_0020.c 2011-08-23 21:48:14.000000000 -0400
30294@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
30295 unsigned long cmd_addr;
30296 struct cfi_private *cfi = map->fldrv_priv;
30297
30298+ pax_track_stack();
30299+
30300 adr += chip->start;
30301
30302 /* Ensure cmd read/writes are aligned. */
30303@@ -429,6 +431,8 @@ static inline int do_write_buffer(struct
30304 DECLARE_WAITQUEUE(wait, current);
30305 int wbufsize, z;
30306
30307+ pax_track_stack();
30308+
30309 /* M58LW064A requires bus alignment for buffer wriets -- saw */
30310 if (adr & (map_bankwidth(map)-1))
30311 return -EINVAL;
30312@@ -743,6 +747,8 @@ static inline int do_erase_oneblock(stru
30313 DECLARE_WAITQUEUE(wait, current);
30314 int ret = 0;
30315
30316+ pax_track_stack();
30317+
30318 adr += chip->start;
30319
30320 /* Let's determine this according to the interleave only once */
30321@@ -1048,6 +1054,8 @@ static inline int do_lock_oneblock(struc
30322 unsigned long timeo = jiffies + HZ;
30323 DECLARE_WAITQUEUE(wait, current);
30324
30325+ pax_track_stack();
30326+
30327 adr += chip->start;
30328
30329 /* Let's determine this according to the interleave only once */
30330@@ -1197,6 +1205,8 @@ static inline int do_unlock_oneblock(str
30331 unsigned long timeo = jiffies + HZ;
30332 DECLARE_WAITQUEUE(wait, current);
30333
30334+ pax_track_stack();
30335+
30336 adr += chip->start;
30337
30338 /* Let's determine this according to the interleave only once */
30339diff -urNp linux-3.0.7/drivers/mtd/devices/doc2000.c linux-3.0.7/drivers/mtd/devices/doc2000.c
30340--- linux-3.0.7/drivers/mtd/devices/doc2000.c 2011-07-21 22:17:23.000000000 -0400
30341+++ linux-3.0.7/drivers/mtd/devices/doc2000.c 2011-08-23 21:47:55.000000000 -0400
30342@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
30343
30344 /* The ECC will not be calculated correctly if less than 512 is written */
30345 /* DBB-
30346- if (len != 0x200 && eccbuf)
30347+ if (len != 0x200)
30348 printk(KERN_WARNING
30349 "ECC needs a full sector write (adr: %lx size %lx)\n",
30350 (long) to, (long) len);
30351diff -urNp linux-3.0.7/drivers/mtd/devices/doc2001.c linux-3.0.7/drivers/mtd/devices/doc2001.c
30352--- linux-3.0.7/drivers/mtd/devices/doc2001.c 2011-07-21 22:17:23.000000000 -0400
30353+++ linux-3.0.7/drivers/mtd/devices/doc2001.c 2011-08-23 21:47:55.000000000 -0400
30354@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
30355 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
30356
30357 /* Don't allow read past end of device */
30358- if (from >= this->totlen)
30359+ if (from >= this->totlen || !len)
30360 return -EINVAL;
30361
30362 /* Don't allow a single read to cross a 512-byte block boundary */
30363diff -urNp linux-3.0.7/drivers/mtd/ftl.c linux-3.0.7/drivers/mtd/ftl.c
30364--- linux-3.0.7/drivers/mtd/ftl.c 2011-07-21 22:17:23.000000000 -0400
30365+++ linux-3.0.7/drivers/mtd/ftl.c 2011-08-23 21:48:14.000000000 -0400
30366@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
30367 loff_t offset;
30368 uint16_t srcunitswap = cpu_to_le16(srcunit);
30369
30370+ pax_track_stack();
30371+
30372 eun = &part->EUNInfo[srcunit];
30373 xfer = &part->XferInfo[xferunit];
30374 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
30375diff -urNp linux-3.0.7/drivers/mtd/inftlcore.c linux-3.0.7/drivers/mtd/inftlcore.c
30376--- linux-3.0.7/drivers/mtd/inftlcore.c 2011-07-21 22:17:23.000000000 -0400
30377+++ linux-3.0.7/drivers/mtd/inftlcore.c 2011-08-23 21:48:14.000000000 -0400
30378@@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
30379 struct inftl_oob oob;
30380 size_t retlen;
30381
30382+ pax_track_stack();
30383+
30384 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
30385 "pending=%d)\n", inftl, thisVUC, pendingblock);
30386
30387diff -urNp linux-3.0.7/drivers/mtd/inftlmount.c linux-3.0.7/drivers/mtd/inftlmount.c
30388--- linux-3.0.7/drivers/mtd/inftlmount.c 2011-07-21 22:17:23.000000000 -0400
30389+++ linux-3.0.7/drivers/mtd/inftlmount.c 2011-08-23 21:48:14.000000000 -0400
30390@@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
30391 struct INFTLPartition *ip;
30392 size_t retlen;
30393
30394+ pax_track_stack();
30395+
30396 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
30397
30398 /*
30399diff -urNp linux-3.0.7/drivers/mtd/lpddr/qinfo_probe.c linux-3.0.7/drivers/mtd/lpddr/qinfo_probe.c
30400--- linux-3.0.7/drivers/mtd/lpddr/qinfo_probe.c 2011-07-21 22:17:23.000000000 -0400
30401+++ linux-3.0.7/drivers/mtd/lpddr/qinfo_probe.c 2011-08-23 21:48:14.000000000 -0400
30402@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
30403 {
30404 map_word pfow_val[4];
30405
30406+ pax_track_stack();
30407+
30408 /* Check identification string */
30409 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
30410 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
30411diff -urNp linux-3.0.7/drivers/mtd/mtdchar.c linux-3.0.7/drivers/mtd/mtdchar.c
30412--- linux-3.0.7/drivers/mtd/mtdchar.c 2011-07-21 22:17:23.000000000 -0400
30413+++ linux-3.0.7/drivers/mtd/mtdchar.c 2011-08-23 21:48:14.000000000 -0400
30414@@ -553,6 +553,8 @@ static int mtd_ioctl(struct file *file,
30415 u_long size;
30416 struct mtd_info_user info;
30417
30418+ pax_track_stack();
30419+
30420 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
30421
30422 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
30423diff -urNp linux-3.0.7/drivers/mtd/nand/denali.c linux-3.0.7/drivers/mtd/nand/denali.c
30424--- linux-3.0.7/drivers/mtd/nand/denali.c 2011-07-21 22:17:23.000000000 -0400
30425+++ linux-3.0.7/drivers/mtd/nand/denali.c 2011-08-23 21:47:55.000000000 -0400
30426@@ -26,6 +26,7 @@
30427 #include <linux/pci.h>
30428 #include <linux/mtd/mtd.h>
30429 #include <linux/module.h>
30430+#include <linux/slab.h>
30431
30432 #include "denali.h"
30433
30434diff -urNp linux-3.0.7/drivers/mtd/nftlcore.c linux-3.0.7/drivers/mtd/nftlcore.c
30435--- linux-3.0.7/drivers/mtd/nftlcore.c 2011-07-21 22:17:23.000000000 -0400
30436+++ linux-3.0.7/drivers/mtd/nftlcore.c 2011-08-23 21:48:14.000000000 -0400
30437@@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
30438 int inplace = 1;
30439 size_t retlen;
30440
30441+ pax_track_stack();
30442+
30443 memset(BlockMap, 0xff, sizeof(BlockMap));
30444 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
30445
30446diff -urNp linux-3.0.7/drivers/mtd/nftlmount.c linux-3.0.7/drivers/mtd/nftlmount.c
30447--- linux-3.0.7/drivers/mtd/nftlmount.c 2011-07-21 22:17:23.000000000 -0400
30448+++ linux-3.0.7/drivers/mtd/nftlmount.c 2011-08-23 21:48:14.000000000 -0400
30449@@ -24,6 +24,7 @@
30450 #include <asm/errno.h>
30451 #include <linux/delay.h>
30452 #include <linux/slab.h>
30453+#include <linux/sched.h>
30454 #include <linux/mtd/mtd.h>
30455 #include <linux/mtd/nand.h>
30456 #include <linux/mtd/nftl.h>
30457@@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
30458 struct mtd_info *mtd = nftl->mbd.mtd;
30459 unsigned int i;
30460
30461+ pax_track_stack();
30462+
30463 /* Assume logical EraseSize == physical erasesize for starting the scan.
30464 We'll sort it out later if we find a MediaHeader which says otherwise */
30465 /* Actually, we won't. The new DiskOnChip driver has already scanned
30466diff -urNp linux-3.0.7/drivers/mtd/ubi/build.c linux-3.0.7/drivers/mtd/ubi/build.c
30467--- linux-3.0.7/drivers/mtd/ubi/build.c 2011-07-21 22:17:23.000000000 -0400
30468+++ linux-3.0.7/drivers/mtd/ubi/build.c 2011-08-23 21:47:55.000000000 -0400
30469@@ -1287,7 +1287,7 @@ module_exit(ubi_exit);
30470 static int __init bytes_str_to_int(const char *str)
30471 {
30472 char *endp;
30473- unsigned long result;
30474+ unsigned long result, scale = 1;
30475
30476 result = simple_strtoul(str, &endp, 0);
30477 if (str == endp || result >= INT_MAX) {
30478@@ -1298,11 +1298,11 @@ static int __init bytes_str_to_int(const
30479
30480 switch (*endp) {
30481 case 'G':
30482- result *= 1024;
30483+ scale *= 1024;
30484 case 'M':
30485- result *= 1024;
30486+ scale *= 1024;
30487 case 'K':
30488- result *= 1024;
30489+ scale *= 1024;
30490 if (endp[1] == 'i' && endp[2] == 'B')
30491 endp += 2;
30492 case '\0':
30493@@ -1313,7 +1313,13 @@ static int __init bytes_str_to_int(const
30494 return -EINVAL;
30495 }
30496
30497- return result;
30498+ if ((intoverflow_t)result*scale >= INT_MAX) {
30499+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
30500+ str);
30501+ return -EINVAL;
30502+ }
30503+
30504+ return result*scale;
30505 }
30506
30507 /**
30508diff -urNp linux-3.0.7/drivers/net/atlx/atl2.c linux-3.0.7/drivers/net/atlx/atl2.c
30509--- linux-3.0.7/drivers/net/atlx/atl2.c 2011-07-21 22:17:23.000000000 -0400
30510+++ linux-3.0.7/drivers/net/atlx/atl2.c 2011-10-11 10:44:33.000000000 -0400
30511@@ -2840,7 +2840,7 @@ static void atl2_force_ps(struct atl2_hw
30512 */
30513
30514 #define ATL2_PARAM(X, desc) \
30515- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
30516+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
30517 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
30518 MODULE_PARM_DESC(X, desc);
30519 #else
30520diff -urNp linux-3.0.7/drivers/net/bna/bfa_ioc_ct.c linux-3.0.7/drivers/net/bna/bfa_ioc_ct.c
30521--- linux-3.0.7/drivers/net/bna/bfa_ioc_ct.c 2011-07-21 22:17:23.000000000 -0400
30522+++ linux-3.0.7/drivers/net/bna/bfa_ioc_ct.c 2011-08-23 21:47:55.000000000 -0400
30523@@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct b
30524 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
30525 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
30526
30527-static struct bfa_ioc_hwif nw_hwif_ct;
30528+static struct bfa_ioc_hwif nw_hwif_ct = {
30529+ .ioc_pll_init = bfa_ioc_ct_pll_init,
30530+ .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
30531+ .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
30532+ .ioc_reg_init = bfa_ioc_ct_reg_init,
30533+ .ioc_map_port = bfa_ioc_ct_map_port,
30534+ .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
30535+ .ioc_notify_fail = bfa_ioc_ct_notify_fail,
30536+ .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
30537+ .ioc_sync_start = bfa_ioc_ct_sync_start,
30538+ .ioc_sync_join = bfa_ioc_ct_sync_join,
30539+ .ioc_sync_leave = bfa_ioc_ct_sync_leave,
30540+ .ioc_sync_ack = bfa_ioc_ct_sync_ack,
30541+ .ioc_sync_complete = bfa_ioc_ct_sync_complete
30542+};
30543
30544 /**
30545 * Called from bfa_ioc_attach() to map asic specific calls.
30546@@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
30547 void
30548 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
30549 {
30550- nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
30551- nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
30552- nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
30553- nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
30554- nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
30555- nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
30556- nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
30557- nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
30558- nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
30559- nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
30560- nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
30561- nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
30562- nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
30563-
30564 ioc->ioc_hwif = &nw_hwif_ct;
30565 }
30566
30567diff -urNp linux-3.0.7/drivers/net/bna/bnad.c linux-3.0.7/drivers/net/bna/bnad.c
30568--- linux-3.0.7/drivers/net/bna/bnad.c 2011-07-21 22:17:23.000000000 -0400
30569+++ linux-3.0.7/drivers/net/bna/bnad.c 2011-08-23 21:47:55.000000000 -0400
30570@@ -1681,7 +1681,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx
30571 struct bna_intr_info *intr_info =
30572 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
30573 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
30574- struct bna_tx_event_cbfn tx_cbfn;
30575+ static struct bna_tx_event_cbfn tx_cbfn = {
30576+ /* Initialize the tx event handlers */
30577+ .tcb_setup_cbfn = bnad_cb_tcb_setup,
30578+ .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
30579+ .tx_stall_cbfn = bnad_cb_tx_stall,
30580+ .tx_resume_cbfn = bnad_cb_tx_resume,
30581+ .tx_cleanup_cbfn = bnad_cb_tx_cleanup
30582+ };
30583 struct bna_tx *tx;
30584 unsigned long flags;
30585
30586@@ -1690,13 +1697,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx
30587 tx_config->txq_depth = bnad->txq_depth;
30588 tx_config->tx_type = BNA_TX_T_REGULAR;
30589
30590- /* Initialize the tx event handlers */
30591- tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
30592- tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
30593- tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
30594- tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
30595- tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
30596-
30597 /* Get BNA's resource requirement for one tx object */
30598 spin_lock_irqsave(&bnad->bna_lock, flags);
30599 bna_tx_res_req(bnad->num_txq_per_tx,
30600@@ -1827,21 +1827,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx
30601 struct bna_intr_info *intr_info =
30602 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
30603 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
30604- struct bna_rx_event_cbfn rx_cbfn;
30605+ static struct bna_rx_event_cbfn rx_cbfn = {
30606+ /* Initialize the Rx event handlers */
30607+ .rcb_setup_cbfn = bnad_cb_rcb_setup,
30608+ .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
30609+ .ccb_setup_cbfn = bnad_cb_ccb_setup,
30610+ .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
30611+ .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
30612+ .rx_post_cbfn = bnad_cb_rx_post
30613+ };
30614 struct bna_rx *rx;
30615 unsigned long flags;
30616
30617 /* Initialize the Rx object configuration */
30618 bnad_init_rx_config(bnad, rx_config);
30619
30620- /* Initialize the Rx event handlers */
30621- rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
30622- rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
30623- rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
30624- rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
30625- rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
30626- rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
30627-
30628 /* Get BNA's resource requirement for one Rx object */
30629 spin_lock_irqsave(&bnad->bna_lock, flags);
30630 bna_rx_res_req(rx_config, res_info);
30631diff -urNp linux-3.0.7/drivers/net/bnx2.c linux-3.0.7/drivers/net/bnx2.c
30632--- linux-3.0.7/drivers/net/bnx2.c 2011-10-16 21:54:54.000000000 -0400
30633+++ linux-3.0.7/drivers/net/bnx2.c 2011-10-16 21:55:27.000000000 -0400
30634@@ -5831,6 +5831,8 @@ bnx2_test_nvram(struct bnx2 *bp)
30635 int rc = 0;
30636 u32 magic, csum;
30637
30638+ pax_track_stack();
30639+
30640 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
30641 goto test_nvram_done;
30642
30643diff -urNp linux-3.0.7/drivers/net/bnx2x/bnx2x_ethtool.c linux-3.0.7/drivers/net/bnx2x/bnx2x_ethtool.c
30644--- linux-3.0.7/drivers/net/bnx2x/bnx2x_ethtool.c 2011-07-21 22:17:23.000000000 -0400
30645+++ linux-3.0.7/drivers/net/bnx2x/bnx2x_ethtool.c 2011-08-23 21:48:14.000000000 -0400
30646@@ -1705,6 +1705,8 @@ static int bnx2x_test_nvram(struct bnx2x
30647 int i, rc;
30648 u32 magic, crc;
30649
30650+ pax_track_stack();
30651+
30652 if (BP_NOMCP(bp))
30653 return 0;
30654
30655diff -urNp linux-3.0.7/drivers/net/can/mscan/mscan.c linux-3.0.7/drivers/net/can/mscan/mscan.c
30656--- linux-3.0.7/drivers/net/can/mscan/mscan.c 2011-07-21 22:17:23.000000000 -0400
30657+++ linux-3.0.7/drivers/net/can/mscan/mscan.c 2011-10-17 02:51:46.000000000 -0400
30658@@ -261,11 +261,13 @@ static netdev_tx_t mscan_start_xmit(stru
30659 void __iomem *data = &regs->tx.dsr1_0;
30660 u16 *payload = (u16 *)frame->data;
30661
30662- /* It is safe to write into dsr[dlc+1] */
30663- for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
30664+ for (i = 0; i < frame->can_dlc / 2; i++) {
30665 out_be16(data, *payload++);
30666 data += 2 + _MSCAN_RESERVED_DSR_SIZE;
30667 }
30668+ /* write remaining byte if necessary */
30669+ if (frame->can_dlc & 1)
30670+ out_8(data, frame->data[frame->can_dlc - 1]);
30671 }
30672
30673 out_8(&regs->tx.dlr, frame->can_dlc);
30674@@ -330,10 +332,13 @@ static void mscan_get_rx_frame(struct ne
30675 void __iomem *data = &regs->rx.dsr1_0;
30676 u16 *payload = (u16 *)frame->data;
30677
30678- for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
30679+ for (i = 0; i < frame->can_dlc / 2; i++) {
30680 *payload++ = in_be16(data);
30681 data += 2 + _MSCAN_RESERVED_DSR_SIZE;
30682 }
30683+ /* read remaining byte if necessary */
30684+ if (frame->can_dlc & 1)
30685+ frame->data[frame->can_dlc - 1] = in_8(data);
30686 }
30687
30688 out_8(&regs->canrflg, MSCAN_RXF);
30689diff -urNp linux-3.0.7/drivers/net/cxgb3/l2t.h linux-3.0.7/drivers/net/cxgb3/l2t.h
30690--- linux-3.0.7/drivers/net/cxgb3/l2t.h 2011-10-16 21:54:54.000000000 -0400
30691+++ linux-3.0.7/drivers/net/cxgb3/l2t.h 2011-10-16 21:55:27.000000000 -0400
30692@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)
30693 */
30694 struct l2t_skb_cb {
30695 arp_failure_handler_func arp_failure_handler;
30696-};
30697+} __no_const;
30698
30699 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
30700
30701diff -urNp linux-3.0.7/drivers/net/cxgb4/cxgb4_main.c linux-3.0.7/drivers/net/cxgb4/cxgb4_main.c
30702--- linux-3.0.7/drivers/net/cxgb4/cxgb4_main.c 2011-07-21 22:17:23.000000000 -0400
30703+++ linux-3.0.7/drivers/net/cxgb4/cxgb4_main.c 2011-08-23 21:48:14.000000000 -0400
30704@@ -3396,6 +3396,8 @@ static int __devinit enable_msix(struct
30705 unsigned int nchan = adap->params.nports;
30706 struct msix_entry entries[MAX_INGQ + 1];
30707
30708+ pax_track_stack();
30709+
30710 for (i = 0; i < ARRAY_SIZE(entries); ++i)
30711 entries[i].entry = i;
30712
30713diff -urNp linux-3.0.7/drivers/net/cxgb4/t4_hw.c linux-3.0.7/drivers/net/cxgb4/t4_hw.c
30714--- linux-3.0.7/drivers/net/cxgb4/t4_hw.c 2011-07-21 22:17:23.000000000 -0400
30715+++ linux-3.0.7/drivers/net/cxgb4/t4_hw.c 2011-08-23 21:48:14.000000000 -0400
30716@@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
30717 u8 vpd[VPD_LEN], csum;
30718 unsigned int vpdr_len, kw_offset, id_len;
30719
30720+ pax_track_stack();
30721+
30722 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
30723 if (ret < 0)
30724 return ret;
30725diff -urNp linux-3.0.7/drivers/net/e1000e/82571.c linux-3.0.7/drivers/net/e1000e/82571.c
30726--- linux-3.0.7/drivers/net/e1000e/82571.c 2011-07-21 22:17:23.000000000 -0400
30727+++ linux-3.0.7/drivers/net/e1000e/82571.c 2011-08-23 21:47:55.000000000 -0400
30728@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
30729 {
30730 struct e1000_hw *hw = &adapter->hw;
30731 struct e1000_mac_info *mac = &hw->mac;
30732- struct e1000_mac_operations *func = &mac->ops;
30733+ e1000_mac_operations_no_const *func = &mac->ops;
30734 u32 swsm = 0;
30735 u32 swsm2 = 0;
30736 bool force_clear_smbi = false;
30737diff -urNp linux-3.0.7/drivers/net/e1000e/es2lan.c linux-3.0.7/drivers/net/e1000e/es2lan.c
30738--- linux-3.0.7/drivers/net/e1000e/es2lan.c 2011-07-21 22:17:23.000000000 -0400
30739+++ linux-3.0.7/drivers/net/e1000e/es2lan.c 2011-08-23 21:47:55.000000000 -0400
30740@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
30741 {
30742 struct e1000_hw *hw = &adapter->hw;
30743 struct e1000_mac_info *mac = &hw->mac;
30744- struct e1000_mac_operations *func = &mac->ops;
30745+ e1000_mac_operations_no_const *func = &mac->ops;
30746
30747 /* Set media type */
30748 switch (adapter->pdev->device) {
30749diff -urNp linux-3.0.7/drivers/net/e1000e/hw.h linux-3.0.7/drivers/net/e1000e/hw.h
30750--- linux-3.0.7/drivers/net/e1000e/hw.h 2011-07-21 22:17:23.000000000 -0400
30751+++ linux-3.0.7/drivers/net/e1000e/hw.h 2011-08-23 21:47:55.000000000 -0400
30752@@ -776,6 +776,7 @@ struct e1000_mac_operations {
30753 void (*write_vfta)(struct e1000_hw *, u32, u32);
30754 s32 (*read_mac_addr)(struct e1000_hw *);
30755 };
30756+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
30757
30758 /* Function pointers for the PHY. */
30759 struct e1000_phy_operations {
30760@@ -799,6 +800,7 @@ struct e1000_phy_operations {
30761 void (*power_up)(struct e1000_hw *);
30762 void (*power_down)(struct e1000_hw *);
30763 };
30764+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
30765
30766 /* Function pointers for the NVM. */
30767 struct e1000_nvm_operations {
30768@@ -810,9 +812,10 @@ struct e1000_nvm_operations {
30769 s32 (*validate)(struct e1000_hw *);
30770 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
30771 };
30772+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
30773
30774 struct e1000_mac_info {
30775- struct e1000_mac_operations ops;
30776+ e1000_mac_operations_no_const ops;
30777 u8 addr[ETH_ALEN];
30778 u8 perm_addr[ETH_ALEN];
30779
30780@@ -853,7 +856,7 @@ struct e1000_mac_info {
30781 };
30782
30783 struct e1000_phy_info {
30784- struct e1000_phy_operations ops;
30785+ e1000_phy_operations_no_const ops;
30786
30787 enum e1000_phy_type type;
30788
30789@@ -887,7 +890,7 @@ struct e1000_phy_info {
30790 };
30791
30792 struct e1000_nvm_info {
30793- struct e1000_nvm_operations ops;
30794+ e1000_nvm_operations_no_const ops;
30795
30796 enum e1000_nvm_type type;
30797 enum e1000_nvm_override override;
30798diff -urNp linux-3.0.7/drivers/net/fealnx.c linux-3.0.7/drivers/net/fealnx.c
30799--- linux-3.0.7/drivers/net/fealnx.c 2011-07-21 22:17:23.000000000 -0400
30800+++ linux-3.0.7/drivers/net/fealnx.c 2011-10-11 10:44:33.000000000 -0400
30801@@ -150,7 +150,7 @@ struct chip_info {
30802 int flags;
30803 };
30804
30805-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
30806+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
30807 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
30808 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
30809 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
30810diff -urNp linux-3.0.7/drivers/net/hamradio/6pack.c linux-3.0.7/drivers/net/hamradio/6pack.c
30811--- linux-3.0.7/drivers/net/hamradio/6pack.c 2011-07-21 22:17:23.000000000 -0400
30812+++ linux-3.0.7/drivers/net/hamradio/6pack.c 2011-08-23 21:48:14.000000000 -0400
30813@@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
30814 unsigned char buf[512];
30815 int count1;
30816
30817+ pax_track_stack();
30818+
30819 if (!count)
30820 return;
30821
30822diff -urNp linux-3.0.7/drivers/net/igb/e1000_hw.h linux-3.0.7/drivers/net/igb/e1000_hw.h
30823--- linux-3.0.7/drivers/net/igb/e1000_hw.h 2011-07-21 22:17:23.000000000 -0400
30824+++ linux-3.0.7/drivers/net/igb/e1000_hw.h 2011-08-23 21:47:55.000000000 -0400
30825@@ -314,6 +314,7 @@ struct e1000_mac_operations {
30826 s32 (*read_mac_addr)(struct e1000_hw *);
30827 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
30828 };
30829+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
30830
30831 struct e1000_phy_operations {
30832 s32 (*acquire)(struct e1000_hw *);
30833@@ -330,6 +331,7 @@ struct e1000_phy_operations {
30834 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
30835 s32 (*write_reg)(struct e1000_hw *, u32, u16);
30836 };
30837+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
30838
30839 struct e1000_nvm_operations {
30840 s32 (*acquire)(struct e1000_hw *);
30841@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
30842 s32 (*update)(struct e1000_hw *);
30843 s32 (*validate)(struct e1000_hw *);
30844 };
30845+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
30846
30847 struct e1000_info {
30848 s32 (*get_invariants)(struct e1000_hw *);
30849@@ -350,7 +353,7 @@ struct e1000_info {
30850 extern const struct e1000_info e1000_82575_info;
30851
30852 struct e1000_mac_info {
30853- struct e1000_mac_operations ops;
30854+ e1000_mac_operations_no_const ops;
30855
30856 u8 addr[6];
30857 u8 perm_addr[6];
30858@@ -388,7 +391,7 @@ struct e1000_mac_info {
30859 };
30860
30861 struct e1000_phy_info {
30862- struct e1000_phy_operations ops;
30863+ e1000_phy_operations_no_const ops;
30864
30865 enum e1000_phy_type type;
30866
30867@@ -423,7 +426,7 @@ struct e1000_phy_info {
30868 };
30869
30870 struct e1000_nvm_info {
30871- struct e1000_nvm_operations ops;
30872+ e1000_nvm_operations_no_const ops;
30873 enum e1000_nvm_type type;
30874 enum e1000_nvm_override override;
30875
30876@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
30877 s32 (*check_for_ack)(struct e1000_hw *, u16);
30878 s32 (*check_for_rst)(struct e1000_hw *, u16);
30879 };
30880+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
30881
30882 struct e1000_mbx_stats {
30883 u32 msgs_tx;
30884@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
30885 };
30886
30887 struct e1000_mbx_info {
30888- struct e1000_mbx_operations ops;
30889+ e1000_mbx_operations_no_const ops;
30890 struct e1000_mbx_stats stats;
30891 u32 timeout;
30892 u32 usec_delay;
30893diff -urNp linux-3.0.7/drivers/net/igbvf/vf.h linux-3.0.7/drivers/net/igbvf/vf.h
30894--- linux-3.0.7/drivers/net/igbvf/vf.h 2011-07-21 22:17:23.000000000 -0400
30895+++ linux-3.0.7/drivers/net/igbvf/vf.h 2011-08-23 21:47:55.000000000 -0400
30896@@ -189,9 +189,10 @@ struct e1000_mac_operations {
30897 s32 (*read_mac_addr)(struct e1000_hw *);
30898 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
30899 };
30900+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
30901
30902 struct e1000_mac_info {
30903- struct e1000_mac_operations ops;
30904+ e1000_mac_operations_no_const ops;
30905 u8 addr[6];
30906 u8 perm_addr[6];
30907
30908@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
30909 s32 (*check_for_ack)(struct e1000_hw *);
30910 s32 (*check_for_rst)(struct e1000_hw *);
30911 };
30912+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
30913
30914 struct e1000_mbx_stats {
30915 u32 msgs_tx;
30916@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
30917 };
30918
30919 struct e1000_mbx_info {
30920- struct e1000_mbx_operations ops;
30921+ e1000_mbx_operations_no_const ops;
30922 struct e1000_mbx_stats stats;
30923 u32 timeout;
30924 u32 usec_delay;
30925diff -urNp linux-3.0.7/drivers/net/ixgb/ixgb_main.c linux-3.0.7/drivers/net/ixgb/ixgb_main.c
30926--- linux-3.0.7/drivers/net/ixgb/ixgb_main.c 2011-07-21 22:17:23.000000000 -0400
30927+++ linux-3.0.7/drivers/net/ixgb/ixgb_main.c 2011-08-23 21:48:14.000000000 -0400
30928@@ -1070,6 +1070,8 @@ ixgb_set_multi(struct net_device *netdev
30929 u32 rctl;
30930 int i;
30931
30932+ pax_track_stack();
30933+
30934 /* Check for Promiscuous and All Multicast modes */
30935
30936 rctl = IXGB_READ_REG(hw, RCTL);
30937diff -urNp linux-3.0.7/drivers/net/ixgb/ixgb_param.c linux-3.0.7/drivers/net/ixgb/ixgb_param.c
30938--- linux-3.0.7/drivers/net/ixgb/ixgb_param.c 2011-07-21 22:17:23.000000000 -0400
30939+++ linux-3.0.7/drivers/net/ixgb/ixgb_param.c 2011-08-23 21:48:14.000000000 -0400
30940@@ -261,6 +261,9 @@ void __devinit
30941 ixgb_check_options(struct ixgb_adapter *adapter)
30942 {
30943 int bd = adapter->bd_number;
30944+
30945+ pax_track_stack();
30946+
30947 if (bd >= IXGB_MAX_NIC) {
30948 pr_notice("Warning: no configuration for board #%i\n", bd);
30949 pr_notice("Using defaults for all values\n");
30950diff -urNp linux-3.0.7/drivers/net/ixgbe/ixgbe_type.h linux-3.0.7/drivers/net/ixgbe/ixgbe_type.h
30951--- linux-3.0.7/drivers/net/ixgbe/ixgbe_type.h 2011-07-21 22:17:23.000000000 -0400
30952+++ linux-3.0.7/drivers/net/ixgbe/ixgbe_type.h 2011-08-23 21:47:55.000000000 -0400
30953@@ -2584,6 +2584,7 @@ struct ixgbe_eeprom_operations {
30954 s32 (*update_checksum)(struct ixgbe_hw *);
30955 u16 (*calc_checksum)(struct ixgbe_hw *);
30956 };
30957+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
30958
30959 struct ixgbe_mac_operations {
30960 s32 (*init_hw)(struct ixgbe_hw *);
30961@@ -2639,6 +2640,7 @@ struct ixgbe_mac_operations {
30962 /* Flow Control */
30963 s32 (*fc_enable)(struct ixgbe_hw *, s32);
30964 };
30965+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
30966
30967 struct ixgbe_phy_operations {
30968 s32 (*identify)(struct ixgbe_hw *);
30969@@ -2658,9 +2660,10 @@ struct ixgbe_phy_operations {
30970 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
30971 s32 (*check_overtemp)(struct ixgbe_hw *);
30972 };
30973+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
30974
30975 struct ixgbe_eeprom_info {
30976- struct ixgbe_eeprom_operations ops;
30977+ ixgbe_eeprom_operations_no_const ops;
30978 enum ixgbe_eeprom_type type;
30979 u32 semaphore_delay;
30980 u16 word_size;
30981@@ -2670,7 +2673,7 @@ struct ixgbe_eeprom_info {
30982
30983 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
30984 struct ixgbe_mac_info {
30985- struct ixgbe_mac_operations ops;
30986+ ixgbe_mac_operations_no_const ops;
30987 enum ixgbe_mac_type type;
30988 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
30989 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
30990@@ -2698,7 +2701,7 @@ struct ixgbe_mac_info {
30991 };
30992
30993 struct ixgbe_phy_info {
30994- struct ixgbe_phy_operations ops;
30995+ ixgbe_phy_operations_no_const ops;
30996 struct mdio_if_info mdio;
30997 enum ixgbe_phy_type type;
30998 u32 id;
30999@@ -2726,6 +2729,7 @@ struct ixgbe_mbx_operations {
31000 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
31001 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
31002 };
31003+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
31004
31005 struct ixgbe_mbx_stats {
31006 u32 msgs_tx;
31007@@ -2737,7 +2741,7 @@ struct ixgbe_mbx_stats {
31008 };
31009
31010 struct ixgbe_mbx_info {
31011- struct ixgbe_mbx_operations ops;
31012+ ixgbe_mbx_operations_no_const ops;
31013 struct ixgbe_mbx_stats stats;
31014 u32 timeout;
31015 u32 usec_delay;
31016diff -urNp linux-3.0.7/drivers/net/ixgbevf/vf.h linux-3.0.7/drivers/net/ixgbevf/vf.h
31017--- linux-3.0.7/drivers/net/ixgbevf/vf.h 2011-07-21 22:17:23.000000000 -0400
31018+++ linux-3.0.7/drivers/net/ixgbevf/vf.h 2011-08-23 21:47:55.000000000 -0400
31019@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
31020 s32 (*clear_vfta)(struct ixgbe_hw *);
31021 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
31022 };
31023+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
31024
31025 enum ixgbe_mac_type {
31026 ixgbe_mac_unknown = 0,
31027@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
31028 };
31029
31030 struct ixgbe_mac_info {
31031- struct ixgbe_mac_operations ops;
31032+ ixgbe_mac_operations_no_const ops;
31033 u8 addr[6];
31034 u8 perm_addr[6];
31035
31036@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
31037 s32 (*check_for_ack)(struct ixgbe_hw *);
31038 s32 (*check_for_rst)(struct ixgbe_hw *);
31039 };
31040+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
31041
31042 struct ixgbe_mbx_stats {
31043 u32 msgs_tx;
31044@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
31045 };
31046
31047 struct ixgbe_mbx_info {
31048- struct ixgbe_mbx_operations ops;
31049+ ixgbe_mbx_operations_no_const ops;
31050 struct ixgbe_mbx_stats stats;
31051 u32 timeout;
31052 u32 udelay;
31053diff -urNp linux-3.0.7/drivers/net/ksz884x.c linux-3.0.7/drivers/net/ksz884x.c
31054--- linux-3.0.7/drivers/net/ksz884x.c 2011-07-21 22:17:23.000000000 -0400
31055+++ linux-3.0.7/drivers/net/ksz884x.c 2011-08-23 21:48:14.000000000 -0400
31056@@ -6534,6 +6534,8 @@ static void netdev_get_ethtool_stats(str
31057 int rc;
31058 u64 counter[TOTAL_PORT_COUNTER_NUM];
31059
31060+ pax_track_stack();
31061+
31062 mutex_lock(&hw_priv->lock);
31063 n = SWITCH_PORT_NUM;
31064 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
31065diff -urNp linux-3.0.7/drivers/net/mlx4/main.c linux-3.0.7/drivers/net/mlx4/main.c
31066--- linux-3.0.7/drivers/net/mlx4/main.c 2011-07-21 22:17:23.000000000 -0400
31067+++ linux-3.0.7/drivers/net/mlx4/main.c 2011-08-23 21:48:14.000000000 -0400
31068@@ -40,6 +40,7 @@
31069 #include <linux/dma-mapping.h>
31070 #include <linux/slab.h>
31071 #include <linux/io-mapping.h>
31072+#include <linux/sched.h>
31073
31074 #include <linux/mlx4/device.h>
31075 #include <linux/mlx4/doorbell.h>
31076@@ -764,6 +765,8 @@ static int mlx4_init_hca(struct mlx4_dev
31077 u64 icm_size;
31078 int err;
31079
31080+ pax_track_stack();
31081+
31082 err = mlx4_QUERY_FW(dev);
31083 if (err) {
31084 if (err == -EACCES)
31085diff -urNp linux-3.0.7/drivers/net/niu.c linux-3.0.7/drivers/net/niu.c
31086--- linux-3.0.7/drivers/net/niu.c 2011-09-02 18:11:21.000000000 -0400
31087+++ linux-3.0.7/drivers/net/niu.c 2011-08-23 21:48:14.000000000 -0400
31088@@ -9056,6 +9056,8 @@ static void __devinit niu_try_msix(struc
31089 int i, num_irqs, err;
31090 u8 first_ldg;
31091
31092+ pax_track_stack();
31093+
31094 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
31095 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
31096 ldg_num_map[i] = first_ldg + i;
31097diff -urNp linux-3.0.7/drivers/net/pcnet32.c linux-3.0.7/drivers/net/pcnet32.c
31098--- linux-3.0.7/drivers/net/pcnet32.c 2011-07-21 22:17:23.000000000 -0400
31099+++ linux-3.0.7/drivers/net/pcnet32.c 2011-08-23 21:47:55.000000000 -0400
31100@@ -82,7 +82,7 @@ static int cards_found;
31101 /*
31102 * VLB I/O addresses
31103 */
31104-static unsigned int pcnet32_portlist[] __initdata =
31105+static unsigned int pcnet32_portlist[] __devinitdata =
31106 { 0x300, 0x320, 0x340, 0x360, 0 };
31107
31108 static int pcnet32_debug;
31109@@ -270,7 +270,7 @@ struct pcnet32_private {
31110 struct sk_buff **rx_skbuff;
31111 dma_addr_t *tx_dma_addr;
31112 dma_addr_t *rx_dma_addr;
31113- struct pcnet32_access a;
31114+ struct pcnet32_access *a;
31115 spinlock_t lock; /* Guard lock */
31116 unsigned int cur_rx, cur_tx; /* The next free ring entry */
31117 unsigned int rx_ring_size; /* current rx ring size */
31118@@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct n
31119 u16 val;
31120
31121 netif_wake_queue(dev);
31122- val = lp->a.read_csr(ioaddr, CSR3);
31123+ val = lp->a->read_csr(ioaddr, CSR3);
31124 val &= 0x00ff;
31125- lp->a.write_csr(ioaddr, CSR3, val);
31126+ lp->a->write_csr(ioaddr, CSR3, val);
31127 napi_enable(&lp->napi);
31128 }
31129
31130@@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_d
31131 r = mii_link_ok(&lp->mii_if);
31132 } else if (lp->chip_version >= PCNET32_79C970A) {
31133 ulong ioaddr = dev->base_addr; /* card base I/O address */
31134- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
31135+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
31136 } else { /* can not detect link on really old chips */
31137 r = 1;
31138 }
31139@@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct
31140 pcnet32_netif_stop(dev);
31141
31142 spin_lock_irqsave(&lp->lock, flags);
31143- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31144+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31145
31146 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
31147
31148@@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct
31149 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
31150 {
31151 struct pcnet32_private *lp = netdev_priv(dev);
31152- struct pcnet32_access *a = &lp->a; /* access to registers */
31153+ struct pcnet32_access *a = lp->a; /* access to registers */
31154 ulong ioaddr = dev->base_addr; /* card base I/O address */
31155 struct sk_buff *skb; /* sk buff */
31156 int x, i; /* counters */
31157@@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct
31158 pcnet32_netif_stop(dev);
31159
31160 spin_lock_irqsave(&lp->lock, flags);
31161- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31162+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31163
31164 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
31165
31166 /* Reset the PCNET32 */
31167- lp->a.reset(ioaddr);
31168- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31169+ lp->a->reset(ioaddr);
31170+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31171
31172 /* switch pcnet32 to 32bit mode */
31173- lp->a.write_bcr(ioaddr, 20, 2);
31174+ lp->a->write_bcr(ioaddr, 20, 2);
31175
31176 /* purge & init rings but don't actually restart */
31177 pcnet32_restart(dev, 0x0000);
31178
31179- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31180+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31181
31182 /* Initialize Transmit buffers. */
31183 size = data_len + 15;
31184@@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct
31185
31186 /* set int loopback in CSR15 */
31187 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
31188- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
31189+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
31190
31191 teststatus = cpu_to_le16(0x8000);
31192- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
31193+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
31194
31195 /* Check status of descriptors */
31196 for (x = 0; x < numbuffs; x++) {
31197@@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct
31198 }
31199 }
31200
31201- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31202+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31203 wmb();
31204 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
31205 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
31206@@ -1015,7 +1015,7 @@ clean_up:
31207 pcnet32_restart(dev, CSR0_NORMAL);
31208 } else {
31209 pcnet32_purge_rx_ring(dev);
31210- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
31211+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
31212 }
31213 spin_unlock_irqrestore(&lp->lock, flags);
31214
31215@@ -1026,7 +1026,7 @@ static int pcnet32_set_phys_id(struct ne
31216 enum ethtool_phys_id_state state)
31217 {
31218 struct pcnet32_private *lp = netdev_priv(dev);
31219- struct pcnet32_access *a = &lp->a;
31220+ struct pcnet32_access *a = lp->a;
31221 ulong ioaddr = dev->base_addr;
31222 unsigned long flags;
31223 int i;
31224@@ -1067,7 +1067,7 @@ static int pcnet32_suspend(struct net_de
31225 {
31226 int csr5;
31227 struct pcnet32_private *lp = netdev_priv(dev);
31228- struct pcnet32_access *a = &lp->a;
31229+ struct pcnet32_access *a = lp->a;
31230 ulong ioaddr = dev->base_addr;
31231 int ticks;
31232
31233@@ -1324,8 +1324,8 @@ static int pcnet32_poll(struct napi_stru
31234 spin_lock_irqsave(&lp->lock, flags);
31235 if (pcnet32_tx(dev)) {
31236 /* reset the chip to clear the error condition, then restart */
31237- lp->a.reset(ioaddr);
31238- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31239+ lp->a->reset(ioaddr);
31240+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31241 pcnet32_restart(dev, CSR0_START);
31242 netif_wake_queue(dev);
31243 }
31244@@ -1337,12 +1337,12 @@ static int pcnet32_poll(struct napi_stru
31245 __napi_complete(napi);
31246
31247 /* clear interrupt masks */
31248- val = lp->a.read_csr(ioaddr, CSR3);
31249+ val = lp->a->read_csr(ioaddr, CSR3);
31250 val &= 0x00ff;
31251- lp->a.write_csr(ioaddr, CSR3, val);
31252+ lp->a->write_csr(ioaddr, CSR3, val);
31253
31254 /* Set interrupt enable. */
31255- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
31256+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
31257
31258 spin_unlock_irqrestore(&lp->lock, flags);
31259 }
31260@@ -1365,7 +1365,7 @@ static void pcnet32_get_regs(struct net_
31261 int i, csr0;
31262 u16 *buff = ptr;
31263 struct pcnet32_private *lp = netdev_priv(dev);
31264- struct pcnet32_access *a = &lp->a;
31265+ struct pcnet32_access *a = lp->a;
31266 ulong ioaddr = dev->base_addr;
31267 unsigned long flags;
31268
31269@@ -1401,9 +1401,9 @@ static void pcnet32_get_regs(struct net_
31270 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
31271 if (lp->phymask & (1 << j)) {
31272 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
31273- lp->a.write_bcr(ioaddr, 33,
31274+ lp->a->write_bcr(ioaddr, 33,
31275 (j << 5) | i);
31276- *buff++ = lp->a.read_bcr(ioaddr, 34);
31277+ *buff++ = lp->a->read_bcr(ioaddr, 34);
31278 }
31279 }
31280 }
31281@@ -1785,7 +1785,7 @@ pcnet32_probe1(unsigned long ioaddr, int
31282 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
31283 lp->options |= PCNET32_PORT_FD;
31284
31285- lp->a = *a;
31286+ lp->a = a;
31287
31288 /* prior to register_netdev, dev->name is not yet correct */
31289 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
31290@@ -1844,7 +1844,7 @@ pcnet32_probe1(unsigned long ioaddr, int
31291 if (lp->mii) {
31292 /* lp->phycount and lp->phymask are set to 0 by memset above */
31293
31294- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
31295+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
31296 /* scan for PHYs */
31297 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
31298 unsigned short id1, id2;
31299@@ -1864,7 +1864,7 @@ pcnet32_probe1(unsigned long ioaddr, int
31300 pr_info("Found PHY %04x:%04x at address %d\n",
31301 id1, id2, i);
31302 }
31303- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
31304+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
31305 if (lp->phycount > 1)
31306 lp->options |= PCNET32_PORT_MII;
31307 }
31308@@ -2020,10 +2020,10 @@ static int pcnet32_open(struct net_devic
31309 }
31310
31311 /* Reset the PCNET32 */
31312- lp->a.reset(ioaddr);
31313+ lp->a->reset(ioaddr);
31314
31315 /* switch pcnet32 to 32bit mode */
31316- lp->a.write_bcr(ioaddr, 20, 2);
31317+ lp->a->write_bcr(ioaddr, 20, 2);
31318
31319 netif_printk(lp, ifup, KERN_DEBUG, dev,
31320 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
31321@@ -2032,14 +2032,14 @@ static int pcnet32_open(struct net_devic
31322 (u32) (lp->init_dma_addr));
31323
31324 /* set/reset autoselect bit */
31325- val = lp->a.read_bcr(ioaddr, 2) & ~2;
31326+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
31327 if (lp->options & PCNET32_PORT_ASEL)
31328 val |= 2;
31329- lp->a.write_bcr(ioaddr, 2, val);
31330+ lp->a->write_bcr(ioaddr, 2, val);
31331
31332 /* handle full duplex setting */
31333 if (lp->mii_if.full_duplex) {
31334- val = lp->a.read_bcr(ioaddr, 9) & ~3;
31335+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
31336 if (lp->options & PCNET32_PORT_FD) {
31337 val |= 1;
31338 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
31339@@ -2049,14 +2049,14 @@ static int pcnet32_open(struct net_devic
31340 if (lp->chip_version == 0x2627)
31341 val |= 3;
31342 }
31343- lp->a.write_bcr(ioaddr, 9, val);
31344+ lp->a->write_bcr(ioaddr, 9, val);
31345 }
31346
31347 /* set/reset GPSI bit in test register */
31348- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
31349+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
31350 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
31351 val |= 0x10;
31352- lp->a.write_csr(ioaddr, 124, val);
31353+ lp->a->write_csr(ioaddr, 124, val);
31354
31355 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
31356 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
31357@@ -2075,24 +2075,24 @@ static int pcnet32_open(struct net_devic
31358 * duplex, and/or enable auto negotiation, and clear DANAS
31359 */
31360 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
31361- lp->a.write_bcr(ioaddr, 32,
31362- lp->a.read_bcr(ioaddr, 32) | 0x0080);
31363+ lp->a->write_bcr(ioaddr, 32,
31364+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
31365 /* disable Auto Negotiation, set 10Mpbs, HD */
31366- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
31367+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
31368 if (lp->options & PCNET32_PORT_FD)
31369 val |= 0x10;
31370 if (lp->options & PCNET32_PORT_100)
31371 val |= 0x08;
31372- lp->a.write_bcr(ioaddr, 32, val);
31373+ lp->a->write_bcr(ioaddr, 32, val);
31374 } else {
31375 if (lp->options & PCNET32_PORT_ASEL) {
31376- lp->a.write_bcr(ioaddr, 32,
31377- lp->a.read_bcr(ioaddr,
31378+ lp->a->write_bcr(ioaddr, 32,
31379+ lp->a->read_bcr(ioaddr,
31380 32) | 0x0080);
31381 /* enable auto negotiate, setup, disable fd */
31382- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
31383+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
31384 val |= 0x20;
31385- lp->a.write_bcr(ioaddr, 32, val);
31386+ lp->a->write_bcr(ioaddr, 32, val);
31387 }
31388 }
31389 } else {
31390@@ -2105,10 +2105,10 @@ static int pcnet32_open(struct net_devic
31391 * There is really no good other way to handle multiple PHYs
31392 * other than turning off all automatics
31393 */
31394- val = lp->a.read_bcr(ioaddr, 2);
31395- lp->a.write_bcr(ioaddr, 2, val & ~2);
31396- val = lp->a.read_bcr(ioaddr, 32);
31397- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
31398+ val = lp->a->read_bcr(ioaddr, 2);
31399+ lp->a->write_bcr(ioaddr, 2, val & ~2);
31400+ val = lp->a->read_bcr(ioaddr, 32);
31401+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
31402
31403 if (!(lp->options & PCNET32_PORT_ASEL)) {
31404 /* setup ecmd */
31405@@ -2118,7 +2118,7 @@ static int pcnet32_open(struct net_devic
31406 ethtool_cmd_speed_set(&ecmd,
31407 (lp->options & PCNET32_PORT_100) ?
31408 SPEED_100 : SPEED_10);
31409- bcr9 = lp->a.read_bcr(ioaddr, 9);
31410+ bcr9 = lp->a->read_bcr(ioaddr, 9);
31411
31412 if (lp->options & PCNET32_PORT_FD) {
31413 ecmd.duplex = DUPLEX_FULL;
31414@@ -2127,7 +2127,7 @@ static int pcnet32_open(struct net_devic
31415 ecmd.duplex = DUPLEX_HALF;
31416 bcr9 |= ~(1 << 0);
31417 }
31418- lp->a.write_bcr(ioaddr, 9, bcr9);
31419+ lp->a->write_bcr(ioaddr, 9, bcr9);
31420 }
31421
31422 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
31423@@ -2158,9 +2158,9 @@ static int pcnet32_open(struct net_devic
31424
31425 #ifdef DO_DXSUFLO
31426 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
31427- val = lp->a.read_csr(ioaddr, CSR3);
31428+ val = lp->a->read_csr(ioaddr, CSR3);
31429 val |= 0x40;
31430- lp->a.write_csr(ioaddr, CSR3, val);
31431+ lp->a->write_csr(ioaddr, CSR3, val);
31432 }
31433 #endif
31434
31435@@ -2176,11 +2176,11 @@ static int pcnet32_open(struct net_devic
31436 napi_enable(&lp->napi);
31437
31438 /* Re-initialize the PCNET32, and start it when done. */
31439- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
31440- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
31441+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
31442+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
31443
31444- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31445- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
31446+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31447+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
31448
31449 netif_start_queue(dev);
31450
31451@@ -2192,19 +2192,19 @@ static int pcnet32_open(struct net_devic
31452
31453 i = 0;
31454 while (i++ < 100)
31455- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
31456+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
31457 break;
31458 /*
31459 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
31460 * reports that doing so triggers a bug in the '974.
31461 */
31462- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
31463+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
31464
31465 netif_printk(lp, ifup, KERN_DEBUG, dev,
31466 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
31467 i,
31468 (u32) (lp->init_dma_addr),
31469- lp->a.read_csr(ioaddr, CSR0));
31470+ lp->a->read_csr(ioaddr, CSR0));
31471
31472 spin_unlock_irqrestore(&lp->lock, flags);
31473
31474@@ -2218,7 +2218,7 @@ err_free_ring:
31475 * Switch back to 16bit mode to avoid problems with dumb
31476 * DOS packet driver after a warm reboot
31477 */
31478- lp->a.write_bcr(ioaddr, 20, 4);
31479+ lp->a->write_bcr(ioaddr, 20, 4);
31480
31481 err_free_irq:
31482 spin_unlock_irqrestore(&lp->lock, flags);
31483@@ -2323,7 +2323,7 @@ static void pcnet32_restart(struct net_d
31484
31485 /* wait for stop */
31486 for (i = 0; i < 100; i++)
31487- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
31488+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
31489 break;
31490
31491 if (i >= 100)
31492@@ -2335,13 +2335,13 @@ static void pcnet32_restart(struct net_d
31493 return;
31494
31495 /* ReInit Ring */
31496- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
31497+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
31498 i = 0;
31499 while (i++ < 1000)
31500- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
31501+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
31502 break;
31503
31504- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
31505+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
31506 }
31507
31508 static void pcnet32_tx_timeout(struct net_device *dev)
31509@@ -2353,8 +2353,8 @@ static void pcnet32_tx_timeout(struct ne
31510 /* Transmitter timeout, serious problems. */
31511 if (pcnet32_debug & NETIF_MSG_DRV)
31512 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
31513- dev->name, lp->a.read_csr(ioaddr, CSR0));
31514- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
31515+ dev->name, lp->a->read_csr(ioaddr, CSR0));
31516+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
31517 dev->stats.tx_errors++;
31518 if (netif_msg_tx_err(lp)) {
31519 int i;
31520@@ -2397,7 +2397,7 @@ static netdev_tx_t pcnet32_start_xmit(st
31521
31522 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
31523 "%s() called, csr0 %4.4x\n",
31524- __func__, lp->a.read_csr(ioaddr, CSR0));
31525+ __func__, lp->a->read_csr(ioaddr, CSR0));
31526
31527 /* Default status -- will not enable Successful-TxDone
31528 * interrupt when that option is available to us.
31529@@ -2427,7 +2427,7 @@ static netdev_tx_t pcnet32_start_xmit(st
31530 dev->stats.tx_bytes += skb->len;
31531
31532 /* Trigger an immediate send poll. */
31533- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
31534+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
31535
31536 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
31537 lp->tx_full = 1;
31538@@ -2452,16 +2452,16 @@ pcnet32_interrupt(int irq, void *dev_id)
31539
31540 spin_lock(&lp->lock);
31541
31542- csr0 = lp->a.read_csr(ioaddr, CSR0);
31543+ csr0 = lp->a->read_csr(ioaddr, CSR0);
31544 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
31545 if (csr0 == 0xffff)
31546 break; /* PCMCIA remove happened */
31547 /* Acknowledge all of the current interrupt sources ASAP. */
31548- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
31549+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
31550
31551 netif_printk(lp, intr, KERN_DEBUG, dev,
31552 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
31553- csr0, lp->a.read_csr(ioaddr, CSR0));
31554+ csr0, lp->a->read_csr(ioaddr, CSR0));
31555
31556 /* Log misc errors. */
31557 if (csr0 & 0x4000)
31558@@ -2488,19 +2488,19 @@ pcnet32_interrupt(int irq, void *dev_id)
31559 if (napi_schedule_prep(&lp->napi)) {
31560 u16 val;
31561 /* set interrupt masks */
31562- val = lp->a.read_csr(ioaddr, CSR3);
31563+ val = lp->a->read_csr(ioaddr, CSR3);
31564 val |= 0x5f00;
31565- lp->a.write_csr(ioaddr, CSR3, val);
31566+ lp->a->write_csr(ioaddr, CSR3, val);
31567
31568 __napi_schedule(&lp->napi);
31569 break;
31570 }
31571- csr0 = lp->a.read_csr(ioaddr, CSR0);
31572+ csr0 = lp->a->read_csr(ioaddr, CSR0);
31573 }
31574
31575 netif_printk(lp, intr, KERN_DEBUG, dev,
31576 "exiting interrupt, csr0=%#4.4x\n",
31577- lp->a.read_csr(ioaddr, CSR0));
31578+ lp->a->read_csr(ioaddr, CSR0));
31579
31580 spin_unlock(&lp->lock);
31581
31582@@ -2520,20 +2520,20 @@ static int pcnet32_close(struct net_devi
31583
31584 spin_lock_irqsave(&lp->lock, flags);
31585
31586- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
31587+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
31588
31589 netif_printk(lp, ifdown, KERN_DEBUG, dev,
31590 "Shutting down ethercard, status was %2.2x\n",
31591- lp->a.read_csr(ioaddr, CSR0));
31592+ lp->a->read_csr(ioaddr, CSR0));
31593
31594 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
31595- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
31596+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
31597
31598 /*
31599 * Switch back to 16bit mode to avoid problems with dumb
31600 * DOS packet driver after a warm reboot
31601 */
31602- lp->a.write_bcr(ioaddr, 20, 4);
31603+ lp->a->write_bcr(ioaddr, 20, 4);
31604
31605 spin_unlock_irqrestore(&lp->lock, flags);
31606
31607@@ -2556,7 +2556,7 @@ static struct net_device_stats *pcnet32_
31608 unsigned long flags;
31609
31610 spin_lock_irqsave(&lp->lock, flags);
31611- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
31612+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
31613 spin_unlock_irqrestore(&lp->lock, flags);
31614
31615 return &dev->stats;
31616@@ -2578,10 +2578,10 @@ static void pcnet32_load_multicast(struc
31617 if (dev->flags & IFF_ALLMULTI) {
31618 ib->filter[0] = cpu_to_le32(~0U);
31619 ib->filter[1] = cpu_to_le32(~0U);
31620- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
31621- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
31622- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
31623- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
31624+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
31625+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
31626+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
31627+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
31628 return;
31629 }
31630 /* clear the multicast filter */
31631@@ -2601,7 +2601,7 @@ static void pcnet32_load_multicast(struc
31632 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
31633 }
31634 for (i = 0; i < 4; i++)
31635- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
31636+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
31637 le16_to_cpu(mcast_table[i]));
31638 }
31639
31640@@ -2616,28 +2616,28 @@ static void pcnet32_set_multicast_list(s
31641
31642 spin_lock_irqsave(&lp->lock, flags);
31643 suspended = pcnet32_suspend(dev, &flags, 0);
31644- csr15 = lp->a.read_csr(ioaddr, CSR15);
31645+ csr15 = lp->a->read_csr(ioaddr, CSR15);
31646 if (dev->flags & IFF_PROMISC) {
31647 /* Log any net taps. */
31648 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
31649 lp->init_block->mode =
31650 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
31651 7);
31652- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
31653+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
31654 } else {
31655 lp->init_block->mode =
31656 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
31657- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
31658+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
31659 pcnet32_load_multicast(dev);
31660 }
31661
31662 if (suspended) {
31663 int csr5;
31664 /* clear SUSPEND (SPND) - CSR5 bit 0 */
31665- csr5 = lp->a.read_csr(ioaddr, CSR5);
31666- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
31667+ csr5 = lp->a->read_csr(ioaddr, CSR5);
31668+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
31669 } else {
31670- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
31671+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
31672 pcnet32_restart(dev, CSR0_NORMAL);
31673 netif_wake_queue(dev);
31674 }
31675@@ -2655,8 +2655,8 @@ static int mdio_read(struct net_device *
31676 if (!lp->mii)
31677 return 0;
31678
31679- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
31680- val_out = lp->a.read_bcr(ioaddr, 34);
31681+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
31682+ val_out = lp->a->read_bcr(ioaddr, 34);
31683
31684 return val_out;
31685 }
31686@@ -2670,8 +2670,8 @@ static void mdio_write(struct net_device
31687 if (!lp->mii)
31688 return;
31689
31690- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
31691- lp->a.write_bcr(ioaddr, 34, val);
31692+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
31693+ lp->a->write_bcr(ioaddr, 34, val);
31694 }
31695
31696 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
31697@@ -2748,7 +2748,7 @@ static void pcnet32_check_media(struct n
31698 curr_link = mii_link_ok(&lp->mii_if);
31699 } else {
31700 ulong ioaddr = dev->base_addr; /* card base I/O address */
31701- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
31702+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
31703 }
31704 if (!curr_link) {
31705 if (prev_link || verbose) {
31706@@ -2771,13 +2771,13 @@ static void pcnet32_check_media(struct n
31707 (ecmd.duplex == DUPLEX_FULL)
31708 ? "full" : "half");
31709 }
31710- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
31711+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
31712 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
31713 if (lp->mii_if.full_duplex)
31714 bcr9 |= (1 << 0);
31715 else
31716 bcr9 &= ~(1 << 0);
31717- lp->a.write_bcr(dev->base_addr, 9, bcr9);
31718+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
31719 }
31720 } else {
31721 netif_info(lp, link, dev, "link up\n");
31722diff -urNp linux-3.0.7/drivers/net/ppp_generic.c linux-3.0.7/drivers/net/ppp_generic.c
31723--- linux-3.0.7/drivers/net/ppp_generic.c 2011-07-21 22:17:23.000000000 -0400
31724+++ linux-3.0.7/drivers/net/ppp_generic.c 2011-08-23 21:47:55.000000000 -0400
31725@@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
31726 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
31727 struct ppp_stats stats;
31728 struct ppp_comp_stats cstats;
31729- char *vers;
31730
31731 switch (cmd) {
31732 case SIOCGPPPSTATS:
31733@@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
31734 break;
31735
31736 case SIOCGPPPVER:
31737- vers = PPP_VERSION;
31738- if (copy_to_user(addr, vers, strlen(vers) + 1))
31739+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
31740 break;
31741 err = 0;
31742 break;
31743diff -urNp linux-3.0.7/drivers/net/r8169.c linux-3.0.7/drivers/net/r8169.c
31744--- linux-3.0.7/drivers/net/r8169.c 2011-09-02 18:11:21.000000000 -0400
31745+++ linux-3.0.7/drivers/net/r8169.c 2011-08-23 21:47:55.000000000 -0400
31746@@ -645,12 +645,12 @@ struct rtl8169_private {
31747 struct mdio_ops {
31748 void (*write)(void __iomem *, int, int);
31749 int (*read)(void __iomem *, int);
31750- } mdio_ops;
31751+ } __no_const mdio_ops;
31752
31753 struct pll_power_ops {
31754 void (*down)(struct rtl8169_private *);
31755 void (*up)(struct rtl8169_private *);
31756- } pll_power_ops;
31757+ } __no_const pll_power_ops;
31758
31759 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
31760 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
31761diff -urNp linux-3.0.7/drivers/net/sis190.c linux-3.0.7/drivers/net/sis190.c
31762--- linux-3.0.7/drivers/net/sis190.c 2011-09-02 18:11:21.000000000 -0400
31763+++ linux-3.0.7/drivers/net/sis190.c 2011-10-11 10:44:33.000000000 -0400
31764@@ -1623,7 +1623,7 @@ static int __devinit sis190_get_mac_addr
31765 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
31766 struct net_device *dev)
31767 {
31768- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
31769+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
31770 struct sis190_private *tp = netdev_priv(dev);
31771 struct pci_dev *isa_bridge;
31772 u8 reg, tmp8;
31773diff -urNp linux-3.0.7/drivers/net/sundance.c linux-3.0.7/drivers/net/sundance.c
31774--- linux-3.0.7/drivers/net/sundance.c 2011-07-21 22:17:23.000000000 -0400
31775+++ linux-3.0.7/drivers/net/sundance.c 2011-10-11 10:44:33.000000000 -0400
31776@@ -218,7 +218,7 @@ enum {
31777 struct pci_id_info {
31778 const char *name;
31779 };
31780-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
31781+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
31782 {"D-Link DFE-550TX FAST Ethernet Adapter"},
31783 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
31784 {"D-Link DFE-580TX 4 port Server Adapter"},
31785diff -urNp linux-3.0.7/drivers/net/tg3.h linux-3.0.7/drivers/net/tg3.h
31786--- linux-3.0.7/drivers/net/tg3.h 2011-07-21 22:17:23.000000000 -0400
31787+++ linux-3.0.7/drivers/net/tg3.h 2011-08-23 21:47:55.000000000 -0400
31788@@ -134,6 +134,7 @@
31789 #define CHIPREV_ID_5750_A0 0x4000
31790 #define CHIPREV_ID_5750_A1 0x4001
31791 #define CHIPREV_ID_5750_A3 0x4003
31792+#define CHIPREV_ID_5750_C1 0x4201
31793 #define CHIPREV_ID_5750_C2 0x4202
31794 #define CHIPREV_ID_5752_A0_HW 0x5000
31795 #define CHIPREV_ID_5752_A0 0x6000
31796diff -urNp linux-3.0.7/drivers/net/tokenring/abyss.c linux-3.0.7/drivers/net/tokenring/abyss.c
31797--- linux-3.0.7/drivers/net/tokenring/abyss.c 2011-07-21 22:17:23.000000000 -0400
31798+++ linux-3.0.7/drivers/net/tokenring/abyss.c 2011-08-23 21:47:55.000000000 -0400
31799@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
31800
31801 static int __init abyss_init (void)
31802 {
31803- abyss_netdev_ops = tms380tr_netdev_ops;
31804+ pax_open_kernel();
31805+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
31806
31807- abyss_netdev_ops.ndo_open = abyss_open;
31808- abyss_netdev_ops.ndo_stop = abyss_close;
31809+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
31810+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
31811+ pax_close_kernel();
31812
31813 return pci_register_driver(&abyss_driver);
31814 }
31815diff -urNp linux-3.0.7/drivers/net/tokenring/madgemc.c linux-3.0.7/drivers/net/tokenring/madgemc.c
31816--- linux-3.0.7/drivers/net/tokenring/madgemc.c 2011-07-21 22:17:23.000000000 -0400
31817+++ linux-3.0.7/drivers/net/tokenring/madgemc.c 2011-08-23 21:47:55.000000000 -0400
31818@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver
31819
31820 static int __init madgemc_init (void)
31821 {
31822- madgemc_netdev_ops = tms380tr_netdev_ops;
31823- madgemc_netdev_ops.ndo_open = madgemc_open;
31824- madgemc_netdev_ops.ndo_stop = madgemc_close;
31825+ pax_open_kernel();
31826+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
31827+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
31828+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
31829+ pax_close_kernel();
31830
31831 return mca_register_driver (&madgemc_driver);
31832 }
31833diff -urNp linux-3.0.7/drivers/net/tokenring/proteon.c linux-3.0.7/drivers/net/tokenring/proteon.c
31834--- linux-3.0.7/drivers/net/tokenring/proteon.c 2011-07-21 22:17:23.000000000 -0400
31835+++ linux-3.0.7/drivers/net/tokenring/proteon.c 2011-08-23 21:47:55.000000000 -0400
31836@@ -353,9 +353,11 @@ static int __init proteon_init(void)
31837 struct platform_device *pdev;
31838 int i, num = 0, err = 0;
31839
31840- proteon_netdev_ops = tms380tr_netdev_ops;
31841- proteon_netdev_ops.ndo_open = proteon_open;
31842- proteon_netdev_ops.ndo_stop = tms380tr_close;
31843+ pax_open_kernel();
31844+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
31845+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
31846+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
31847+ pax_close_kernel();
31848
31849 err = platform_driver_register(&proteon_driver);
31850 if (err)
31851diff -urNp linux-3.0.7/drivers/net/tokenring/skisa.c linux-3.0.7/drivers/net/tokenring/skisa.c
31852--- linux-3.0.7/drivers/net/tokenring/skisa.c 2011-07-21 22:17:23.000000000 -0400
31853+++ linux-3.0.7/drivers/net/tokenring/skisa.c 2011-08-23 21:47:55.000000000 -0400
31854@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
31855 struct platform_device *pdev;
31856 int i, num = 0, err = 0;
31857
31858- sk_isa_netdev_ops = tms380tr_netdev_ops;
31859- sk_isa_netdev_ops.ndo_open = sk_isa_open;
31860- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
31861+ pax_open_kernel();
31862+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
31863+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
31864+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
31865+ pax_close_kernel();
31866
31867 err = platform_driver_register(&sk_isa_driver);
31868 if (err)
31869diff -urNp linux-3.0.7/drivers/net/tulip/de2104x.c linux-3.0.7/drivers/net/tulip/de2104x.c
31870--- linux-3.0.7/drivers/net/tulip/de2104x.c 2011-07-21 22:17:23.000000000 -0400
31871+++ linux-3.0.7/drivers/net/tulip/de2104x.c 2011-08-23 21:48:14.000000000 -0400
31872@@ -1794,6 +1794,8 @@ static void __devinit de21041_get_srom_i
31873 struct de_srom_info_leaf *il;
31874 void *bufp;
31875
31876+ pax_track_stack();
31877+
31878 /* download entire eeprom */
31879 for (i = 0; i < DE_EEPROM_WORDS; i++)
31880 ((__le16 *)ee_data)[i] =
31881diff -urNp linux-3.0.7/drivers/net/tulip/de4x5.c linux-3.0.7/drivers/net/tulip/de4x5.c
31882--- linux-3.0.7/drivers/net/tulip/de4x5.c 2011-07-21 22:17:23.000000000 -0400
31883+++ linux-3.0.7/drivers/net/tulip/de4x5.c 2011-08-23 21:47:55.000000000 -0400
31884@@ -5401,7 +5401,7 @@ de4x5_ioctl(struct net_device *dev, stru
31885 for (i=0; i<ETH_ALEN; i++) {
31886 tmp.addr[i] = dev->dev_addr[i];
31887 }
31888- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
31889+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
31890 break;
31891
31892 case DE4X5_SET_HWADDR: /* Set the hardware address */
31893@@ -5441,7 +5441,7 @@ de4x5_ioctl(struct net_device *dev, stru
31894 spin_lock_irqsave(&lp->lock, flags);
31895 memcpy(&statbuf, &lp->pktStats, ioc->len);
31896 spin_unlock_irqrestore(&lp->lock, flags);
31897- if (copy_to_user(ioc->data, &statbuf, ioc->len))
31898+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
31899 return -EFAULT;
31900 break;
31901 }
31902diff -urNp linux-3.0.7/drivers/net/tulip/eeprom.c linux-3.0.7/drivers/net/tulip/eeprom.c
31903--- linux-3.0.7/drivers/net/tulip/eeprom.c 2011-07-21 22:17:23.000000000 -0400
31904+++ linux-3.0.7/drivers/net/tulip/eeprom.c 2011-10-11 10:44:33.000000000 -0400
31905@@ -81,7 +81,7 @@ static struct eeprom_fixup eeprom_fixups
31906 {NULL}};
31907
31908
31909-static const char *block_name[] __devinitdata = {
31910+static const char *block_name[] __devinitconst = {
31911 "21140 non-MII",
31912 "21140 MII PHY",
31913 "21142 Serial PHY",
31914diff -urNp linux-3.0.7/drivers/net/tulip/winbond-840.c linux-3.0.7/drivers/net/tulip/winbond-840.c
31915--- linux-3.0.7/drivers/net/tulip/winbond-840.c 2011-07-21 22:17:23.000000000 -0400
31916+++ linux-3.0.7/drivers/net/tulip/winbond-840.c 2011-10-11 10:44:33.000000000 -0400
31917@@ -236,7 +236,7 @@ struct pci_id_info {
31918 int drv_flags; /* Driver use, intended as capability flags. */
31919 };
31920
31921-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
31922+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
31923 { /* Sometime a Level-One switch card. */
31924 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
31925 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
31926diff -urNp linux-3.0.7/drivers/net/usb/hso.c linux-3.0.7/drivers/net/usb/hso.c
31927--- linux-3.0.7/drivers/net/usb/hso.c 2011-07-21 22:17:23.000000000 -0400
31928+++ linux-3.0.7/drivers/net/usb/hso.c 2011-08-23 21:47:55.000000000 -0400
31929@@ -71,7 +71,7 @@
31930 #include <asm/byteorder.h>
31931 #include <linux/serial_core.h>
31932 #include <linux/serial.h>
31933-
31934+#include <asm/local.h>
31935
31936 #define MOD_AUTHOR "Option Wireless"
31937 #define MOD_DESCRIPTION "USB High Speed Option driver"
31938@@ -257,7 +257,7 @@ struct hso_serial {
31939
31940 /* from usb_serial_port */
31941 struct tty_struct *tty;
31942- int open_count;
31943+ local_t open_count;
31944 spinlock_t serial_lock;
31945
31946 int (*write_data) (struct hso_serial *serial);
31947@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
31948 struct urb *urb;
31949
31950 urb = serial->rx_urb[0];
31951- if (serial->open_count > 0) {
31952+ if (local_read(&serial->open_count) > 0) {
31953 count = put_rxbuf_data(urb, serial);
31954 if (count == -1)
31955 return;
31956@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
31957 DUMP1(urb->transfer_buffer, urb->actual_length);
31958
31959 /* Anyone listening? */
31960- if (serial->open_count == 0)
31961+ if (local_read(&serial->open_count) == 0)
31962 return;
31963
31964 if (status == 0) {
31965@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
31966 spin_unlock_irq(&serial->serial_lock);
31967
31968 /* check for port already opened, if not set the termios */
31969- serial->open_count++;
31970- if (serial->open_count == 1) {
31971+ if (local_inc_return(&serial->open_count) == 1) {
31972 serial->rx_state = RX_IDLE;
31973 /* Force default termio settings */
31974 _hso_serial_set_termios(tty, NULL);
31975@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
31976 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
31977 if (result) {
31978 hso_stop_serial_device(serial->parent);
31979- serial->open_count--;
31980+ local_dec(&serial->open_count);
31981 kref_put(&serial->parent->ref, hso_serial_ref_free);
31982 }
31983 } else {
31984@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
31985
31986 /* reset the rts and dtr */
31987 /* do the actual close */
31988- serial->open_count--;
31989+ local_dec(&serial->open_count);
31990
31991- if (serial->open_count <= 0) {
31992- serial->open_count = 0;
31993+ if (local_read(&serial->open_count) <= 0) {
31994+ local_set(&serial->open_count, 0);
31995 spin_lock_irq(&serial->serial_lock);
31996 if (serial->tty == tty) {
31997 serial->tty->driver_data = NULL;
31998@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
31999
32000 /* the actual setup */
32001 spin_lock_irqsave(&serial->serial_lock, flags);
32002- if (serial->open_count)
32003+ if (local_read(&serial->open_count))
32004 _hso_serial_set_termios(tty, old);
32005 else
32006 tty->termios = old;
32007@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
32008 D1("Pending read interrupt on port %d\n", i);
32009 spin_lock(&serial->serial_lock);
32010 if (serial->rx_state == RX_IDLE &&
32011- serial->open_count > 0) {
32012+ local_read(&serial->open_count) > 0) {
32013 /* Setup and send a ctrl req read on
32014 * port i */
32015 if (!serial->rx_urb_filled[0]) {
32016@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interfa
32017 /* Start all serial ports */
32018 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
32019 if (serial_table[i] && (serial_table[i]->interface == iface)) {
32020- if (dev2ser(serial_table[i])->open_count) {
32021+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
32022 result =
32023 hso_start_serial_device(serial_table[i], GFP_NOIO);
32024 hso_kick_transmit(dev2ser(serial_table[i]));
32025diff -urNp linux-3.0.7/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-3.0.7/drivers/net/vmxnet3/vmxnet3_ethtool.c
32026--- linux-3.0.7/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-07-21 22:17:23.000000000 -0400
32027+++ linux-3.0.7/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-08-23 21:47:55.000000000 -0400
32028@@ -594,8 +594,7 @@ vmxnet3_set_rss_indir(struct net_device
32029 * Return with error code if any of the queue indices
32030 * is out of range
32031 */
32032- if (p->ring_index[i] < 0 ||
32033- p->ring_index[i] >= adapter->num_rx_queues)
32034+ if (p->ring_index[i] >= adapter->num_rx_queues)
32035 return -EINVAL;
32036 }
32037
32038diff -urNp linux-3.0.7/drivers/net/vxge/vxge-config.h linux-3.0.7/drivers/net/vxge/vxge-config.h
32039--- linux-3.0.7/drivers/net/vxge/vxge-config.h 2011-07-21 22:17:23.000000000 -0400
32040+++ linux-3.0.7/drivers/net/vxge/vxge-config.h 2011-08-23 21:47:55.000000000 -0400
32041@@ -512,7 +512,7 @@ struct vxge_hw_uld_cbs {
32042 void (*link_down)(struct __vxge_hw_device *devh);
32043 void (*crit_err)(struct __vxge_hw_device *devh,
32044 enum vxge_hw_event type, u64 ext_data);
32045-};
32046+} __no_const;
32047
32048 /*
32049 * struct __vxge_hw_blockpool_entry - Block private data structure
32050diff -urNp linux-3.0.7/drivers/net/vxge/vxge-main.c linux-3.0.7/drivers/net/vxge/vxge-main.c
32051--- linux-3.0.7/drivers/net/vxge/vxge-main.c 2011-07-21 22:17:23.000000000 -0400
32052+++ linux-3.0.7/drivers/net/vxge/vxge-main.c 2011-08-23 21:48:14.000000000 -0400
32053@@ -98,6 +98,8 @@ static inline void VXGE_COMPLETE_VPATH_T
32054 struct sk_buff *completed[NR_SKB_COMPLETED];
32055 int more;
32056
32057+ pax_track_stack();
32058+
32059 do {
32060 more = 0;
32061 skb_ptr = completed;
32062@@ -1920,6 +1922,8 @@ static enum vxge_hw_status vxge_rth_conf
32063 u8 mtable[256] = {0}; /* CPU to vpath mapping */
32064 int index;
32065
32066+ pax_track_stack();
32067+
32068 /*
32069 * Filling
32070 * - itable with bucket numbers
32071diff -urNp linux-3.0.7/drivers/net/vxge/vxge-traffic.h linux-3.0.7/drivers/net/vxge/vxge-traffic.h
32072--- linux-3.0.7/drivers/net/vxge/vxge-traffic.h 2011-07-21 22:17:23.000000000 -0400
32073+++ linux-3.0.7/drivers/net/vxge/vxge-traffic.h 2011-08-23 21:47:55.000000000 -0400
32074@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
32075 struct vxge_hw_mempool_dma *dma_object,
32076 u32 index,
32077 u32 is_last);
32078-};
32079+} __no_const;
32080
32081 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
32082 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
32083diff -urNp linux-3.0.7/drivers/net/wan/cycx_x25.c linux-3.0.7/drivers/net/wan/cycx_x25.c
32084--- linux-3.0.7/drivers/net/wan/cycx_x25.c 2011-07-21 22:17:23.000000000 -0400
32085+++ linux-3.0.7/drivers/net/wan/cycx_x25.c 2011-08-23 21:48:14.000000000 -0400
32086@@ -1018,6 +1018,8 @@ static void hex_dump(char *msg, unsigned
32087 unsigned char hex[1024],
32088 * phex = hex;
32089
32090+ pax_track_stack();
32091+
32092 if (len >= (sizeof(hex) / 2))
32093 len = (sizeof(hex) / 2) - 1;
32094
32095diff -urNp linux-3.0.7/drivers/net/wan/hdlc_x25.c linux-3.0.7/drivers/net/wan/hdlc_x25.c
32096--- linux-3.0.7/drivers/net/wan/hdlc_x25.c 2011-07-21 22:17:23.000000000 -0400
32097+++ linux-3.0.7/drivers/net/wan/hdlc_x25.c 2011-08-23 21:47:55.000000000 -0400
32098@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
32099
32100 static int x25_open(struct net_device *dev)
32101 {
32102- struct lapb_register_struct cb;
32103+ static struct lapb_register_struct cb = {
32104+ .connect_confirmation = x25_connected,
32105+ .connect_indication = x25_connected,
32106+ .disconnect_confirmation = x25_disconnected,
32107+ .disconnect_indication = x25_disconnected,
32108+ .data_indication = x25_data_indication,
32109+ .data_transmit = x25_data_transmit
32110+ };
32111 int result;
32112
32113- cb.connect_confirmation = x25_connected;
32114- cb.connect_indication = x25_connected;
32115- cb.disconnect_confirmation = x25_disconnected;
32116- cb.disconnect_indication = x25_disconnected;
32117- cb.data_indication = x25_data_indication;
32118- cb.data_transmit = x25_data_transmit;
32119-
32120 result = lapb_register(dev, &cb);
32121 if (result != LAPB_OK)
32122 return result;
32123diff -urNp linux-3.0.7/drivers/net/wimax/i2400m/usb-fw.c linux-3.0.7/drivers/net/wimax/i2400m/usb-fw.c
32124--- linux-3.0.7/drivers/net/wimax/i2400m/usb-fw.c 2011-07-21 22:17:23.000000000 -0400
32125+++ linux-3.0.7/drivers/net/wimax/i2400m/usb-fw.c 2011-08-23 21:48:14.000000000 -0400
32126@@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
32127 int do_autopm = 1;
32128 DECLARE_COMPLETION_ONSTACK(notif_completion);
32129
32130+ pax_track_stack();
32131+
32132 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
32133 i2400m, ack, ack_size);
32134 BUG_ON(_ack == i2400m->bm_ack_buf);
32135diff -urNp linux-3.0.7/drivers/net/wireless/airo.c linux-3.0.7/drivers/net/wireless/airo.c
32136--- linux-3.0.7/drivers/net/wireless/airo.c 2011-09-02 18:11:21.000000000 -0400
32137+++ linux-3.0.7/drivers/net/wireless/airo.c 2011-08-23 21:48:14.000000000 -0400
32138@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
32139 BSSListElement * loop_net;
32140 BSSListElement * tmp_net;
32141
32142+ pax_track_stack();
32143+
32144 /* Blow away current list of scan results */
32145 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
32146 list_move_tail (&loop_net->list, &ai->network_free_list);
32147@@ -3794,6 +3796,8 @@ static u16 setup_card(struct airo_info *
32148 WepKeyRid wkr;
32149 int rc;
32150
32151+ pax_track_stack();
32152+
32153 memset( &mySsid, 0, sizeof( mySsid ) );
32154 kfree (ai->flash);
32155 ai->flash = NULL;
32156@@ -4753,6 +4757,8 @@ static int proc_stats_rid_open( struct i
32157 __le32 *vals = stats.vals;
32158 int len;
32159
32160+ pax_track_stack();
32161+
32162 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32163 return -ENOMEM;
32164 data = file->private_data;
32165@@ -5476,6 +5482,8 @@ static int proc_BSSList_open( struct ino
32166 /* If doLoseSync is not 1, we won't do a Lose Sync */
32167 int doLoseSync = -1;
32168
32169+ pax_track_stack();
32170+
32171 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32172 return -ENOMEM;
32173 data = file->private_data;
32174@@ -7181,6 +7189,8 @@ static int airo_get_aplist(struct net_de
32175 int i;
32176 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
32177
32178+ pax_track_stack();
32179+
32180 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
32181 if (!qual)
32182 return -ENOMEM;
32183@@ -7741,6 +7751,8 @@ static void airo_read_wireless_stats(str
32184 CapabilityRid cap_rid;
32185 __le32 *vals = stats_rid.vals;
32186
32187+ pax_track_stack();
32188+
32189 /* Get stats out of the card */
32190 clear_bit(JOB_WSTATS, &local->jobs);
32191 if (local->power.event) {
32192diff -urNp linux-3.0.7/drivers/net/wireless/ath/ath5k/debug.c linux-3.0.7/drivers/net/wireless/ath/ath5k/debug.c
32193--- linux-3.0.7/drivers/net/wireless/ath/ath5k/debug.c 2011-07-21 22:17:23.000000000 -0400
32194+++ linux-3.0.7/drivers/net/wireless/ath/ath5k/debug.c 2011-08-23 21:48:14.000000000 -0400
32195@@ -204,6 +204,8 @@ static ssize_t read_file_beacon(struct f
32196 unsigned int v;
32197 u64 tsf;
32198
32199+ pax_track_stack();
32200+
32201 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
32202 len += snprintf(buf+len, sizeof(buf)-len,
32203 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
32204@@ -323,6 +325,8 @@ static ssize_t read_file_debug(struct fi
32205 unsigned int len = 0;
32206 unsigned int i;
32207
32208+ pax_track_stack();
32209+
32210 len += snprintf(buf+len, sizeof(buf)-len,
32211 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
32212
32213@@ -384,6 +388,8 @@ static ssize_t read_file_antenna(struct
32214 unsigned int i;
32215 unsigned int v;
32216
32217+ pax_track_stack();
32218+
32219 len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
32220 sc->ah->ah_ant_mode);
32221 len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
32222@@ -494,6 +500,8 @@ static ssize_t read_file_misc(struct fil
32223 unsigned int len = 0;
32224 u32 filt = ath5k_hw_get_rx_filter(sc->ah);
32225
32226+ pax_track_stack();
32227+
32228 len += snprintf(buf+len, sizeof(buf)-len, "bssid-mask: %pM\n",
32229 sc->bssidmask);
32230 len += snprintf(buf+len, sizeof(buf)-len, "filter-flags: 0x%x ",
32231@@ -550,6 +558,8 @@ static ssize_t read_file_frameerrors(str
32232 unsigned int len = 0;
32233 int i;
32234
32235+ pax_track_stack();
32236+
32237 len += snprintf(buf+len, sizeof(buf)-len,
32238 "RX\n---------------------\n");
32239 len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
32240@@ -667,6 +677,8 @@ static ssize_t read_file_ani(struct file
32241 char buf[700];
32242 unsigned int len = 0;
32243
32244+ pax_track_stack();
32245+
32246 len += snprintf(buf+len, sizeof(buf)-len,
32247 "HW has PHY error counters:\t%s\n",
32248 sc->ah->ah_capabilities.cap_has_phyerr_counters ?
32249@@ -827,6 +839,8 @@ static ssize_t read_file_queue(struct fi
32250 struct ath5k_buf *bf, *bf0;
32251 int i, n;
32252
32253+ pax_track_stack();
32254+
32255 len += snprintf(buf+len, sizeof(buf)-len,
32256 "available txbuffers: %d\n", sc->txbuf_len);
32257
32258diff -urNp linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_calib.c
32259--- linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-07-21 22:17:23.000000000 -0400
32260+++ linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-08-23 21:48:14.000000000 -0400
32261@@ -757,6 +757,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
32262 int i, im, j;
32263 int nmeasurement;
32264
32265+ pax_track_stack();
32266+
32267 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
32268 if (ah->txchainmask & (1 << i))
32269 num_chains++;
32270diff -urNp linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
32271--- linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-07-21 22:17:23.000000000 -0400
32272+++ linux-3.0.7/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-08-23 21:48:14.000000000 -0400
32273@@ -356,6 +356,8 @@ static bool create_pa_curve(u32 *data_L,
32274 int theta_low_bin = 0;
32275 int i;
32276
32277+ pax_track_stack();
32278+
32279 /* disregard any bin that contains <= 16 samples */
32280 thresh_accum_cnt = 16;
32281 scale_factor = 5;
32282diff -urNp linux-3.0.7/drivers/net/wireless/ath/ath9k/debug.c linux-3.0.7/drivers/net/wireless/ath/ath9k/debug.c
32283--- linux-3.0.7/drivers/net/wireless/ath/ath9k/debug.c 2011-07-21 22:17:23.000000000 -0400
32284+++ linux-3.0.7/drivers/net/wireless/ath/ath9k/debug.c 2011-08-23 21:48:14.000000000 -0400
32285@@ -337,6 +337,8 @@ static ssize_t read_file_interrupt(struc
32286 char buf[512];
32287 unsigned int len = 0;
32288
32289+ pax_track_stack();
32290+
32291 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
32292 len += snprintf(buf + len, sizeof(buf) - len,
32293 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
32294@@ -427,6 +429,8 @@ static ssize_t read_file_wiphy(struct fi
32295 u8 addr[ETH_ALEN];
32296 u32 tmp;
32297
32298+ pax_track_stack();
32299+
32300 len += snprintf(buf + len, sizeof(buf) - len,
32301 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
32302 wiphy_name(sc->hw->wiphy),
32303diff -urNp linux-3.0.7/drivers/net/wireless/ath/ath9k/htc_drv_debug.c linux-3.0.7/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
32304--- linux-3.0.7/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-07-21 22:17:23.000000000 -0400
32305+++ linux-3.0.7/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-08-23 21:48:14.000000000 -0400
32306@@ -31,6 +31,8 @@ static ssize_t read_file_tgt_int_stats(s
32307 unsigned int len = 0;
32308 int ret = 0;
32309
32310+ pax_track_stack();
32311+
32312 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
32313
32314 ath9k_htc_ps_wakeup(priv);
32315@@ -89,6 +91,8 @@ static ssize_t read_file_tgt_tx_stats(st
32316 unsigned int len = 0;
32317 int ret = 0;
32318
32319+ pax_track_stack();
32320+
32321 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
32322
32323 ath9k_htc_ps_wakeup(priv);
32324@@ -159,6 +163,8 @@ static ssize_t read_file_tgt_rx_stats(st
32325 unsigned int len = 0;
32326 int ret = 0;
32327
32328+ pax_track_stack();
32329+
32330 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
32331
32332 ath9k_htc_ps_wakeup(priv);
32333@@ -203,6 +209,8 @@ static ssize_t read_file_xmit(struct fil
32334 char buf[512];
32335 unsigned int len = 0;
32336
32337+ pax_track_stack();
32338+
32339 len += snprintf(buf + len, sizeof(buf) - len,
32340 "%20s : %10u\n", "Buffers queued",
32341 priv->debug.tx_stats.buf_queued);
32342@@ -376,6 +384,8 @@ static ssize_t read_file_slot(struct fil
32343 char buf[512];
32344 unsigned int len = 0;
32345
32346+ pax_track_stack();
32347+
32348 spin_lock_bh(&priv->tx.tx_lock);
32349
32350 len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
32351@@ -411,6 +421,8 @@ static ssize_t read_file_queue(struct fi
32352 char buf[512];
32353 unsigned int len = 0;
32354
32355+ pax_track_stack();
32356+
32357 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
32358 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
32359
32360diff -urNp linux-3.0.7/drivers/net/wireless/ath/ath9k/hw.h linux-3.0.7/drivers/net/wireless/ath/ath9k/hw.h
32361--- linux-3.0.7/drivers/net/wireless/ath/ath9k/hw.h 2011-09-02 18:11:21.000000000 -0400
32362+++ linux-3.0.7/drivers/net/wireless/ath/ath9k/hw.h 2011-08-23 21:47:55.000000000 -0400
32363@@ -585,7 +585,7 @@ struct ath_hw_private_ops {
32364
32365 /* ANI */
32366 void (*ani_cache_ini_regs)(struct ath_hw *ah);
32367-};
32368+} __no_const;
32369
32370 /**
32371 * struct ath_hw_ops - callbacks used by hardware code and driver code
32372@@ -637,7 +637,7 @@ struct ath_hw_ops {
32373 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
32374 struct ath_hw_antcomb_conf *antconf);
32375
32376-};
32377+} __no_const;
32378
32379 struct ath_nf_limits {
32380 s16 max;
32381@@ -650,7 +650,7 @@ struct ath_nf_limits {
32382 #define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
32383
32384 struct ath_hw {
32385- struct ath_ops reg_ops;
32386+ ath_ops_no_const reg_ops;
32387
32388 struct ieee80211_hw *hw;
32389 struct ath_common common;
32390diff -urNp linux-3.0.7/drivers/net/wireless/ath/ath.h linux-3.0.7/drivers/net/wireless/ath/ath.h
32391--- linux-3.0.7/drivers/net/wireless/ath/ath.h 2011-07-21 22:17:23.000000000 -0400
32392+++ linux-3.0.7/drivers/net/wireless/ath/ath.h 2011-08-23 21:47:55.000000000 -0400
32393@@ -121,6 +121,7 @@ struct ath_ops {
32394 void (*write_flush) (void *);
32395 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
32396 };
32397+typedef struct ath_ops __no_const ath_ops_no_const;
32398
32399 struct ath_common;
32400 struct ath_bus_ops;
32401diff -urNp linux-3.0.7/drivers/net/wireless/ipw2x00/ipw2100.c linux-3.0.7/drivers/net/wireless/ipw2x00/ipw2100.c
32402--- linux-3.0.7/drivers/net/wireless/ipw2x00/ipw2100.c 2011-07-21 22:17:23.000000000 -0400
32403+++ linux-3.0.7/drivers/net/wireless/ipw2x00/ipw2100.c 2011-08-23 21:48:14.000000000 -0400
32404@@ -2100,6 +2100,8 @@ static int ipw2100_set_essid(struct ipw2
32405 int err;
32406 DECLARE_SSID_BUF(ssid);
32407
32408+ pax_track_stack();
32409+
32410 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
32411
32412 if (ssid_len)
32413@@ -5449,6 +5451,8 @@ static int ipw2100_set_key(struct ipw210
32414 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
32415 int err;
32416
32417+ pax_track_stack();
32418+
32419 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
32420 idx, keylen, len);
32421
32422diff -urNp linux-3.0.7/drivers/net/wireless/ipw2x00/libipw_rx.c linux-3.0.7/drivers/net/wireless/ipw2x00/libipw_rx.c
32423--- linux-3.0.7/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-07-21 22:17:23.000000000 -0400
32424+++ linux-3.0.7/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-08-23 21:48:14.000000000 -0400
32425@@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
32426 unsigned long flags;
32427 DECLARE_SSID_BUF(ssid);
32428
32429+ pax_track_stack();
32430+
32431 LIBIPW_DEBUG_SCAN("'%s' (%pM"
32432 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
32433 print_ssid(ssid, info_element->data, info_element->len),
32434diff -urNp linux-3.0.7/drivers/net/wireless/iwlegacy/iwl3945-base.c linux-3.0.7/drivers/net/wireless/iwlegacy/iwl3945-base.c
32435--- linux-3.0.7/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-10-16 21:54:54.000000000 -0400
32436+++ linux-3.0.7/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-10-16 21:55:27.000000000 -0400
32437@@ -3962,7 +3962,9 @@ static int iwl3945_pci_probe(struct pci_
32438 */
32439 if (iwl3945_mod_params.disable_hw_scan) {
32440 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
32441- iwl3945_hw_ops.hw_scan = NULL;
32442+ pax_open_kernel();
32443+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
32444+ pax_close_kernel();
32445 }
32446
32447 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
32448diff -urNp linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
32449--- linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-07-21 22:17:23.000000000 -0400
32450+++ linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-08-23 21:48:14.000000000 -0400
32451@@ -910,6 +910,8 @@ static void rs_tx_status(void *priv_r, s
32452 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
32453 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
32454
32455+ pax_track_stack();
32456+
32457 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
32458
32459 /* Treat uninitialized rate scaling data same as non-existing. */
32460@@ -2918,6 +2920,8 @@ static void rs_fill_link_cmd(struct iwl_
32461 container_of(lq_sta, struct iwl_station_priv, lq_sta);
32462 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
32463
32464+ pax_track_stack();
32465+
32466 /* Override starting rate (index 0) if needed for debug purposes */
32467 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
32468
32469diff -urNp linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debugfs.c
32470--- linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-07-21 22:17:23.000000000 -0400
32471+++ linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-08-23 21:48:14.000000000 -0400
32472@@ -548,6 +548,8 @@ static ssize_t iwl_dbgfs_status_read(str
32473 int pos = 0;
32474 const size_t bufsz = sizeof(buf);
32475
32476+ pax_track_stack();
32477+
32478 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
32479 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
32480 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
32481@@ -680,6 +682,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
32482 char buf[256 * NUM_IWL_RXON_CTX];
32483 const size_t bufsz = sizeof(buf);
32484
32485+ pax_track_stack();
32486+
32487 for_each_context(priv, ctx) {
32488 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
32489 ctx->ctxid);
32490diff -urNp linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debug.h linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debug.h
32491--- linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-07-21 22:17:23.000000000 -0400
32492+++ linux-3.0.7/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-08-23 21:47:55.000000000 -0400
32493@@ -68,8 +68,8 @@ do {
32494 } while (0)
32495
32496 #else
32497-#define IWL_DEBUG(__priv, level, fmt, args...)
32498-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
32499+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
32500+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
32501 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
32502 const void *p, u32 len)
32503 {}
32504diff -urNp linux-3.0.7/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-3.0.7/drivers/net/wireless/iwmc3200wifi/debugfs.c
32505--- linux-3.0.7/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-07-21 22:17:23.000000000 -0400
32506+++ linux-3.0.7/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-08-23 21:48:14.000000000 -0400
32507@@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
32508 int buf_len = 512;
32509 size_t len = 0;
32510
32511+ pax_track_stack();
32512+
32513 if (*ppos != 0)
32514 return 0;
32515 if (count < sizeof(buf))
32516diff -urNp linux-3.0.7/drivers/net/wireless/mac80211_hwsim.c linux-3.0.7/drivers/net/wireless/mac80211_hwsim.c
32517--- linux-3.0.7/drivers/net/wireless/mac80211_hwsim.c 2011-07-21 22:17:23.000000000 -0400
32518+++ linux-3.0.7/drivers/net/wireless/mac80211_hwsim.c 2011-08-23 21:47:55.000000000 -0400
32519@@ -1260,9 +1260,11 @@ static int __init init_mac80211_hwsim(vo
32520 return -EINVAL;
32521
32522 if (fake_hw_scan) {
32523- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
32524- mac80211_hwsim_ops.sw_scan_start = NULL;
32525- mac80211_hwsim_ops.sw_scan_complete = NULL;
32526+ pax_open_kernel();
32527+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
32528+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
32529+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
32530+ pax_close_kernel();
32531 }
32532
32533 spin_lock_init(&hwsim_radio_lock);
32534diff -urNp linux-3.0.7/drivers/net/wireless/rndis_wlan.c linux-3.0.7/drivers/net/wireless/rndis_wlan.c
32535--- linux-3.0.7/drivers/net/wireless/rndis_wlan.c 2011-07-21 22:17:23.000000000 -0400
32536+++ linux-3.0.7/drivers/net/wireless/rndis_wlan.c 2011-08-23 21:47:55.000000000 -0400
32537@@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
32538
32539 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
32540
32541- if (rts_threshold < 0 || rts_threshold > 2347)
32542+ if (rts_threshold > 2347)
32543 rts_threshold = 2347;
32544
32545 tmp = cpu_to_le32(rts_threshold);
32546diff -urNp linux-3.0.7/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-3.0.7/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
32547--- linux-3.0.7/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-07-21 22:17:23.000000000 -0400
32548+++ linux-3.0.7/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-08-23 21:48:14.000000000 -0400
32549@@ -837,6 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(st
32550 u8 rfpath;
32551 u8 num_total_rfpath = rtlphy->num_total_rfpath;
32552
32553+ pax_track_stack();
32554+
32555 precommoncmdcnt = 0;
32556 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
32557 MAX_PRECMD_CNT,
32558diff -urNp linux-3.0.7/drivers/net/wireless/wl1251/wl1251.h linux-3.0.7/drivers/net/wireless/wl1251/wl1251.h
32559--- linux-3.0.7/drivers/net/wireless/wl1251/wl1251.h 2011-07-21 22:17:23.000000000 -0400
32560+++ linux-3.0.7/drivers/net/wireless/wl1251/wl1251.h 2011-08-23 21:47:55.000000000 -0400
32561@@ -266,7 +266,7 @@ struct wl1251_if_operations {
32562 void (*reset)(struct wl1251 *wl);
32563 void (*enable_irq)(struct wl1251 *wl);
32564 void (*disable_irq)(struct wl1251 *wl);
32565-};
32566+} __no_const;
32567
32568 struct wl1251 {
32569 struct ieee80211_hw *hw;
32570diff -urNp linux-3.0.7/drivers/net/wireless/wl12xx/spi.c linux-3.0.7/drivers/net/wireless/wl12xx/spi.c
32571--- linux-3.0.7/drivers/net/wireless/wl12xx/spi.c 2011-07-21 22:17:23.000000000 -0400
32572+++ linux-3.0.7/drivers/net/wireless/wl12xx/spi.c 2011-08-23 21:48:14.000000000 -0400
32573@@ -280,6 +280,8 @@ static void wl1271_spi_raw_write(struct
32574 u32 chunk_len;
32575 int i;
32576
32577+ pax_track_stack();
32578+
32579 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
32580
32581 spi_message_init(&m);
32582diff -urNp linux-3.0.7/drivers/oprofile/buffer_sync.c linux-3.0.7/drivers/oprofile/buffer_sync.c
32583--- linux-3.0.7/drivers/oprofile/buffer_sync.c 2011-07-21 22:17:23.000000000 -0400
32584+++ linux-3.0.7/drivers/oprofile/buffer_sync.c 2011-08-23 21:47:55.000000000 -0400
32585@@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
32586 if (cookie == NO_COOKIE)
32587 offset = pc;
32588 if (cookie == INVALID_COOKIE) {
32589- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
32590+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
32591 offset = pc;
32592 }
32593 if (cookie != last_cookie) {
32594@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
32595 /* add userspace sample */
32596
32597 if (!mm) {
32598- atomic_inc(&oprofile_stats.sample_lost_no_mm);
32599+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
32600 return 0;
32601 }
32602
32603 cookie = lookup_dcookie(mm, s->eip, &offset);
32604
32605 if (cookie == INVALID_COOKIE) {
32606- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
32607+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
32608 return 0;
32609 }
32610
32611@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
32612 /* ignore backtraces if failed to add a sample */
32613 if (state == sb_bt_start) {
32614 state = sb_bt_ignore;
32615- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
32616+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
32617 }
32618 }
32619 release_mm(mm);
32620diff -urNp linux-3.0.7/drivers/oprofile/event_buffer.c linux-3.0.7/drivers/oprofile/event_buffer.c
32621--- linux-3.0.7/drivers/oprofile/event_buffer.c 2011-07-21 22:17:23.000000000 -0400
32622+++ linux-3.0.7/drivers/oprofile/event_buffer.c 2011-08-23 21:47:55.000000000 -0400
32623@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
32624 }
32625
32626 if (buffer_pos == buffer_size) {
32627- atomic_inc(&oprofile_stats.event_lost_overflow);
32628+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
32629 return;
32630 }
32631
32632diff -urNp linux-3.0.7/drivers/oprofile/oprof.c linux-3.0.7/drivers/oprofile/oprof.c
32633--- linux-3.0.7/drivers/oprofile/oprof.c 2011-07-21 22:17:23.000000000 -0400
32634+++ linux-3.0.7/drivers/oprofile/oprof.c 2011-08-23 21:47:55.000000000 -0400
32635@@ -110,7 +110,7 @@ static void switch_worker(struct work_st
32636 if (oprofile_ops.switch_events())
32637 return;
32638
32639- atomic_inc(&oprofile_stats.multiplex_counter);
32640+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
32641 start_switch_worker();
32642 }
32643
32644diff -urNp linux-3.0.7/drivers/oprofile/oprofilefs.c linux-3.0.7/drivers/oprofile/oprofilefs.c
32645--- linux-3.0.7/drivers/oprofile/oprofilefs.c 2011-07-21 22:17:23.000000000 -0400
32646+++ linux-3.0.7/drivers/oprofile/oprofilefs.c 2011-08-23 21:47:55.000000000 -0400
32647@@ -186,7 +186,7 @@ static const struct file_operations atom
32648
32649
32650 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
32651- char const *name, atomic_t *val)
32652+ char const *name, atomic_unchecked_t *val)
32653 {
32654 return __oprofilefs_create_file(sb, root, name,
32655 &atomic_ro_fops, 0444, val);
32656diff -urNp linux-3.0.7/drivers/oprofile/oprofile_stats.c linux-3.0.7/drivers/oprofile/oprofile_stats.c
32657--- linux-3.0.7/drivers/oprofile/oprofile_stats.c 2011-07-21 22:17:23.000000000 -0400
32658+++ linux-3.0.7/drivers/oprofile/oprofile_stats.c 2011-08-23 21:47:55.000000000 -0400
32659@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
32660 cpu_buf->sample_invalid_eip = 0;
32661 }
32662
32663- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
32664- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
32665- atomic_set(&oprofile_stats.event_lost_overflow, 0);
32666- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
32667- atomic_set(&oprofile_stats.multiplex_counter, 0);
32668+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
32669+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
32670+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
32671+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
32672+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
32673 }
32674
32675
32676diff -urNp linux-3.0.7/drivers/oprofile/oprofile_stats.h linux-3.0.7/drivers/oprofile/oprofile_stats.h
32677--- linux-3.0.7/drivers/oprofile/oprofile_stats.h 2011-07-21 22:17:23.000000000 -0400
32678+++ linux-3.0.7/drivers/oprofile/oprofile_stats.h 2011-08-23 21:47:55.000000000 -0400
32679@@ -13,11 +13,11 @@
32680 #include <asm/atomic.h>
32681
32682 struct oprofile_stat_struct {
32683- atomic_t sample_lost_no_mm;
32684- atomic_t sample_lost_no_mapping;
32685- atomic_t bt_lost_no_mapping;
32686- atomic_t event_lost_overflow;
32687- atomic_t multiplex_counter;
32688+ atomic_unchecked_t sample_lost_no_mm;
32689+ atomic_unchecked_t sample_lost_no_mapping;
32690+ atomic_unchecked_t bt_lost_no_mapping;
32691+ atomic_unchecked_t event_lost_overflow;
32692+ atomic_unchecked_t multiplex_counter;
32693 };
32694
32695 extern struct oprofile_stat_struct oprofile_stats;
32696diff -urNp linux-3.0.7/drivers/parport/procfs.c linux-3.0.7/drivers/parport/procfs.c
32697--- linux-3.0.7/drivers/parport/procfs.c 2011-07-21 22:17:23.000000000 -0400
32698+++ linux-3.0.7/drivers/parport/procfs.c 2011-08-23 21:47:55.000000000 -0400
32699@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
32700
32701 *ppos += len;
32702
32703- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
32704+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
32705 }
32706
32707 #ifdef CONFIG_PARPORT_1284
32708@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
32709
32710 *ppos += len;
32711
32712- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
32713+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
32714 }
32715 #endif /* IEEE1284.3 support. */
32716
32717diff -urNp linux-3.0.7/drivers/pci/hotplug/cpci_hotplug.h linux-3.0.7/drivers/pci/hotplug/cpci_hotplug.h
32718--- linux-3.0.7/drivers/pci/hotplug/cpci_hotplug.h 2011-07-21 22:17:23.000000000 -0400
32719+++ linux-3.0.7/drivers/pci/hotplug/cpci_hotplug.h 2011-08-23 21:47:55.000000000 -0400
32720@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
32721 int (*hardware_test) (struct slot* slot, u32 value);
32722 u8 (*get_power) (struct slot* slot);
32723 int (*set_power) (struct slot* slot, int value);
32724-};
32725+} __no_const;
32726
32727 struct cpci_hp_controller {
32728 unsigned int irq;
32729diff -urNp linux-3.0.7/drivers/pci/hotplug/cpqphp_nvram.c linux-3.0.7/drivers/pci/hotplug/cpqphp_nvram.c
32730--- linux-3.0.7/drivers/pci/hotplug/cpqphp_nvram.c 2011-07-21 22:17:23.000000000 -0400
32731+++ linux-3.0.7/drivers/pci/hotplug/cpqphp_nvram.c 2011-08-23 21:47:55.000000000 -0400
32732@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
32733
32734 void compaq_nvram_init (void __iomem *rom_start)
32735 {
32736+
32737+#ifndef CONFIG_PAX_KERNEXEC
32738 if (rom_start) {
32739 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
32740 }
32741+#endif
32742+
32743 dbg("int15 entry = %p\n", compaq_int15_entry_point);
32744
32745 /* initialize our int15 lock */
32746diff -urNp linux-3.0.7/drivers/pci/pcie/aspm.c linux-3.0.7/drivers/pci/pcie/aspm.c
32747--- linux-3.0.7/drivers/pci/pcie/aspm.c 2011-07-21 22:17:23.000000000 -0400
32748+++ linux-3.0.7/drivers/pci/pcie/aspm.c 2011-08-23 21:47:55.000000000 -0400
32749@@ -27,9 +27,9 @@
32750 #define MODULE_PARAM_PREFIX "pcie_aspm."
32751
32752 /* Note: those are not register definitions */
32753-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
32754-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
32755-#define ASPM_STATE_L1 (4) /* L1 state */
32756+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
32757+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
32758+#define ASPM_STATE_L1 (4U) /* L1 state */
32759 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
32760 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
32761
32762diff -urNp linux-3.0.7/drivers/pci/probe.c linux-3.0.7/drivers/pci/probe.c
32763--- linux-3.0.7/drivers/pci/probe.c 2011-07-21 22:17:23.000000000 -0400
32764+++ linux-3.0.7/drivers/pci/probe.c 2011-08-23 21:47:55.000000000 -0400
32765@@ -129,7 +129,7 @@ int __pci_read_base(struct pci_dev *dev,
32766 u32 l, sz, mask;
32767 u16 orig_cmd;
32768
32769- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
32770+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
32771
32772 if (!dev->mmio_always_on) {
32773 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
32774diff -urNp linux-3.0.7/drivers/pci/proc.c linux-3.0.7/drivers/pci/proc.c
32775--- linux-3.0.7/drivers/pci/proc.c 2011-07-21 22:17:23.000000000 -0400
32776+++ linux-3.0.7/drivers/pci/proc.c 2011-08-23 21:48:14.000000000 -0400
32777@@ -476,7 +476,16 @@ static const struct file_operations proc
32778 static int __init pci_proc_init(void)
32779 {
32780 struct pci_dev *dev = NULL;
32781+
32782+#ifdef CONFIG_GRKERNSEC_PROC_ADD
32783+#ifdef CONFIG_GRKERNSEC_PROC_USER
32784+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
32785+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
32786+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
32787+#endif
32788+#else
32789 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
32790+#endif
32791 proc_create("devices", 0, proc_bus_pci_dir,
32792 &proc_bus_pci_dev_operations);
32793 proc_initialized = 1;
32794diff -urNp linux-3.0.7/drivers/pci/xen-pcifront.c linux-3.0.7/drivers/pci/xen-pcifront.c
32795--- linux-3.0.7/drivers/pci/xen-pcifront.c 2011-07-21 22:17:23.000000000 -0400
32796+++ linux-3.0.7/drivers/pci/xen-pcifront.c 2011-08-23 21:48:14.000000000 -0400
32797@@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
32798 struct pcifront_sd *sd = bus->sysdata;
32799 struct pcifront_device *pdev = pcifront_get_pdev(sd);
32800
32801+ pax_track_stack();
32802+
32803 if (verbose_request)
32804 dev_info(&pdev->xdev->dev,
32805 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
32806@@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
32807 struct pcifront_sd *sd = bus->sysdata;
32808 struct pcifront_device *pdev = pcifront_get_pdev(sd);
32809
32810+ pax_track_stack();
32811+
32812 if (verbose_request)
32813 dev_info(&pdev->xdev->dev,
32814 "write dev=%04x:%02x:%02x.%01x - "
32815@@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
32816 struct pcifront_device *pdev = pcifront_get_pdev(sd);
32817 struct msi_desc *entry;
32818
32819+ pax_track_stack();
32820+
32821 if (nvec > SH_INFO_MAX_VEC) {
32822 dev_err(&dev->dev, "too much vector for pci frontend: %x."
32823 " Increase SH_INFO_MAX_VEC.\n", nvec);
32824@@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
32825 struct pcifront_sd *sd = dev->bus->sysdata;
32826 struct pcifront_device *pdev = pcifront_get_pdev(sd);
32827
32828+ pax_track_stack();
32829+
32830 err = do_pci_op(pdev, &op);
32831
32832 /* What should do for error ? */
32833@@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
32834 struct pcifront_sd *sd = dev->bus->sysdata;
32835 struct pcifront_device *pdev = pcifront_get_pdev(sd);
32836
32837+ pax_track_stack();
32838+
32839 err = do_pci_op(pdev, &op);
32840 if (likely(!err)) {
32841 vector[0] = op.value;
32842diff -urNp linux-3.0.7/drivers/platform/x86/thinkpad_acpi.c linux-3.0.7/drivers/platform/x86/thinkpad_acpi.c
32843--- linux-3.0.7/drivers/platform/x86/thinkpad_acpi.c 2011-07-21 22:17:23.000000000 -0400
32844+++ linux-3.0.7/drivers/platform/x86/thinkpad_acpi.c 2011-08-23 21:47:55.000000000 -0400
32845@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
32846 return 0;
32847 }
32848
32849-void static hotkey_mask_warn_incomplete_mask(void)
32850+static void hotkey_mask_warn_incomplete_mask(void)
32851 {
32852 /* log only what the user can fix... */
32853 const u32 wantedmask = hotkey_driver_mask &
32854diff -urNp linux-3.0.7/drivers/pnp/pnpbios/bioscalls.c linux-3.0.7/drivers/pnp/pnpbios/bioscalls.c
32855--- linux-3.0.7/drivers/pnp/pnpbios/bioscalls.c 2011-07-21 22:17:23.000000000 -0400
32856+++ linux-3.0.7/drivers/pnp/pnpbios/bioscalls.c 2011-08-23 21:47:55.000000000 -0400
32857@@ -59,7 +59,7 @@ do { \
32858 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
32859 } while(0)
32860
32861-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
32862+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
32863 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
32864
32865 /*
32866@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
32867
32868 cpu = get_cpu();
32869 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
32870+
32871+ pax_open_kernel();
32872 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
32873+ pax_close_kernel();
32874
32875 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
32876 spin_lock_irqsave(&pnp_bios_lock, flags);
32877@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
32878 :"memory");
32879 spin_unlock_irqrestore(&pnp_bios_lock, flags);
32880
32881+ pax_open_kernel();
32882 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
32883+ pax_close_kernel();
32884+
32885 put_cpu();
32886
32887 /* If we get here and this is set then the PnP BIOS faulted on us. */
32888@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
32889 return status;
32890 }
32891
32892-void pnpbios_calls_init(union pnp_bios_install_struct *header)
32893+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
32894 {
32895 int i;
32896
32897@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
32898 pnp_bios_callpoint.offset = header->fields.pm16offset;
32899 pnp_bios_callpoint.segment = PNP_CS16;
32900
32901+ pax_open_kernel();
32902+
32903 for_each_possible_cpu(i) {
32904 struct desc_struct *gdt = get_cpu_gdt_table(i);
32905 if (!gdt)
32906@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
32907 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
32908 (unsigned long)__va(header->fields.pm16dseg));
32909 }
32910+
32911+ pax_close_kernel();
32912 }
32913diff -urNp linux-3.0.7/drivers/pnp/resource.c linux-3.0.7/drivers/pnp/resource.c
32914--- linux-3.0.7/drivers/pnp/resource.c 2011-07-21 22:17:23.000000000 -0400
32915+++ linux-3.0.7/drivers/pnp/resource.c 2011-08-23 21:47:55.000000000 -0400
32916@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
32917 return 1;
32918
32919 /* check if the resource is valid */
32920- if (*irq < 0 || *irq > 15)
32921+ if (*irq > 15)
32922 return 0;
32923
32924 /* check if the resource is reserved */
32925@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
32926 return 1;
32927
32928 /* check if the resource is valid */
32929- if (*dma < 0 || *dma == 4 || *dma > 7)
32930+ if (*dma == 4 || *dma > 7)
32931 return 0;
32932
32933 /* check if the resource is reserved */
32934diff -urNp linux-3.0.7/drivers/power/bq27x00_battery.c linux-3.0.7/drivers/power/bq27x00_battery.c
32935--- linux-3.0.7/drivers/power/bq27x00_battery.c 2011-07-21 22:17:23.000000000 -0400
32936+++ linux-3.0.7/drivers/power/bq27x00_battery.c 2011-08-23 21:47:55.000000000 -0400
32937@@ -67,7 +67,7 @@
32938 struct bq27x00_device_info;
32939 struct bq27x00_access_methods {
32940 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
32941-};
32942+} __no_const;
32943
32944 enum bq27x00_chip { BQ27000, BQ27500 };
32945
32946diff -urNp linux-3.0.7/drivers/regulator/max8660.c linux-3.0.7/drivers/regulator/max8660.c
32947--- linux-3.0.7/drivers/regulator/max8660.c 2011-07-21 22:17:23.000000000 -0400
32948+++ linux-3.0.7/drivers/regulator/max8660.c 2011-08-23 21:47:55.000000000 -0400
32949@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struc
32950 max8660->shadow_regs[MAX8660_OVER1] = 5;
32951 } else {
32952 /* Otherwise devices can be toggled via software */
32953- max8660_dcdc_ops.enable = max8660_dcdc_enable;
32954- max8660_dcdc_ops.disable = max8660_dcdc_disable;
32955+ pax_open_kernel();
32956+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
32957+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
32958+ pax_close_kernel();
32959 }
32960
32961 /*
32962diff -urNp linux-3.0.7/drivers/regulator/mc13892-regulator.c linux-3.0.7/drivers/regulator/mc13892-regulator.c
32963--- linux-3.0.7/drivers/regulator/mc13892-regulator.c 2011-07-21 22:17:23.000000000 -0400
32964+++ linux-3.0.7/drivers/regulator/mc13892-regulator.c 2011-08-23 21:47:55.000000000 -0400
32965@@ -564,10 +564,12 @@ static int __devinit mc13892_regulator_p
32966 }
32967 mc13xxx_unlock(mc13892);
32968
32969- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
32970+ pax_open_kernel();
32971+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
32972 = mc13892_vcam_set_mode;
32973- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
32974+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
32975 = mc13892_vcam_get_mode;
32976+ pax_close_kernel();
32977 for (i = 0; i < pdata->num_regulators; i++) {
32978 init_data = &pdata->regulators[i];
32979 priv->regulators[i] = regulator_register(
32980diff -urNp linux-3.0.7/drivers/rtc/rtc-dev.c linux-3.0.7/drivers/rtc/rtc-dev.c
32981--- linux-3.0.7/drivers/rtc/rtc-dev.c 2011-07-21 22:17:23.000000000 -0400
32982+++ linux-3.0.7/drivers/rtc/rtc-dev.c 2011-08-23 21:48:14.000000000 -0400
32983@@ -14,6 +14,7 @@
32984 #include <linux/module.h>
32985 #include <linux/rtc.h>
32986 #include <linux/sched.h>
32987+#include <linux/grsecurity.h>
32988 #include "rtc-core.h"
32989
32990 static dev_t rtc_devt;
32991@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
32992 if (copy_from_user(&tm, uarg, sizeof(tm)))
32993 return -EFAULT;
32994
32995+ gr_log_timechange();
32996+
32997 return rtc_set_time(rtc, &tm);
32998
32999 case RTC_PIE_ON:
33000diff -urNp linux-3.0.7/drivers/scsi/aacraid/aacraid.h linux-3.0.7/drivers/scsi/aacraid/aacraid.h
33001--- linux-3.0.7/drivers/scsi/aacraid/aacraid.h 2011-07-21 22:17:23.000000000 -0400
33002+++ linux-3.0.7/drivers/scsi/aacraid/aacraid.h 2011-08-23 21:47:55.000000000 -0400
33003@@ -492,7 +492,7 @@ struct adapter_ops
33004 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
33005 /* Administrative operations */
33006 int (*adapter_comm)(struct aac_dev * dev, int comm);
33007-};
33008+} __no_const;
33009
33010 /*
33011 * Define which interrupt handler needs to be installed
33012diff -urNp linux-3.0.7/drivers/scsi/aacraid/commctrl.c linux-3.0.7/drivers/scsi/aacraid/commctrl.c
33013--- linux-3.0.7/drivers/scsi/aacraid/commctrl.c 2011-07-21 22:17:23.000000000 -0400
33014+++ linux-3.0.7/drivers/scsi/aacraid/commctrl.c 2011-08-23 21:48:14.000000000 -0400
33015@@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
33016 u32 actual_fibsize64, actual_fibsize = 0;
33017 int i;
33018
33019+ pax_track_stack();
33020
33021 if (dev->in_reset) {
33022 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
33023diff -urNp linux-3.0.7/drivers/scsi/aacraid/linit.c linux-3.0.7/drivers/scsi/aacraid/linit.c
33024--- linux-3.0.7/drivers/scsi/aacraid/linit.c 2011-07-21 22:17:23.000000000 -0400
33025+++ linux-3.0.7/drivers/scsi/aacraid/linit.c 2011-10-11 10:44:33.000000000 -0400
33026@@ -92,7 +92,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_
33027 #elif defined(__devinitconst)
33028 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
33029 #else
33030-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
33031+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
33032 #endif
33033 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
33034 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
33035diff -urNp linux-3.0.7/drivers/scsi/aic94xx/aic94xx_init.c linux-3.0.7/drivers/scsi/aic94xx/aic94xx_init.c
33036--- linux-3.0.7/drivers/scsi/aic94xx/aic94xx_init.c 2011-07-21 22:17:23.000000000 -0400
33037+++ linux-3.0.7/drivers/scsi/aic94xx/aic94xx_init.c 2011-10-11 10:44:33.000000000 -0400
33038@@ -1012,7 +1012,7 @@ static struct sas_domain_function_templa
33039 .lldd_control_phy = asd_control_phy,
33040 };
33041
33042-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
33043+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
33044 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
33045 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
33046 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
33047diff -urNp linux-3.0.7/drivers/scsi/bfa/bfad.c linux-3.0.7/drivers/scsi/bfa/bfad.c
33048--- linux-3.0.7/drivers/scsi/bfa/bfad.c 2011-07-21 22:17:23.000000000 -0400
33049+++ linux-3.0.7/drivers/scsi/bfa/bfad.c 2011-08-23 21:48:14.000000000 -0400
33050@@ -1032,6 +1032,8 @@ bfad_start_ops(struct bfad_s *bfad) {
33051 struct bfad_vport_s *vport, *vport_new;
33052 struct bfa_fcs_driver_info_s driver_info;
33053
33054+ pax_track_stack();
33055+
33056 /* Fill the driver_info info to fcs*/
33057 memset(&driver_info, 0, sizeof(driver_info));
33058 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
33059diff -urNp linux-3.0.7/drivers/scsi/bfa/bfa_fcs_lport.c linux-3.0.7/drivers/scsi/bfa/bfa_fcs_lport.c
33060--- linux-3.0.7/drivers/scsi/bfa/bfa_fcs_lport.c 2011-07-21 22:17:23.000000000 -0400
33061+++ linux-3.0.7/drivers/scsi/bfa/bfa_fcs_lport.c 2011-08-23 21:48:14.000000000 -0400
33062@@ -1559,6 +1559,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
33063 u16 len, count;
33064 u16 templen;
33065
33066+ pax_track_stack();
33067+
33068 /*
33069 * get hba attributes
33070 */
33071@@ -1836,6 +1838,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
33072 u8 count = 0;
33073 u16 templen;
33074
33075+ pax_track_stack();
33076+
33077 /*
33078 * get port attributes
33079 */
33080diff -urNp linux-3.0.7/drivers/scsi/bfa/bfa_fcs_rport.c linux-3.0.7/drivers/scsi/bfa/bfa_fcs_rport.c
33081--- linux-3.0.7/drivers/scsi/bfa/bfa_fcs_rport.c 2011-07-21 22:17:23.000000000 -0400
33082+++ linux-3.0.7/drivers/scsi/bfa/bfa_fcs_rport.c 2011-08-23 21:48:14.000000000 -0400
33083@@ -1844,6 +1844,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
33084 struct fc_rpsc_speed_info_s speeds;
33085 struct bfa_port_attr_s pport_attr;
33086
33087+ pax_track_stack();
33088+
33089 bfa_trc(port->fcs, rx_fchs->s_id);
33090 bfa_trc(port->fcs, rx_fchs->d_id);
33091
33092diff -urNp linux-3.0.7/drivers/scsi/bfa/bfa.h linux-3.0.7/drivers/scsi/bfa/bfa.h
33093--- linux-3.0.7/drivers/scsi/bfa/bfa.h 2011-07-21 22:17:23.000000000 -0400
33094+++ linux-3.0.7/drivers/scsi/bfa/bfa.h 2011-08-23 21:47:55.000000000 -0400
33095@@ -238,7 +238,7 @@ struct bfa_hwif_s {
33096 u32 *nvecs, u32 *maxvec);
33097 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
33098 u32 *end);
33099-};
33100+} __no_const;
33101 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
33102
33103 struct bfa_iocfc_s {
33104diff -urNp linux-3.0.7/drivers/scsi/bfa/bfa_ioc.h linux-3.0.7/drivers/scsi/bfa/bfa_ioc.h
33105--- linux-3.0.7/drivers/scsi/bfa/bfa_ioc.h 2011-07-21 22:17:23.000000000 -0400
33106+++ linux-3.0.7/drivers/scsi/bfa/bfa_ioc.h 2011-08-23 21:47:55.000000000 -0400
33107@@ -196,7 +196,7 @@ struct bfa_ioc_cbfn_s {
33108 bfa_ioc_disable_cbfn_t disable_cbfn;
33109 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
33110 bfa_ioc_reset_cbfn_t reset_cbfn;
33111-};
33112+} __no_const;
33113
33114 /*
33115 * Heartbeat failure notification queue element.
33116@@ -268,7 +268,7 @@ struct bfa_ioc_hwif_s {
33117 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
33118 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
33119 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
33120-};
33121+} __no_const;
33122
33123 #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
33124 #define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
33125diff -urNp linux-3.0.7/drivers/scsi/BusLogic.c linux-3.0.7/drivers/scsi/BusLogic.c
33126--- linux-3.0.7/drivers/scsi/BusLogic.c 2011-07-21 22:17:23.000000000 -0400
33127+++ linux-3.0.7/drivers/scsi/BusLogic.c 2011-08-23 21:48:14.000000000 -0400
33128@@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
33129 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
33130 *PrototypeHostAdapter)
33131 {
33132+ pax_track_stack();
33133+
33134 /*
33135 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
33136 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
33137diff -urNp linux-3.0.7/drivers/scsi/dpt_i2o.c linux-3.0.7/drivers/scsi/dpt_i2o.c
33138--- linux-3.0.7/drivers/scsi/dpt_i2o.c 2011-07-21 22:17:23.000000000 -0400
33139+++ linux-3.0.7/drivers/scsi/dpt_i2o.c 2011-08-23 21:48:14.000000000 -0400
33140@@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
33141 dma_addr_t addr;
33142 ulong flags = 0;
33143
33144+ pax_track_stack();
33145+
33146 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
33147 // get user msg size in u32s
33148 if(get_user(size, &user_msg[0])){
33149@@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
33150 s32 rcode;
33151 dma_addr_t addr;
33152
33153+ pax_track_stack();
33154+
33155 memset(msg, 0 , sizeof(msg));
33156 len = scsi_bufflen(cmd);
33157 direction = 0x00000000;
33158diff -urNp linux-3.0.7/drivers/scsi/eata.c linux-3.0.7/drivers/scsi/eata.c
33159--- linux-3.0.7/drivers/scsi/eata.c 2011-07-21 22:17:23.000000000 -0400
33160+++ linux-3.0.7/drivers/scsi/eata.c 2011-08-23 21:48:14.000000000 -0400
33161@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
33162 struct hostdata *ha;
33163 char name[16];
33164
33165+ pax_track_stack();
33166+
33167 sprintf(name, "%s%d", driver_name, j);
33168
33169 if (!request_region(port_base, REGION_SIZE, driver_name)) {
33170diff -urNp linux-3.0.7/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.0.7/drivers/scsi/fcoe/fcoe_ctlr.c
33171--- linux-3.0.7/drivers/scsi/fcoe/fcoe_ctlr.c 2011-07-21 22:17:23.000000000 -0400
33172+++ linux-3.0.7/drivers/scsi/fcoe/fcoe_ctlr.c 2011-08-23 21:48:14.000000000 -0400
33173@@ -2503,6 +2503,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
33174 } buf;
33175 int rc;
33176
33177+ pax_track_stack();
33178+
33179 fiph = (struct fip_header *)skb->data;
33180 sub = fiph->fip_subcode;
33181
33182diff -urNp linux-3.0.7/drivers/scsi/gdth.c linux-3.0.7/drivers/scsi/gdth.c
33183--- linux-3.0.7/drivers/scsi/gdth.c 2011-07-21 22:17:23.000000000 -0400
33184+++ linux-3.0.7/drivers/scsi/gdth.c 2011-08-23 21:48:14.000000000 -0400
33185@@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
33186 unsigned long flags;
33187 gdth_ha_str *ha;
33188
33189+ pax_track_stack();
33190+
33191 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
33192 return -EFAULT;
33193 ha = gdth_find_ha(ldrv.ionode);
33194@@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
33195 gdth_ha_str *ha;
33196 int rval;
33197
33198+ pax_track_stack();
33199+
33200 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
33201 res.number >= MAX_HDRIVES)
33202 return -EFAULT;
33203@@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
33204 gdth_ha_str *ha;
33205 int rval;
33206
33207+ pax_track_stack();
33208+
33209 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
33210 return -EFAULT;
33211 ha = gdth_find_ha(gen.ionode);
33212@@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
33213 int i;
33214 gdth_cmd_str gdtcmd;
33215 char cmnd[MAX_COMMAND_SIZE];
33216+
33217+ pax_track_stack();
33218+
33219 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
33220
33221 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
33222diff -urNp linux-3.0.7/drivers/scsi/gdth_proc.c linux-3.0.7/drivers/scsi/gdth_proc.c
33223--- linux-3.0.7/drivers/scsi/gdth_proc.c 2011-07-21 22:17:23.000000000 -0400
33224+++ linux-3.0.7/drivers/scsi/gdth_proc.c 2011-08-23 21:48:14.000000000 -0400
33225@@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
33226 u64 paddr;
33227
33228 char cmnd[MAX_COMMAND_SIZE];
33229+
33230+ pax_track_stack();
33231+
33232 memset(cmnd, 0xff, 12);
33233 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
33234
33235@@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
33236 gdth_hget_str *phg;
33237 char cmnd[MAX_COMMAND_SIZE];
33238
33239+ pax_track_stack();
33240+
33241 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
33242 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
33243 if (!gdtcmd || !estr)
33244diff -urNp linux-3.0.7/drivers/scsi/hosts.c linux-3.0.7/drivers/scsi/hosts.c
33245--- linux-3.0.7/drivers/scsi/hosts.c 2011-07-21 22:17:23.000000000 -0400
33246+++ linux-3.0.7/drivers/scsi/hosts.c 2011-08-23 21:47:55.000000000 -0400
33247@@ -42,7 +42,7 @@
33248 #include "scsi_logging.h"
33249
33250
33251-static atomic_t scsi_host_next_hn; /* host_no for next new host */
33252+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
33253
33254
33255 static void scsi_host_cls_release(struct device *dev)
33256@@ -354,7 +354,7 @@ struct Scsi_Host *scsi_host_alloc(struct
33257 * subtract one because we increment first then return, but we need to
33258 * know what the next host number was before increment
33259 */
33260- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
33261+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
33262 shost->dma_channel = 0xff;
33263
33264 /* These three are default values which can be overridden */
33265diff -urNp linux-3.0.7/drivers/scsi/hpsa.c linux-3.0.7/drivers/scsi/hpsa.c
33266--- linux-3.0.7/drivers/scsi/hpsa.c 2011-10-16 21:54:54.000000000 -0400
33267+++ linux-3.0.7/drivers/scsi/hpsa.c 2011-10-16 21:55:27.000000000 -0400
33268@@ -498,7 +498,7 @@ static inline u32 next_command(struct ct
33269 u32 a;
33270
33271 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
33272- return h->access.command_completed(h);
33273+ return h->access->command_completed(h);
33274
33275 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
33276 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
33277@@ -2955,7 +2955,7 @@ static void start_io(struct ctlr_info *h
33278 while (!list_empty(&h->reqQ)) {
33279 c = list_entry(h->reqQ.next, struct CommandList, list);
33280 /* can't do anything if fifo is full */
33281- if ((h->access.fifo_full(h))) {
33282+ if ((h->access->fifo_full(h))) {
33283 dev_warn(&h->pdev->dev, "fifo full\n");
33284 break;
33285 }
33286@@ -2965,7 +2965,7 @@ static void start_io(struct ctlr_info *h
33287 h->Qdepth--;
33288
33289 /* Tell the controller execute command */
33290- h->access.submit_command(h, c);
33291+ h->access->submit_command(h, c);
33292
33293 /* Put job onto the completed Q */
33294 addQ(&h->cmpQ, c);
33295@@ -2974,17 +2974,17 @@ static void start_io(struct ctlr_info *h
33296
33297 static inline unsigned long get_next_completion(struct ctlr_info *h)
33298 {
33299- return h->access.command_completed(h);
33300+ return h->access->command_completed(h);
33301 }
33302
33303 static inline bool interrupt_pending(struct ctlr_info *h)
33304 {
33305- return h->access.intr_pending(h);
33306+ return h->access->intr_pending(h);
33307 }
33308
33309 static inline long interrupt_not_for_us(struct ctlr_info *h)
33310 {
33311- return (h->access.intr_pending(h) == 0) ||
33312+ return (h->access->intr_pending(h) == 0) ||
33313 (h->interrupts_enabled == 0);
33314 }
33315
33316@@ -3874,7 +3874,7 @@ static int __devinit hpsa_pci_init(struc
33317 if (prod_index < 0)
33318 return -ENODEV;
33319 h->product_name = products[prod_index].product_name;
33320- h->access = *(products[prod_index].access);
33321+ h->access = products[prod_index].access;
33322
33323 if (hpsa_board_disabled(h->pdev)) {
33324 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
33325@@ -4151,7 +4151,7 @@ reinit_after_soft_reset:
33326 }
33327
33328 /* make sure the board interrupts are off */
33329- h->access.set_intr_mask(h, HPSA_INTR_OFF);
33330+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
33331
33332 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
33333 goto clean2;
33334@@ -4185,7 +4185,7 @@ reinit_after_soft_reset:
33335 * fake ones to scoop up any residual completions.
33336 */
33337 spin_lock_irqsave(&h->lock, flags);
33338- h->access.set_intr_mask(h, HPSA_INTR_OFF);
33339+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
33340 spin_unlock_irqrestore(&h->lock, flags);
33341 free_irq(h->intr[h->intr_mode], h);
33342 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
33343@@ -4204,9 +4204,9 @@ reinit_after_soft_reset:
33344 dev_info(&h->pdev->dev, "Board READY.\n");
33345 dev_info(&h->pdev->dev,
33346 "Waiting for stale completions to drain.\n");
33347- h->access.set_intr_mask(h, HPSA_INTR_ON);
33348+ h->access->set_intr_mask(h, HPSA_INTR_ON);
33349 msleep(10000);
33350- h->access.set_intr_mask(h, HPSA_INTR_OFF);
33351+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
33352
33353 rc = controller_reset_failed(h->cfgtable);
33354 if (rc)
33355@@ -4227,7 +4227,7 @@ reinit_after_soft_reset:
33356 }
33357
33358 /* Turn the interrupts on so we can service requests */
33359- h->access.set_intr_mask(h, HPSA_INTR_ON);
33360+ h->access->set_intr_mask(h, HPSA_INTR_ON);
33361
33362 hpsa_hba_inquiry(h);
33363 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
33364@@ -4280,7 +4280,7 @@ static void hpsa_shutdown(struct pci_dev
33365 * To write all data in the battery backed cache to disks
33366 */
33367 hpsa_flush_cache(h);
33368- h->access.set_intr_mask(h, HPSA_INTR_OFF);
33369+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
33370 free_irq(h->intr[h->intr_mode], h);
33371 #ifdef CONFIG_PCI_MSI
33372 if (h->msix_vector)
33373@@ -4443,7 +4443,7 @@ static __devinit void hpsa_enter_perform
33374 return;
33375 }
33376 /* Change the access methods to the performant access methods */
33377- h->access = SA5_performant_access;
33378+ h->access = &SA5_performant_access;
33379 h->transMethod = CFGTBL_Trans_Performant;
33380 }
33381
33382diff -urNp linux-3.0.7/drivers/scsi/hpsa.h linux-3.0.7/drivers/scsi/hpsa.h
33383--- linux-3.0.7/drivers/scsi/hpsa.h 2011-09-02 18:11:21.000000000 -0400
33384+++ linux-3.0.7/drivers/scsi/hpsa.h 2011-08-23 21:47:55.000000000 -0400
33385@@ -73,7 +73,7 @@ struct ctlr_info {
33386 unsigned int msix_vector;
33387 unsigned int msi_vector;
33388 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
33389- struct access_method access;
33390+ struct access_method *access;
33391
33392 /* queue and queue Info */
33393 struct list_head reqQ;
33394diff -urNp linux-3.0.7/drivers/scsi/ips.h linux-3.0.7/drivers/scsi/ips.h
33395--- linux-3.0.7/drivers/scsi/ips.h 2011-07-21 22:17:23.000000000 -0400
33396+++ linux-3.0.7/drivers/scsi/ips.h 2011-08-23 21:47:55.000000000 -0400
33397@@ -1027,7 +1027,7 @@ typedef struct {
33398 int (*intr)(struct ips_ha *);
33399 void (*enableint)(struct ips_ha *);
33400 uint32_t (*statupd)(struct ips_ha *);
33401-} ips_hw_func_t;
33402+} __no_const ips_hw_func_t;
33403
33404 typedef struct ips_ha {
33405 uint8_t ha_id[IPS_MAX_CHANNELS+1];
33406diff -urNp linux-3.0.7/drivers/scsi/libfc/fc_exch.c linux-3.0.7/drivers/scsi/libfc/fc_exch.c
33407--- linux-3.0.7/drivers/scsi/libfc/fc_exch.c 2011-07-21 22:17:23.000000000 -0400
33408+++ linux-3.0.7/drivers/scsi/libfc/fc_exch.c 2011-08-23 21:47:55.000000000 -0400
33409@@ -105,12 +105,12 @@ struct fc_exch_mgr {
33410 * all together if not used XXX
33411 */
33412 struct {
33413- atomic_t no_free_exch;
33414- atomic_t no_free_exch_xid;
33415- atomic_t xid_not_found;
33416- atomic_t xid_busy;
33417- atomic_t seq_not_found;
33418- atomic_t non_bls_resp;
33419+ atomic_unchecked_t no_free_exch;
33420+ atomic_unchecked_t no_free_exch_xid;
33421+ atomic_unchecked_t xid_not_found;
33422+ atomic_unchecked_t xid_busy;
33423+ atomic_unchecked_t seq_not_found;
33424+ atomic_unchecked_t non_bls_resp;
33425 } stats;
33426 };
33427
33428@@ -700,7 +700,7 @@ static struct fc_exch *fc_exch_em_alloc(
33429 /* allocate memory for exchange */
33430 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
33431 if (!ep) {
33432- atomic_inc(&mp->stats.no_free_exch);
33433+ atomic_inc_unchecked(&mp->stats.no_free_exch);
33434 goto out;
33435 }
33436 memset(ep, 0, sizeof(*ep));
33437@@ -761,7 +761,7 @@ out:
33438 return ep;
33439 err:
33440 spin_unlock_bh(&pool->lock);
33441- atomic_inc(&mp->stats.no_free_exch_xid);
33442+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
33443 mempool_free(ep, mp->ep_pool);
33444 return NULL;
33445 }
33446@@ -906,7 +906,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33447 xid = ntohs(fh->fh_ox_id); /* we originated exch */
33448 ep = fc_exch_find(mp, xid);
33449 if (!ep) {
33450- atomic_inc(&mp->stats.xid_not_found);
33451+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33452 reject = FC_RJT_OX_ID;
33453 goto out;
33454 }
33455@@ -936,7 +936,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33456 ep = fc_exch_find(mp, xid);
33457 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
33458 if (ep) {
33459- atomic_inc(&mp->stats.xid_busy);
33460+ atomic_inc_unchecked(&mp->stats.xid_busy);
33461 reject = FC_RJT_RX_ID;
33462 goto rel;
33463 }
33464@@ -947,7 +947,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33465 }
33466 xid = ep->xid; /* get our XID */
33467 } else if (!ep) {
33468- atomic_inc(&mp->stats.xid_not_found);
33469+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33470 reject = FC_RJT_RX_ID; /* XID not found */
33471 goto out;
33472 }
33473@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33474 } else {
33475 sp = &ep->seq;
33476 if (sp->id != fh->fh_seq_id) {
33477- atomic_inc(&mp->stats.seq_not_found);
33478+ atomic_inc_unchecked(&mp->stats.seq_not_found);
33479 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
33480 goto rel;
33481 }
33482@@ -1392,22 +1392,22 @@ static void fc_exch_recv_seq_resp(struct
33483
33484 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
33485 if (!ep) {
33486- atomic_inc(&mp->stats.xid_not_found);
33487+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33488 goto out;
33489 }
33490 if (ep->esb_stat & ESB_ST_COMPLETE) {
33491- atomic_inc(&mp->stats.xid_not_found);
33492+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33493 goto rel;
33494 }
33495 if (ep->rxid == FC_XID_UNKNOWN)
33496 ep->rxid = ntohs(fh->fh_rx_id);
33497 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
33498- atomic_inc(&mp->stats.xid_not_found);
33499+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33500 goto rel;
33501 }
33502 if (ep->did != ntoh24(fh->fh_s_id) &&
33503 ep->did != FC_FID_FLOGI) {
33504- atomic_inc(&mp->stats.xid_not_found);
33505+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33506 goto rel;
33507 }
33508 sof = fr_sof(fp);
33509@@ -1416,7 +1416,7 @@ static void fc_exch_recv_seq_resp(struct
33510 sp->ssb_stat |= SSB_ST_RESP;
33511 sp->id = fh->fh_seq_id;
33512 } else if (sp->id != fh->fh_seq_id) {
33513- atomic_inc(&mp->stats.seq_not_found);
33514+ atomic_inc_unchecked(&mp->stats.seq_not_found);
33515 goto rel;
33516 }
33517
33518@@ -1480,9 +1480,9 @@ static void fc_exch_recv_resp(struct fc_
33519 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
33520
33521 if (!sp)
33522- atomic_inc(&mp->stats.xid_not_found);
33523+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33524 else
33525- atomic_inc(&mp->stats.non_bls_resp);
33526+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
33527
33528 fc_frame_free(fp);
33529 }
33530diff -urNp linux-3.0.7/drivers/scsi/libsas/sas_ata.c linux-3.0.7/drivers/scsi/libsas/sas_ata.c
33531--- linux-3.0.7/drivers/scsi/libsas/sas_ata.c 2011-07-21 22:17:23.000000000 -0400
33532+++ linux-3.0.7/drivers/scsi/libsas/sas_ata.c 2011-08-23 21:47:55.000000000 -0400
33533@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sa
33534 .postreset = ata_std_postreset,
33535 .error_handler = ata_std_error_handler,
33536 .post_internal_cmd = sas_ata_post_internal,
33537- .qc_defer = ata_std_qc_defer,
33538+ .qc_defer = ata_std_qc_defer,
33539 .qc_prep = ata_noop_qc_prep,
33540 .qc_issue = sas_ata_qc_issue,
33541 .qc_fill_rtf = sas_ata_qc_fill_rtf,
33542diff -urNp linux-3.0.7/drivers/scsi/lpfc/lpfc_debugfs.c linux-3.0.7/drivers/scsi/lpfc/lpfc_debugfs.c
33543--- linux-3.0.7/drivers/scsi/lpfc/lpfc_debugfs.c 2011-07-21 22:17:23.000000000 -0400
33544+++ linux-3.0.7/drivers/scsi/lpfc/lpfc_debugfs.c 2011-08-23 21:48:14.000000000 -0400
33545@@ -104,7 +104,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
33546
33547 #include <linux/debugfs.h>
33548
33549-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
33550+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
33551 static unsigned long lpfc_debugfs_start_time = 0L;
33552
33553 /* iDiag */
33554@@ -141,7 +141,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
33555 lpfc_debugfs_enable = 0;
33556
33557 len = 0;
33558- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
33559+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
33560 (lpfc_debugfs_max_disc_trc - 1);
33561 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
33562 dtp = vport->disc_trc + i;
33563@@ -202,7 +202,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
33564 lpfc_debugfs_enable = 0;
33565
33566 len = 0;
33567- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
33568+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
33569 (lpfc_debugfs_max_slow_ring_trc - 1);
33570 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
33571 dtp = phba->slow_ring_trc + i;
33572@@ -380,6 +380,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
33573 uint32_t *ptr;
33574 char buffer[1024];
33575
33576+ pax_track_stack();
33577+
33578 off = 0;
33579 spin_lock_irq(&phba->hbalock);
33580
33581@@ -617,14 +619,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
33582 !vport || !vport->disc_trc)
33583 return;
33584
33585- index = atomic_inc_return(&vport->disc_trc_cnt) &
33586+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
33587 (lpfc_debugfs_max_disc_trc - 1);
33588 dtp = vport->disc_trc + index;
33589 dtp->fmt = fmt;
33590 dtp->data1 = data1;
33591 dtp->data2 = data2;
33592 dtp->data3 = data3;
33593- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
33594+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
33595 dtp->jif = jiffies;
33596 #endif
33597 return;
33598@@ -655,14 +657,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
33599 !phba || !phba->slow_ring_trc)
33600 return;
33601
33602- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
33603+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
33604 (lpfc_debugfs_max_slow_ring_trc - 1);
33605 dtp = phba->slow_ring_trc + index;
33606 dtp->fmt = fmt;
33607 dtp->data1 = data1;
33608 dtp->data2 = data2;
33609 dtp->data3 = data3;
33610- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
33611+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
33612 dtp->jif = jiffies;
33613 #endif
33614 return;
33615@@ -2606,7 +2608,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
33616 "slow_ring buffer\n");
33617 goto debug_failed;
33618 }
33619- atomic_set(&phba->slow_ring_trc_cnt, 0);
33620+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
33621 memset(phba->slow_ring_trc, 0,
33622 (sizeof(struct lpfc_debugfs_trc) *
33623 lpfc_debugfs_max_slow_ring_trc));
33624@@ -2652,7 +2654,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
33625 "buffer\n");
33626 goto debug_failed;
33627 }
33628- atomic_set(&vport->disc_trc_cnt, 0);
33629+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
33630
33631 snprintf(name, sizeof(name), "discovery_trace");
33632 vport->debug_disc_trc =
33633diff -urNp linux-3.0.7/drivers/scsi/lpfc/lpfc.h linux-3.0.7/drivers/scsi/lpfc/lpfc.h
33634--- linux-3.0.7/drivers/scsi/lpfc/lpfc.h 2011-10-16 21:54:54.000000000 -0400
33635+++ linux-3.0.7/drivers/scsi/lpfc/lpfc.h 2011-10-16 21:55:27.000000000 -0400
33636@@ -425,7 +425,7 @@ struct lpfc_vport {
33637 struct dentry *debug_nodelist;
33638 struct dentry *vport_debugfs_root;
33639 struct lpfc_debugfs_trc *disc_trc;
33640- atomic_t disc_trc_cnt;
33641+ atomic_unchecked_t disc_trc_cnt;
33642 #endif
33643 uint8_t stat_data_enabled;
33644 uint8_t stat_data_blocked;
33645@@ -832,8 +832,8 @@ struct lpfc_hba {
33646 struct timer_list fabric_block_timer;
33647 unsigned long bit_flags;
33648 #define FABRIC_COMANDS_BLOCKED 0
33649- atomic_t num_rsrc_err;
33650- atomic_t num_cmd_success;
33651+ atomic_unchecked_t num_rsrc_err;
33652+ atomic_unchecked_t num_cmd_success;
33653 unsigned long last_rsrc_error_time;
33654 unsigned long last_ramp_down_time;
33655 unsigned long last_ramp_up_time;
33656@@ -847,7 +847,7 @@ struct lpfc_hba {
33657 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
33658 struct dentry *debug_slow_ring_trc;
33659 struct lpfc_debugfs_trc *slow_ring_trc;
33660- atomic_t slow_ring_trc_cnt;
33661+ atomic_unchecked_t slow_ring_trc_cnt;
33662 /* iDiag debugfs sub-directory */
33663 struct dentry *idiag_root;
33664 struct dentry *idiag_pci_cfg;
33665diff -urNp linux-3.0.7/drivers/scsi/lpfc/lpfc_init.c linux-3.0.7/drivers/scsi/lpfc/lpfc_init.c
33666--- linux-3.0.7/drivers/scsi/lpfc/lpfc_init.c 2011-10-16 21:54:54.000000000 -0400
33667+++ linux-3.0.7/drivers/scsi/lpfc/lpfc_init.c 2011-10-16 21:55:27.000000000 -0400
33668@@ -9971,8 +9971,10 @@ lpfc_init(void)
33669 printk(LPFC_COPYRIGHT "\n");
33670
33671 if (lpfc_enable_npiv) {
33672- lpfc_transport_functions.vport_create = lpfc_vport_create;
33673- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
33674+ pax_open_kernel();
33675+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
33676+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
33677+ pax_close_kernel();
33678 }
33679 lpfc_transport_template =
33680 fc_attach_transport(&lpfc_transport_functions);
33681diff -urNp linux-3.0.7/drivers/scsi/lpfc/lpfc_scsi.c linux-3.0.7/drivers/scsi/lpfc/lpfc_scsi.c
33682--- linux-3.0.7/drivers/scsi/lpfc/lpfc_scsi.c 2011-10-16 21:54:54.000000000 -0400
33683+++ linux-3.0.7/drivers/scsi/lpfc/lpfc_scsi.c 2011-10-16 21:55:27.000000000 -0400
33684@@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
33685 uint32_t evt_posted;
33686
33687 spin_lock_irqsave(&phba->hbalock, flags);
33688- atomic_inc(&phba->num_rsrc_err);
33689+ atomic_inc_unchecked(&phba->num_rsrc_err);
33690 phba->last_rsrc_error_time = jiffies;
33691
33692 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
33693@@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
33694 unsigned long flags;
33695 struct lpfc_hba *phba = vport->phba;
33696 uint32_t evt_posted;
33697- atomic_inc(&phba->num_cmd_success);
33698+ atomic_inc_unchecked(&phba->num_cmd_success);
33699
33700 if (vport->cfg_lun_queue_depth <= queue_depth)
33701 return;
33702@@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
33703 unsigned long num_rsrc_err, num_cmd_success;
33704 int i;
33705
33706- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
33707- num_cmd_success = atomic_read(&phba->num_cmd_success);
33708+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
33709+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
33710
33711 vports = lpfc_create_vport_work_array(phba);
33712 if (vports != NULL)
33713@@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
33714 }
33715 }
33716 lpfc_destroy_vport_work_array(phba, vports);
33717- atomic_set(&phba->num_rsrc_err, 0);
33718- atomic_set(&phba->num_cmd_success, 0);
33719+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
33720+ atomic_set_unchecked(&phba->num_cmd_success, 0);
33721 }
33722
33723 /**
33724@@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
33725 }
33726 }
33727 lpfc_destroy_vport_work_array(phba, vports);
33728- atomic_set(&phba->num_rsrc_err, 0);
33729- atomic_set(&phba->num_cmd_success, 0);
33730+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
33731+ atomic_set_unchecked(&phba->num_cmd_success, 0);
33732 }
33733
33734 /**
33735diff -urNp linux-3.0.7/drivers/scsi/megaraid/megaraid_mbox.c linux-3.0.7/drivers/scsi/megaraid/megaraid_mbox.c
33736--- linux-3.0.7/drivers/scsi/megaraid/megaraid_mbox.c 2011-07-21 22:17:23.000000000 -0400
33737+++ linux-3.0.7/drivers/scsi/megaraid/megaraid_mbox.c 2011-08-23 21:48:14.000000000 -0400
33738@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
33739 int rval;
33740 int i;
33741
33742+ pax_track_stack();
33743+
33744 // Allocate memory for the base list of scb for management module.
33745 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
33746
33747diff -urNp linux-3.0.7/drivers/scsi/osd/osd_initiator.c linux-3.0.7/drivers/scsi/osd/osd_initiator.c
33748--- linux-3.0.7/drivers/scsi/osd/osd_initiator.c 2011-07-21 22:17:23.000000000 -0400
33749+++ linux-3.0.7/drivers/scsi/osd/osd_initiator.c 2011-08-23 21:48:14.000000000 -0400
33750@@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
33751 int nelem = ARRAY_SIZE(get_attrs), a = 0;
33752 int ret;
33753
33754+ pax_track_stack();
33755+
33756 or = osd_start_request(od, GFP_KERNEL);
33757 if (!or)
33758 return -ENOMEM;
33759diff -urNp linux-3.0.7/drivers/scsi/pmcraid.c linux-3.0.7/drivers/scsi/pmcraid.c
33760--- linux-3.0.7/drivers/scsi/pmcraid.c 2011-09-02 18:11:21.000000000 -0400
33761+++ linux-3.0.7/drivers/scsi/pmcraid.c 2011-08-23 21:47:56.000000000 -0400
33762@@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
33763 res->scsi_dev = scsi_dev;
33764 scsi_dev->hostdata = res;
33765 res->change_detected = 0;
33766- atomic_set(&res->read_failures, 0);
33767- atomic_set(&res->write_failures, 0);
33768+ atomic_set_unchecked(&res->read_failures, 0);
33769+ atomic_set_unchecked(&res->write_failures, 0);
33770 rc = 0;
33771 }
33772 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
33773@@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct
33774
33775 /* If this was a SCSI read/write command keep count of errors */
33776 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
33777- atomic_inc(&res->read_failures);
33778+ atomic_inc_unchecked(&res->read_failures);
33779 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
33780- atomic_inc(&res->write_failures);
33781+ atomic_inc_unchecked(&res->write_failures);
33782
33783 if (!RES_IS_GSCSI(res->cfg_entry) &&
33784 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
33785@@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
33786 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
33787 * hrrq_id assigned here in queuecommand
33788 */
33789- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
33790+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
33791 pinstance->num_hrrq;
33792 cmd->cmd_done = pmcraid_io_done;
33793
33794@@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
33795 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
33796 * hrrq_id assigned here in queuecommand
33797 */
33798- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
33799+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
33800 pinstance->num_hrrq;
33801
33802 if (request_size) {
33803@@ -4498,7 +4498,7 @@ static void pmcraid_worker_function(stru
33804
33805 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
33806 /* add resources only after host is added into system */
33807- if (!atomic_read(&pinstance->expose_resources))
33808+ if (!atomic_read_unchecked(&pinstance->expose_resources))
33809 return;
33810
33811 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
33812@@ -5332,8 +5332,8 @@ static int __devinit pmcraid_init_instan
33813 init_waitqueue_head(&pinstance->reset_wait_q);
33814
33815 atomic_set(&pinstance->outstanding_cmds, 0);
33816- atomic_set(&pinstance->last_message_id, 0);
33817- atomic_set(&pinstance->expose_resources, 0);
33818+ atomic_set_unchecked(&pinstance->last_message_id, 0);
33819+ atomic_set_unchecked(&pinstance->expose_resources, 0);
33820
33821 INIT_LIST_HEAD(&pinstance->free_res_q);
33822 INIT_LIST_HEAD(&pinstance->used_res_q);
33823@@ -6048,7 +6048,7 @@ static int __devinit pmcraid_probe(
33824 /* Schedule worker thread to handle CCN and take care of adding and
33825 * removing devices to OS
33826 */
33827- atomic_set(&pinstance->expose_resources, 1);
33828+ atomic_set_unchecked(&pinstance->expose_resources, 1);
33829 schedule_work(&pinstance->worker_q);
33830 return rc;
33831
33832diff -urNp linux-3.0.7/drivers/scsi/pmcraid.h linux-3.0.7/drivers/scsi/pmcraid.h
33833--- linux-3.0.7/drivers/scsi/pmcraid.h 2011-07-21 22:17:23.000000000 -0400
33834+++ linux-3.0.7/drivers/scsi/pmcraid.h 2011-08-23 21:47:56.000000000 -0400
33835@@ -749,7 +749,7 @@ struct pmcraid_instance {
33836 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
33837
33838 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
33839- atomic_t last_message_id;
33840+ atomic_unchecked_t last_message_id;
33841
33842 /* configuration table */
33843 struct pmcraid_config_table *cfg_table;
33844@@ -778,7 +778,7 @@ struct pmcraid_instance {
33845 atomic_t outstanding_cmds;
33846
33847 /* should add/delete resources to mid-layer now ?*/
33848- atomic_t expose_resources;
33849+ atomic_unchecked_t expose_resources;
33850
33851
33852
33853@@ -814,8 +814,8 @@ struct pmcraid_resource_entry {
33854 struct pmcraid_config_table_entry_ext cfg_entry_ext;
33855 };
33856 struct scsi_device *scsi_dev; /* Link scsi_device structure */
33857- atomic_t read_failures; /* count of failed READ commands */
33858- atomic_t write_failures; /* count of failed WRITE commands */
33859+ atomic_unchecked_t read_failures; /* count of failed READ commands */
33860+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
33861
33862 /* To indicate add/delete/modify during CCN */
33863 u8 change_detected;
33864diff -urNp linux-3.0.7/drivers/scsi/qla2xxx/qla_def.h linux-3.0.7/drivers/scsi/qla2xxx/qla_def.h
33865--- linux-3.0.7/drivers/scsi/qla2xxx/qla_def.h 2011-07-21 22:17:23.000000000 -0400
33866+++ linux-3.0.7/drivers/scsi/qla2xxx/qla_def.h 2011-08-23 21:47:56.000000000 -0400
33867@@ -2244,7 +2244,7 @@ struct isp_operations {
33868 int (*get_flash_version) (struct scsi_qla_host *, void *);
33869 int (*start_scsi) (srb_t *);
33870 int (*abort_isp) (struct scsi_qla_host *);
33871-};
33872+} __no_const;
33873
33874 /* MSI-X Support *************************************************************/
33875
33876diff -urNp linux-3.0.7/drivers/scsi/qla4xxx/ql4_def.h linux-3.0.7/drivers/scsi/qla4xxx/ql4_def.h
33877--- linux-3.0.7/drivers/scsi/qla4xxx/ql4_def.h 2011-07-21 22:17:23.000000000 -0400
33878+++ linux-3.0.7/drivers/scsi/qla4xxx/ql4_def.h 2011-08-23 21:47:56.000000000 -0400
33879@@ -256,7 +256,7 @@ struct ddb_entry {
33880 atomic_t retry_relogin_timer; /* Min Time between relogins
33881 * (4000 only) */
33882 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
33883- atomic_t relogin_retry_count; /* Num of times relogin has been
33884+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
33885 * retried */
33886
33887 uint16_t port;
33888diff -urNp linux-3.0.7/drivers/scsi/qla4xxx/ql4_init.c linux-3.0.7/drivers/scsi/qla4xxx/ql4_init.c
33889--- linux-3.0.7/drivers/scsi/qla4xxx/ql4_init.c 2011-07-21 22:17:23.000000000 -0400
33890+++ linux-3.0.7/drivers/scsi/qla4xxx/ql4_init.c 2011-08-23 21:47:56.000000000 -0400
33891@@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
33892 ddb_entry->fw_ddb_index = fw_ddb_index;
33893 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
33894 atomic_set(&ddb_entry->relogin_timer, 0);
33895- atomic_set(&ddb_entry->relogin_retry_count, 0);
33896+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
33897 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
33898 list_add_tail(&ddb_entry->list, &ha->ddb_list);
33899 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
33900@@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
33901 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
33902 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
33903 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
33904- atomic_set(&ddb_entry->relogin_retry_count, 0);
33905+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
33906 atomic_set(&ddb_entry->relogin_timer, 0);
33907 clear_bit(DF_RELOGIN, &ddb_entry->flags);
33908 iscsi_unblock_session(ddb_entry->sess);
33909diff -urNp linux-3.0.7/drivers/scsi/qla4xxx/ql4_os.c linux-3.0.7/drivers/scsi/qla4xxx/ql4_os.c
33910--- linux-3.0.7/drivers/scsi/qla4xxx/ql4_os.c 2011-07-21 22:17:23.000000000 -0400
33911+++ linux-3.0.7/drivers/scsi/qla4xxx/ql4_os.c 2011-08-23 21:47:56.000000000 -0400
33912@@ -811,13 +811,13 @@ static void qla4xxx_timer(struct scsi_ql
33913 ddb_entry->fw_ddb_device_state ==
33914 DDB_DS_SESSION_FAILED) {
33915 /* Reset retry relogin timer */
33916- atomic_inc(&ddb_entry->relogin_retry_count);
33917+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
33918 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
33919 " timed out-retrying"
33920 " relogin (%d)\n",
33921 ha->host_no,
33922 ddb_entry->fw_ddb_index,
33923- atomic_read(&ddb_entry->
33924+ atomic_read_unchecked(&ddb_entry->
33925 relogin_retry_count))
33926 );
33927 start_dpc++;
33928diff -urNp linux-3.0.7/drivers/scsi/scsi.c linux-3.0.7/drivers/scsi/scsi.c
33929--- linux-3.0.7/drivers/scsi/scsi.c 2011-07-21 22:17:23.000000000 -0400
33930+++ linux-3.0.7/drivers/scsi/scsi.c 2011-08-23 21:47:56.000000000 -0400
33931@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
33932 unsigned long timeout;
33933 int rtn = 0;
33934
33935- atomic_inc(&cmd->device->iorequest_cnt);
33936+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
33937
33938 /* check if the device is still usable */
33939 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
33940diff -urNp linux-3.0.7/drivers/scsi/scsi_debug.c linux-3.0.7/drivers/scsi/scsi_debug.c
33941--- linux-3.0.7/drivers/scsi/scsi_debug.c 2011-07-21 22:17:23.000000000 -0400
33942+++ linux-3.0.7/drivers/scsi/scsi_debug.c 2011-08-23 21:48:14.000000000 -0400
33943@@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
33944 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
33945 unsigned char *cmd = (unsigned char *)scp->cmnd;
33946
33947+ pax_track_stack();
33948+
33949 if ((errsts = check_readiness(scp, 1, devip)))
33950 return errsts;
33951 memset(arr, 0, sizeof(arr));
33952@@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
33953 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
33954 unsigned char *cmd = (unsigned char *)scp->cmnd;
33955
33956+ pax_track_stack();
33957+
33958 if ((errsts = check_readiness(scp, 1, devip)))
33959 return errsts;
33960 memset(arr, 0, sizeof(arr));
33961diff -urNp linux-3.0.7/drivers/scsi/scsi_lib.c linux-3.0.7/drivers/scsi/scsi_lib.c
33962--- linux-3.0.7/drivers/scsi/scsi_lib.c 2011-09-02 18:11:21.000000000 -0400
33963+++ linux-3.0.7/drivers/scsi/scsi_lib.c 2011-08-23 21:47:56.000000000 -0400
33964@@ -1412,7 +1412,7 @@ static void scsi_kill_request(struct req
33965 shost = sdev->host;
33966 scsi_init_cmd_errh(cmd);
33967 cmd->result = DID_NO_CONNECT << 16;
33968- atomic_inc(&cmd->device->iorequest_cnt);
33969+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
33970
33971 /*
33972 * SCSI request completion path will do scsi_device_unbusy(),
33973@@ -1438,9 +1438,9 @@ static void scsi_softirq_done(struct req
33974
33975 INIT_LIST_HEAD(&cmd->eh_entry);
33976
33977- atomic_inc(&cmd->device->iodone_cnt);
33978+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
33979 if (cmd->result)
33980- atomic_inc(&cmd->device->ioerr_cnt);
33981+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
33982
33983 disposition = scsi_decide_disposition(cmd);
33984 if (disposition != SUCCESS &&
33985diff -urNp linux-3.0.7/drivers/scsi/scsi_sysfs.c linux-3.0.7/drivers/scsi/scsi_sysfs.c
33986--- linux-3.0.7/drivers/scsi/scsi_sysfs.c 2011-07-21 22:17:23.000000000 -0400
33987+++ linux-3.0.7/drivers/scsi/scsi_sysfs.c 2011-08-23 21:47:56.000000000 -0400
33988@@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev,
33989 char *buf) \
33990 { \
33991 struct scsi_device *sdev = to_scsi_device(dev); \
33992- unsigned long long count = atomic_read(&sdev->field); \
33993+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
33994 return snprintf(buf, 20, "0x%llx\n", count); \
33995 } \
33996 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
33997diff -urNp linux-3.0.7/drivers/scsi/scsi_tgt_lib.c linux-3.0.7/drivers/scsi/scsi_tgt_lib.c
33998--- linux-3.0.7/drivers/scsi/scsi_tgt_lib.c 2011-07-21 22:17:23.000000000 -0400
33999+++ linux-3.0.7/drivers/scsi/scsi_tgt_lib.c 2011-10-06 04:17:55.000000000 -0400
34000@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct sc
34001 int err;
34002
34003 dprintk("%lx %u\n", uaddr, len);
34004- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
34005+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
34006 if (err) {
34007 /*
34008 * TODO: need to fixup sg_tablesize, max_segment_size,
34009diff -urNp linux-3.0.7/drivers/scsi/scsi_transport_fc.c linux-3.0.7/drivers/scsi/scsi_transport_fc.c
34010--- linux-3.0.7/drivers/scsi/scsi_transport_fc.c 2011-07-21 22:17:23.000000000 -0400
34011+++ linux-3.0.7/drivers/scsi/scsi_transport_fc.c 2011-08-23 21:47:56.000000000 -0400
34012@@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
34013 * Netlink Infrastructure
34014 */
34015
34016-static atomic_t fc_event_seq;
34017+static atomic_unchecked_t fc_event_seq;
34018
34019 /**
34020 * fc_get_event_number - Obtain the next sequential FC event number
34021@@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
34022 u32
34023 fc_get_event_number(void)
34024 {
34025- return atomic_add_return(1, &fc_event_seq);
34026+ return atomic_add_return_unchecked(1, &fc_event_seq);
34027 }
34028 EXPORT_SYMBOL(fc_get_event_number);
34029
34030@@ -645,7 +645,7 @@ static __init int fc_transport_init(void
34031 {
34032 int error;
34033
34034- atomic_set(&fc_event_seq, 0);
34035+ atomic_set_unchecked(&fc_event_seq, 0);
34036
34037 error = transport_class_register(&fc_host_class);
34038 if (error)
34039@@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char
34040 char *cp;
34041
34042 *val = simple_strtoul(buf, &cp, 0);
34043- if ((*cp && (*cp != '\n')) || (*val < 0))
34044+ if (*cp && (*cp != '\n'))
34045 return -EINVAL;
34046 /*
34047 * Check for overflow; dev_loss_tmo is u32
34048diff -urNp linux-3.0.7/drivers/scsi/scsi_transport_iscsi.c linux-3.0.7/drivers/scsi/scsi_transport_iscsi.c
34049--- linux-3.0.7/drivers/scsi/scsi_transport_iscsi.c 2011-07-21 22:17:23.000000000 -0400
34050+++ linux-3.0.7/drivers/scsi/scsi_transport_iscsi.c 2011-08-23 21:47:56.000000000 -0400
34051@@ -83,7 +83,7 @@ struct iscsi_internal {
34052 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
34053 };
34054
34055-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
34056+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
34057 static struct workqueue_struct *iscsi_eh_timer_workq;
34058
34059 /*
34060@@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
34061 int err;
34062
34063 ihost = shost->shost_data;
34064- session->sid = atomic_add_return(1, &iscsi_session_nr);
34065+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
34066
34067 if (id == ISCSI_MAX_TARGET) {
34068 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
34069@@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
34070 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
34071 ISCSI_TRANSPORT_VERSION);
34072
34073- atomic_set(&iscsi_session_nr, 0);
34074+ atomic_set_unchecked(&iscsi_session_nr, 0);
34075
34076 err = class_register(&iscsi_transport_class);
34077 if (err)
34078diff -urNp linux-3.0.7/drivers/scsi/scsi_transport_srp.c linux-3.0.7/drivers/scsi/scsi_transport_srp.c
34079--- linux-3.0.7/drivers/scsi/scsi_transport_srp.c 2011-07-21 22:17:23.000000000 -0400
34080+++ linux-3.0.7/drivers/scsi/scsi_transport_srp.c 2011-08-23 21:47:56.000000000 -0400
34081@@ -33,7 +33,7 @@
34082 #include "scsi_transport_srp_internal.h"
34083
34084 struct srp_host_attrs {
34085- atomic_t next_port_id;
34086+ atomic_unchecked_t next_port_id;
34087 };
34088 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
34089
34090@@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
34091 struct Scsi_Host *shost = dev_to_shost(dev);
34092 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
34093
34094- atomic_set(&srp_host->next_port_id, 0);
34095+ atomic_set_unchecked(&srp_host->next_port_id, 0);
34096 return 0;
34097 }
34098
34099@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
34100 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
34101 rport->roles = ids->roles;
34102
34103- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
34104+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
34105 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
34106
34107 transport_setup_device(&rport->dev);
34108diff -urNp linux-3.0.7/drivers/scsi/sg.c linux-3.0.7/drivers/scsi/sg.c
34109--- linux-3.0.7/drivers/scsi/sg.c 2011-07-21 22:17:23.000000000 -0400
34110+++ linux-3.0.7/drivers/scsi/sg.c 2011-10-06 04:17:55.000000000 -0400
34111@@ -1075,7 +1075,7 @@ sg_ioctl(struct file *filp, unsigned int
34112 sdp->disk->disk_name,
34113 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
34114 NULL,
34115- (char *)arg);
34116+ (char __user *)arg);
34117 case BLKTRACESTART:
34118 return blk_trace_startstop(sdp->device->request_queue, 1);
34119 case BLKTRACESTOP:
34120@@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
34121 const struct file_operations * fops;
34122 };
34123
34124-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
34125+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
34126 {"allow_dio", &adio_fops},
34127 {"debug", &debug_fops},
34128 {"def_reserved_size", &dressz_fops},
34129@@ -2325,7 +2325,7 @@ sg_proc_init(void)
34130 {
34131 int k, mask;
34132 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
34133- struct sg_proc_leaf * leaf;
34134+ const struct sg_proc_leaf * leaf;
34135
34136 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
34137 if (!sg_proc_sgp)
34138diff -urNp linux-3.0.7/drivers/scsi/sym53c8xx_2/sym_glue.c linux-3.0.7/drivers/scsi/sym53c8xx_2/sym_glue.c
34139--- linux-3.0.7/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-07-21 22:17:23.000000000 -0400
34140+++ linux-3.0.7/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-08-23 21:48:14.000000000 -0400
34141@@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
34142 int do_iounmap = 0;
34143 int do_disable_device = 1;
34144
34145+ pax_track_stack();
34146+
34147 memset(&sym_dev, 0, sizeof(sym_dev));
34148 memset(&nvram, 0, sizeof(nvram));
34149 sym_dev.pdev = pdev;
34150diff -urNp linux-3.0.7/drivers/scsi/vmw_pvscsi.c linux-3.0.7/drivers/scsi/vmw_pvscsi.c
34151--- linux-3.0.7/drivers/scsi/vmw_pvscsi.c 2011-07-21 22:17:23.000000000 -0400
34152+++ linux-3.0.7/drivers/scsi/vmw_pvscsi.c 2011-08-23 21:48:14.000000000 -0400
34153@@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
34154 dma_addr_t base;
34155 unsigned i;
34156
34157+ pax_track_stack();
34158+
34159 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
34160 cmd.reqRingNumPages = adapter->req_pages;
34161 cmd.cmpRingNumPages = adapter->cmp_pages;
34162diff -urNp linux-3.0.7/drivers/spi/dw_spi_pci.c linux-3.0.7/drivers/spi/dw_spi_pci.c
34163--- linux-3.0.7/drivers/spi/dw_spi_pci.c 2011-07-21 22:17:23.000000000 -0400
34164+++ linux-3.0.7/drivers/spi/dw_spi_pci.c 2011-10-11 10:44:33.000000000 -0400
34165@@ -148,7 +148,7 @@ static int spi_resume(struct pci_dev *pd
34166 #define spi_resume NULL
34167 #endif
34168
34169-static const struct pci_device_id pci_ids[] __devinitdata = {
34170+static const struct pci_device_id pci_ids[] __devinitconst = {
34171 /* Intel MID platform SPI controller 0 */
34172 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
34173 {},
34174diff -urNp linux-3.0.7/drivers/spi/spi.c linux-3.0.7/drivers/spi/spi.c
34175--- linux-3.0.7/drivers/spi/spi.c 2011-07-21 22:17:23.000000000 -0400
34176+++ linux-3.0.7/drivers/spi/spi.c 2011-08-23 21:47:56.000000000 -0400
34177@@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
34178 EXPORT_SYMBOL_GPL(spi_bus_unlock);
34179
34180 /* portable code must never pass more than 32 bytes */
34181-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
34182+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
34183
34184 static u8 *buf;
34185
34186diff -urNp linux-3.0.7/drivers/staging/ath6kl/os/linux/ar6000_drv.c linux-3.0.7/drivers/staging/ath6kl/os/linux/ar6000_drv.c
34187--- linux-3.0.7/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-09-02 18:11:21.000000000 -0400
34188+++ linux-3.0.7/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-08-23 21:48:14.000000000 -0400
34189@@ -362,7 +362,7 @@ static struct ar_cookie s_ar_cookie_mem[
34190 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
34191
34192
34193-static struct net_device_ops ar6000_netdev_ops = {
34194+static net_device_ops_no_const ar6000_netdev_ops = {
34195 .ndo_init = NULL,
34196 .ndo_open = ar6000_open,
34197 .ndo_stop = ar6000_close,
34198diff -urNp linux-3.0.7/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h linux-3.0.7/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
34199--- linux-3.0.7/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-07-21 22:17:23.000000000 -0400
34200+++ linux-3.0.7/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-08-23 21:47:56.000000000 -0400
34201@@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void
34202 typedef struct ar6k_pal_config_s
34203 {
34204 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
34205-}ar6k_pal_config_t;
34206+} __no_const ar6k_pal_config_t;
34207
34208 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
34209 #endif /* _AR6K_PAL_H_ */
34210diff -urNp linux-3.0.7/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-3.0.7/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
34211--- linux-3.0.7/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-07-21 22:17:23.000000000 -0400
34212+++ linux-3.0.7/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-08-23 21:47:56.000000000 -0400
34213@@ -853,14 +853,14 @@ static void dhd_op_if(dhd_if_t *ifp)
34214 free_netdev(ifp->net);
34215 }
34216 /* Allocate etherdev, including space for private structure */
34217- ifp->net = alloc_etherdev(sizeof(dhd));
34218+ ifp->net = alloc_etherdev(sizeof(*dhd));
34219 if (!ifp->net) {
34220 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
34221 ret = -ENOMEM;
34222 }
34223 if (ret == 0) {
34224 strcpy(ifp->net->name, ifp->name);
34225- memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
34226+ memcpy(netdev_priv(ifp->net), dhd, sizeof(*dhd));
34227 err = dhd_net_attach(&dhd->pub, ifp->idx);
34228 if (err != 0) {
34229 DHD_ERROR(("%s: dhd_net_attach failed, "
34230@@ -1872,7 +1872,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
34231 strcpy(nv_path, nvram_path);
34232
34233 /* Allocate etherdev, including space for private structure */
34234- net = alloc_etherdev(sizeof(dhd));
34235+ net = alloc_etherdev(sizeof(*dhd));
34236 if (!net) {
34237 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
34238 goto fail;
34239@@ -1888,7 +1888,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
34240 /*
34241 * Save the dhd_info into the priv
34242 */
34243- memcpy(netdev_priv(net), &dhd, sizeof(dhd));
34244+ memcpy(netdev_priv(net), dhd, sizeof(*dhd));
34245
34246 /* Set network interface name if it was provided as module parameter */
34247 if (iface_name[0]) {
34248@@ -2004,7 +2004,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
34249 /*
34250 * Save the dhd_info into the priv
34251 */
34252- memcpy(netdev_priv(net), &dhd, sizeof(dhd));
34253+ memcpy(netdev_priv(net), dhd, sizeof(*dhd));
34254
34255 #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
34256 g_bus = bus;
34257diff -urNp linux-3.0.7/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h linux-3.0.7/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h
34258--- linux-3.0.7/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-07-21 22:17:23.000000000 -0400
34259+++ linux-3.0.7/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h 2011-08-23 21:47:56.000000000 -0400
34260@@ -593,7 +593,7 @@ struct phy_func_ptr {
34261 initfn_t carrsuppr;
34262 rxsigpwrfn_t rxsigpwr;
34263 detachfn_t detach;
34264-};
34265+} __no_const;
34266 typedef struct phy_func_ptr phy_func_ptr_t;
34267
34268 struct phy_info {
34269diff -urNp linux-3.0.7/drivers/staging/brcm80211/include/bcmsdh.h linux-3.0.7/drivers/staging/brcm80211/include/bcmsdh.h
34270--- linux-3.0.7/drivers/staging/brcm80211/include/bcmsdh.h 2011-07-21 22:17:23.000000000 -0400
34271+++ linux-3.0.7/drivers/staging/brcm80211/include/bcmsdh.h 2011-08-23 21:47:56.000000000 -0400
34272@@ -185,7 +185,7 @@ typedef struct {
34273 u16 func, uint bustype, void *regsva, void *param);
34274 /* detach from device */
34275 void (*detach) (void *ch);
34276-} bcmsdh_driver_t;
34277+} __no_const bcmsdh_driver_t;
34278
34279 /* platform specific/high level functions */
34280 extern int bcmsdh_register(bcmsdh_driver_t *driver);
34281diff -urNp linux-3.0.7/drivers/staging/et131x/et1310_tx.c linux-3.0.7/drivers/staging/et131x/et1310_tx.c
34282--- linux-3.0.7/drivers/staging/et131x/et1310_tx.c 2011-07-21 22:17:23.000000000 -0400
34283+++ linux-3.0.7/drivers/staging/et131x/et1310_tx.c 2011-08-23 21:47:56.000000000 -0400
34284@@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
34285 struct net_device_stats *stats = &etdev->net_stats;
34286
34287 if (tcb->flags & fMP_DEST_BROAD)
34288- atomic_inc(&etdev->Stats.brdcstxmt);
34289+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
34290 else if (tcb->flags & fMP_DEST_MULTI)
34291- atomic_inc(&etdev->Stats.multixmt);
34292+ atomic_inc_unchecked(&etdev->Stats.multixmt);
34293 else
34294- atomic_inc(&etdev->Stats.unixmt);
34295+ atomic_inc_unchecked(&etdev->Stats.unixmt);
34296
34297 if (tcb->skb) {
34298 stats->tx_bytes += tcb->skb->len;
34299diff -urNp linux-3.0.7/drivers/staging/et131x/et131x_adapter.h linux-3.0.7/drivers/staging/et131x/et131x_adapter.h
34300--- linux-3.0.7/drivers/staging/et131x/et131x_adapter.h 2011-07-21 22:17:23.000000000 -0400
34301+++ linux-3.0.7/drivers/staging/et131x/et131x_adapter.h 2011-08-23 21:47:56.000000000 -0400
34302@@ -110,11 +110,11 @@ typedef struct _ce_stats_t {
34303 * operations
34304 */
34305 u32 unircv; /* # multicast packets received */
34306- atomic_t unixmt; /* # multicast packets for Tx */
34307+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
34308 u32 multircv; /* # multicast packets received */
34309- atomic_t multixmt; /* # multicast packets for Tx */
34310+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
34311 u32 brdcstrcv; /* # broadcast packets received */
34312- atomic_t brdcstxmt; /* # broadcast packets for Tx */
34313+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
34314 u32 norcvbuf; /* # Rx packets discarded */
34315 u32 noxmtbuf; /* # Tx packets discarded */
34316
34317diff -urNp linux-3.0.7/drivers/staging/hv/channel.c linux-3.0.7/drivers/staging/hv/channel.c
34318--- linux-3.0.7/drivers/staging/hv/channel.c 2011-09-02 18:11:21.000000000 -0400
34319+++ linux-3.0.7/drivers/staging/hv/channel.c 2011-08-23 21:47:56.000000000 -0400
34320@@ -433,8 +433,8 @@ int vmbus_establish_gpadl(struct vmbus_c
34321 int ret = 0;
34322 int t;
34323
34324- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
34325- atomic_inc(&vmbus_connection.next_gpadl_handle);
34326+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
34327+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
34328
34329 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
34330 if (ret)
34331diff -urNp linux-3.0.7/drivers/staging/hv/hv.c linux-3.0.7/drivers/staging/hv/hv.c
34332--- linux-3.0.7/drivers/staging/hv/hv.c 2011-07-21 22:17:23.000000000 -0400
34333+++ linux-3.0.7/drivers/staging/hv/hv.c 2011-08-23 21:47:56.000000000 -0400
34334@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, voi
34335 u64 output_address = (output) ? virt_to_phys(output) : 0;
34336 u32 output_address_hi = output_address >> 32;
34337 u32 output_address_lo = output_address & 0xFFFFFFFF;
34338- volatile void *hypercall_page = hv_context.hypercall_page;
34339+ volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
34340
34341 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
34342 "=a"(hv_status_lo) : "d" (control_hi),
34343diff -urNp linux-3.0.7/drivers/staging/hv/hv_mouse.c linux-3.0.7/drivers/staging/hv/hv_mouse.c
34344--- linux-3.0.7/drivers/staging/hv/hv_mouse.c 2011-07-21 22:17:23.000000000 -0400
34345+++ linux-3.0.7/drivers/staging/hv/hv_mouse.c 2011-08-23 21:47:56.000000000 -0400
34346@@ -879,8 +879,10 @@ static void reportdesc_callback(struct h
34347 if (hid_dev) {
34348 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
34349
34350- hid_dev->ll_driver->open = mousevsc_hid_open;
34351- hid_dev->ll_driver->close = mousevsc_hid_close;
34352+ pax_open_kernel();
34353+ *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
34354+ *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
34355+ pax_close_kernel();
34356
34357 hid_dev->bus = BUS_VIRTUAL;
34358 hid_dev->vendor = input_device_ctx->device_info.vendor;
34359diff -urNp linux-3.0.7/drivers/staging/hv/hyperv_vmbus.h linux-3.0.7/drivers/staging/hv/hyperv_vmbus.h
34360--- linux-3.0.7/drivers/staging/hv/hyperv_vmbus.h 2011-07-21 22:17:23.000000000 -0400
34361+++ linux-3.0.7/drivers/staging/hv/hyperv_vmbus.h 2011-08-23 21:47:56.000000000 -0400
34362@@ -559,7 +559,7 @@ enum vmbus_connect_state {
34363 struct vmbus_connection {
34364 enum vmbus_connect_state conn_state;
34365
34366- atomic_t next_gpadl_handle;
34367+ atomic_unchecked_t next_gpadl_handle;
34368
34369 /*
34370 * Represents channel interrupts. Each bit position represents a
34371diff -urNp linux-3.0.7/drivers/staging/hv/rndis_filter.c linux-3.0.7/drivers/staging/hv/rndis_filter.c
34372--- linux-3.0.7/drivers/staging/hv/rndis_filter.c 2011-09-02 18:11:21.000000000 -0400
34373+++ linux-3.0.7/drivers/staging/hv/rndis_filter.c 2011-08-23 21:47:56.000000000 -0400
34374@@ -43,7 +43,7 @@ struct rndis_device {
34375
34376 enum rndis_device_state state;
34377 u32 link_stat;
34378- atomic_t new_req_id;
34379+ atomic_unchecked_t new_req_id;
34380
34381 spinlock_t request_lock;
34382 struct list_head req_list;
34383@@ -117,7 +117,7 @@ static struct rndis_request *get_rndis_r
34384 * template
34385 */
34386 set = &rndis_msg->msg.set_req;
34387- set->req_id = atomic_inc_return(&dev->new_req_id);
34388+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34389
34390 /* Add to the request list */
34391 spin_lock_irqsave(&dev->request_lock, flags);
34392@@ -637,7 +637,7 @@ static void rndis_filter_halt_device(str
34393
34394 /* Setup the rndis set */
34395 halt = &request->request_msg.msg.halt_req;
34396- halt->req_id = atomic_inc_return(&dev->new_req_id);
34397+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34398
34399 /* Ignore return since this msg is optional. */
34400 rndis_filter_send_request(dev, request);
34401diff -urNp linux-3.0.7/drivers/staging/hv/vmbus_drv.c linux-3.0.7/drivers/staging/hv/vmbus_drv.c
34402--- linux-3.0.7/drivers/staging/hv/vmbus_drv.c 2011-07-21 22:17:23.000000000 -0400
34403+++ linux-3.0.7/drivers/staging/hv/vmbus_drv.c 2011-08-23 21:47:56.000000000 -0400
34404@@ -668,11 +668,11 @@ int vmbus_child_device_register(struct h
34405 {
34406 int ret = 0;
34407
34408- static atomic_t device_num = ATOMIC_INIT(0);
34409+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
34410
34411 /* Set the device name. Otherwise, device_register() will fail. */
34412 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
34413- atomic_inc_return(&device_num));
34414+ atomic_inc_return_unchecked(&device_num));
34415
34416 /* The new device belongs to this bus */
34417 child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */
34418diff -urNp linux-3.0.7/drivers/staging/iio/ring_generic.h linux-3.0.7/drivers/staging/iio/ring_generic.h
34419--- linux-3.0.7/drivers/staging/iio/ring_generic.h 2011-07-21 22:17:23.000000000 -0400
34420+++ linux-3.0.7/drivers/staging/iio/ring_generic.h 2011-08-23 21:47:56.000000000 -0400
34421@@ -62,7 +62,7 @@ struct iio_ring_access_funcs {
34422
34423 int (*is_enabled)(struct iio_ring_buffer *ring);
34424 int (*enable)(struct iio_ring_buffer *ring);
34425-};
34426+} __no_const;
34427
34428 struct iio_ring_setup_ops {
34429 int (*preenable)(struct iio_dev *);
34430diff -urNp linux-3.0.7/drivers/staging/octeon/ethernet.c linux-3.0.7/drivers/staging/octeon/ethernet.c
34431--- linux-3.0.7/drivers/staging/octeon/ethernet.c 2011-07-21 22:17:23.000000000 -0400
34432+++ linux-3.0.7/drivers/staging/octeon/ethernet.c 2011-08-23 21:47:56.000000000 -0400
34433@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
34434 * since the RX tasklet also increments it.
34435 */
34436 #ifdef CONFIG_64BIT
34437- atomic64_add(rx_status.dropped_packets,
34438- (atomic64_t *)&priv->stats.rx_dropped);
34439+ atomic64_add_unchecked(rx_status.dropped_packets,
34440+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
34441 #else
34442- atomic_add(rx_status.dropped_packets,
34443- (atomic_t *)&priv->stats.rx_dropped);
34444+ atomic_add_unchecked(rx_status.dropped_packets,
34445+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
34446 #endif
34447 }
34448
34449diff -urNp linux-3.0.7/drivers/staging/octeon/ethernet-rx.c linux-3.0.7/drivers/staging/octeon/ethernet-rx.c
34450--- linux-3.0.7/drivers/staging/octeon/ethernet-rx.c 2011-07-21 22:17:23.000000000 -0400
34451+++ linux-3.0.7/drivers/staging/octeon/ethernet-rx.c 2011-08-23 21:47:56.000000000 -0400
34452@@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi
34453 /* Increment RX stats for virtual ports */
34454 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
34455 #ifdef CONFIG_64BIT
34456- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
34457- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
34458+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
34459+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
34460 #else
34461- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
34462- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
34463+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
34464+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
34465 #endif
34466 }
34467 netif_receive_skb(skb);
34468@@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi
34469 dev->name);
34470 */
34471 #ifdef CONFIG_64BIT
34472- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
34473+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
34474 #else
34475- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
34476+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
34477 #endif
34478 dev_kfree_skb_irq(skb);
34479 }
34480diff -urNp linux-3.0.7/drivers/staging/pohmelfs/inode.c linux-3.0.7/drivers/staging/pohmelfs/inode.c
34481--- linux-3.0.7/drivers/staging/pohmelfs/inode.c 2011-07-21 22:17:23.000000000 -0400
34482+++ linux-3.0.7/drivers/staging/pohmelfs/inode.c 2011-08-23 21:47:56.000000000 -0400
34483@@ -1856,7 +1856,7 @@ static int pohmelfs_fill_super(struct su
34484 mutex_init(&psb->mcache_lock);
34485 psb->mcache_root = RB_ROOT;
34486 psb->mcache_timeout = msecs_to_jiffies(5000);
34487- atomic_long_set(&psb->mcache_gen, 0);
34488+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
34489
34490 psb->trans_max_pages = 100;
34491
34492@@ -1871,7 +1871,7 @@ static int pohmelfs_fill_super(struct su
34493 INIT_LIST_HEAD(&psb->crypto_ready_list);
34494 INIT_LIST_HEAD(&psb->crypto_active_list);
34495
34496- atomic_set(&psb->trans_gen, 1);
34497+ atomic_set_unchecked(&psb->trans_gen, 1);
34498 atomic_long_set(&psb->total_inodes, 0);
34499
34500 mutex_init(&psb->state_lock);
34501diff -urNp linux-3.0.7/drivers/staging/pohmelfs/mcache.c linux-3.0.7/drivers/staging/pohmelfs/mcache.c
34502--- linux-3.0.7/drivers/staging/pohmelfs/mcache.c 2011-07-21 22:17:23.000000000 -0400
34503+++ linux-3.0.7/drivers/staging/pohmelfs/mcache.c 2011-08-23 21:47:56.000000000 -0400
34504@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
34505 m->data = data;
34506 m->start = start;
34507 m->size = size;
34508- m->gen = atomic_long_inc_return(&psb->mcache_gen);
34509+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
34510
34511 mutex_lock(&psb->mcache_lock);
34512 err = pohmelfs_mcache_insert(psb, m);
34513diff -urNp linux-3.0.7/drivers/staging/pohmelfs/netfs.h linux-3.0.7/drivers/staging/pohmelfs/netfs.h
34514--- linux-3.0.7/drivers/staging/pohmelfs/netfs.h 2011-07-21 22:17:23.000000000 -0400
34515+++ linux-3.0.7/drivers/staging/pohmelfs/netfs.h 2011-08-23 21:47:56.000000000 -0400
34516@@ -571,14 +571,14 @@ struct pohmelfs_config;
34517 struct pohmelfs_sb {
34518 struct rb_root mcache_root;
34519 struct mutex mcache_lock;
34520- atomic_long_t mcache_gen;
34521+ atomic_long_unchecked_t mcache_gen;
34522 unsigned long mcache_timeout;
34523
34524 unsigned int idx;
34525
34526 unsigned int trans_retries;
34527
34528- atomic_t trans_gen;
34529+ atomic_unchecked_t trans_gen;
34530
34531 unsigned int crypto_attached_size;
34532 unsigned int crypto_align_size;
34533diff -urNp linux-3.0.7/drivers/staging/pohmelfs/trans.c linux-3.0.7/drivers/staging/pohmelfs/trans.c
34534--- linux-3.0.7/drivers/staging/pohmelfs/trans.c 2011-07-21 22:17:23.000000000 -0400
34535+++ linux-3.0.7/drivers/staging/pohmelfs/trans.c 2011-08-23 21:47:56.000000000 -0400
34536@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
34537 int err;
34538 struct netfs_cmd *cmd = t->iovec.iov_base;
34539
34540- t->gen = atomic_inc_return(&psb->trans_gen);
34541+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
34542
34543 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
34544 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
34545diff -urNp linux-3.0.7/drivers/staging/rtl8712/rtl871x_io.h linux-3.0.7/drivers/staging/rtl8712/rtl871x_io.h
34546--- linux-3.0.7/drivers/staging/rtl8712/rtl871x_io.h 2011-07-21 22:17:23.000000000 -0400
34547+++ linux-3.0.7/drivers/staging/rtl8712/rtl871x_io.h 2011-08-23 21:47:56.000000000 -0400
34548@@ -83,7 +83,7 @@ struct _io_ops {
34549 u8 *pmem);
34550 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
34551 u8 *pmem);
34552-};
34553+} __no_const;
34554
34555 struct io_req {
34556 struct list_head list;
34557diff -urNp linux-3.0.7/drivers/staging/sbe-2t3e3/netdev.c linux-3.0.7/drivers/staging/sbe-2t3e3/netdev.c
34558--- linux-3.0.7/drivers/staging/sbe-2t3e3/netdev.c 2011-07-21 22:17:23.000000000 -0400
34559+++ linux-3.0.7/drivers/staging/sbe-2t3e3/netdev.c 2011-08-24 18:21:41.000000000 -0400
34560@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, s
34561 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
34562
34563 if (rlen)
34564- if (copy_to_user(data, &resp, rlen))
34565+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
34566 return -EFAULT;
34567
34568 return 0;
34569diff -urNp linux-3.0.7/drivers/staging/tty/stallion.c linux-3.0.7/drivers/staging/tty/stallion.c
34570--- linux-3.0.7/drivers/staging/tty/stallion.c 2011-07-21 22:17:23.000000000 -0400
34571+++ linux-3.0.7/drivers/staging/tty/stallion.c 2011-08-23 21:48:14.000000000 -0400
34572@@ -2406,6 +2406,8 @@ static int stl_getportstruct(struct stlp
34573 struct stlport stl_dummyport;
34574 struct stlport *portp;
34575
34576+ pax_track_stack();
34577+
34578 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
34579 return -EFAULT;
34580 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
34581diff -urNp linux-3.0.7/drivers/staging/usbip/usbip_common.h linux-3.0.7/drivers/staging/usbip/usbip_common.h
34582--- linux-3.0.7/drivers/staging/usbip/usbip_common.h 2011-07-21 22:17:23.000000000 -0400
34583+++ linux-3.0.7/drivers/staging/usbip/usbip_common.h 2011-08-23 21:47:56.000000000 -0400
34584@@ -315,7 +315,7 @@ struct usbip_device {
34585 void (*shutdown)(struct usbip_device *);
34586 void (*reset)(struct usbip_device *);
34587 void (*unusable)(struct usbip_device *);
34588- } eh_ops;
34589+ } __no_const eh_ops;
34590 };
34591
34592 void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd,
34593diff -urNp linux-3.0.7/drivers/staging/usbip/vhci.h linux-3.0.7/drivers/staging/usbip/vhci.h
34594--- linux-3.0.7/drivers/staging/usbip/vhci.h 2011-07-21 22:17:23.000000000 -0400
34595+++ linux-3.0.7/drivers/staging/usbip/vhci.h 2011-08-23 21:47:56.000000000 -0400
34596@@ -94,7 +94,7 @@ struct vhci_hcd {
34597 unsigned resuming:1;
34598 unsigned long re_timeout;
34599
34600- atomic_t seqnum;
34601+ atomic_unchecked_t seqnum;
34602
34603 /*
34604 * NOTE:
34605diff -urNp linux-3.0.7/drivers/staging/usbip/vhci_hcd.c linux-3.0.7/drivers/staging/usbip/vhci_hcd.c
34606--- linux-3.0.7/drivers/staging/usbip/vhci_hcd.c 2011-09-02 18:11:21.000000000 -0400
34607+++ linux-3.0.7/drivers/staging/usbip/vhci_hcd.c 2011-08-23 21:47:56.000000000 -0400
34608@@ -511,7 +511,7 @@ static void vhci_tx_urb(struct urb *urb)
34609 return;
34610 }
34611
34612- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
34613+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
34614 if (priv->seqnum == 0xffff)
34615 dev_info(&urb->dev->dev, "seqnum max\n");
34616
34617@@ -765,7 +765,7 @@ static int vhci_urb_dequeue(struct usb_h
34618 return -ENOMEM;
34619 }
34620
34621- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
34622+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
34623 if (unlink->seqnum == 0xffff)
34624 pr_info("seqnum max\n");
34625
34626@@ -955,7 +955,7 @@ static int vhci_start(struct usb_hcd *hc
34627 vdev->rhport = rhport;
34628 }
34629
34630- atomic_set(&vhci->seqnum, 0);
34631+ atomic_set_unchecked(&vhci->seqnum, 0);
34632 spin_lock_init(&vhci->lock);
34633
34634 hcd->power_budget = 0; /* no limit */
34635diff -urNp linux-3.0.7/drivers/staging/usbip/vhci_rx.c linux-3.0.7/drivers/staging/usbip/vhci_rx.c
34636--- linux-3.0.7/drivers/staging/usbip/vhci_rx.c 2011-07-21 22:17:23.000000000 -0400
34637+++ linux-3.0.7/drivers/staging/usbip/vhci_rx.c 2011-08-23 21:47:56.000000000 -0400
34638@@ -76,7 +76,7 @@ static void vhci_recv_ret_submit(struct
34639 if (!urb) {
34640 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
34641 pr_info("max seqnum %d\n",
34642- atomic_read(&the_controller->seqnum));
34643+ atomic_read_unchecked(&the_controller->seqnum));
34644 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
34645 return;
34646 }
34647diff -urNp linux-3.0.7/drivers/staging/vt6655/hostap.c linux-3.0.7/drivers/staging/vt6655/hostap.c
34648--- linux-3.0.7/drivers/staging/vt6655/hostap.c 2011-07-21 22:17:23.000000000 -0400
34649+++ linux-3.0.7/drivers/staging/vt6655/hostap.c 2011-08-23 21:47:56.000000000 -0400
34650@@ -79,14 +79,13 @@ static int msglevel
34651 *
34652 */
34653
34654+static net_device_ops_no_const apdev_netdev_ops;
34655+
34656 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
34657 {
34658 PSDevice apdev_priv;
34659 struct net_device *dev = pDevice->dev;
34660 int ret;
34661- const struct net_device_ops apdev_netdev_ops = {
34662- .ndo_start_xmit = pDevice->tx_80211,
34663- };
34664
34665 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
34666
34667@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevic
34668 *apdev_priv = *pDevice;
34669 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
34670
34671+ /* only half broken now */
34672+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
34673 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
34674
34675 pDevice->apdev->type = ARPHRD_IEEE80211;
34676diff -urNp linux-3.0.7/drivers/staging/vt6656/hostap.c linux-3.0.7/drivers/staging/vt6656/hostap.c
34677--- linux-3.0.7/drivers/staging/vt6656/hostap.c 2011-07-21 22:17:23.000000000 -0400
34678+++ linux-3.0.7/drivers/staging/vt6656/hostap.c 2011-08-23 21:47:56.000000000 -0400
34679@@ -80,14 +80,13 @@ static int msglevel
34680 *
34681 */
34682
34683+static net_device_ops_no_const apdev_netdev_ops;
34684+
34685 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
34686 {
34687 PSDevice apdev_priv;
34688 struct net_device *dev = pDevice->dev;
34689 int ret;
34690- const struct net_device_ops apdev_netdev_ops = {
34691- .ndo_start_xmit = pDevice->tx_80211,
34692- };
34693
34694 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
34695
34696@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevic
34697 *apdev_priv = *pDevice;
34698 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
34699
34700+ /* only half broken now */
34701+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
34702 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
34703
34704 pDevice->apdev->type = ARPHRD_IEEE80211;
34705diff -urNp linux-3.0.7/drivers/staging/wlan-ng/hfa384x_usb.c linux-3.0.7/drivers/staging/wlan-ng/hfa384x_usb.c
34706--- linux-3.0.7/drivers/staging/wlan-ng/hfa384x_usb.c 2011-07-21 22:17:23.000000000 -0400
34707+++ linux-3.0.7/drivers/staging/wlan-ng/hfa384x_usb.c 2011-08-23 21:47:56.000000000 -0400
34708@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hf
34709
34710 struct usbctlx_completor {
34711 int (*complete) (struct usbctlx_completor *);
34712-};
34713+} __no_const;
34714
34715 static int
34716 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
34717diff -urNp linux-3.0.7/drivers/staging/zcache/tmem.c linux-3.0.7/drivers/staging/zcache/tmem.c
34718--- linux-3.0.7/drivers/staging/zcache/tmem.c 2011-07-21 22:17:23.000000000 -0400
34719+++ linux-3.0.7/drivers/staging/zcache/tmem.c 2011-08-23 21:47:56.000000000 -0400
34720@@ -39,7 +39,7 @@
34721 * A tmem host implementation must use this function to register callbacks
34722 * for memory allocation.
34723 */
34724-static struct tmem_hostops tmem_hostops;
34725+static tmem_hostops_no_const tmem_hostops;
34726
34727 static void tmem_objnode_tree_init(void);
34728
34729@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_h
34730 * A tmem host implementation must use this function to register
34731 * callbacks for a page-accessible memory (PAM) implementation
34732 */
34733-static struct tmem_pamops tmem_pamops;
34734+static tmem_pamops_no_const tmem_pamops;
34735
34736 void tmem_register_pamops(struct tmem_pamops *m)
34737 {
34738diff -urNp linux-3.0.7/drivers/staging/zcache/tmem.h linux-3.0.7/drivers/staging/zcache/tmem.h
34739--- linux-3.0.7/drivers/staging/zcache/tmem.h 2011-07-21 22:17:23.000000000 -0400
34740+++ linux-3.0.7/drivers/staging/zcache/tmem.h 2011-08-23 21:47:56.000000000 -0400
34741@@ -171,6 +171,7 @@ struct tmem_pamops {
34742 int (*get_data)(struct page *, void *, struct tmem_pool *);
34743 void (*free)(void *, struct tmem_pool *);
34744 };
34745+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
34746 extern void tmem_register_pamops(struct tmem_pamops *m);
34747
34748 /* memory allocation methods provided by the host implementation */
34749@@ -180,6 +181,7 @@ struct tmem_hostops {
34750 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
34751 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
34752 };
34753+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
34754 extern void tmem_register_hostops(struct tmem_hostops *m);
34755
34756 /* core tmem accessor functions */
34757diff -urNp linux-3.0.7/drivers/target/target_core_alua.c linux-3.0.7/drivers/target/target_core_alua.c
34758--- linux-3.0.7/drivers/target/target_core_alua.c 2011-07-21 22:17:23.000000000 -0400
34759+++ linux-3.0.7/drivers/target/target_core_alua.c 2011-08-23 21:48:14.000000000 -0400
34760@@ -675,6 +675,8 @@ static int core_alua_update_tpg_primary_
34761 char path[ALUA_METADATA_PATH_LEN];
34762 int len;
34763
34764+ pax_track_stack();
34765+
34766 memset(path, 0, ALUA_METADATA_PATH_LEN);
34767
34768 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
34769@@ -938,6 +940,8 @@ static int core_alua_update_tpg_secondar
34770 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
34771 int len;
34772
34773+ pax_track_stack();
34774+
34775 memset(path, 0, ALUA_METADATA_PATH_LEN);
34776 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
34777
34778diff -urNp linux-3.0.7/drivers/target/target_core_cdb.c linux-3.0.7/drivers/target/target_core_cdb.c
34779--- linux-3.0.7/drivers/target/target_core_cdb.c 2011-07-21 22:17:23.000000000 -0400
34780+++ linux-3.0.7/drivers/target/target_core_cdb.c 2011-08-23 21:48:14.000000000 -0400
34781@@ -838,6 +838,8 @@ target_emulate_modesense(struct se_cmd *
34782 int length = 0;
34783 unsigned char buf[SE_MODE_PAGE_BUF];
34784
34785+ pax_track_stack();
34786+
34787 memset(buf, 0, SE_MODE_PAGE_BUF);
34788
34789 switch (cdb[2] & 0x3f) {
34790diff -urNp linux-3.0.7/drivers/target/target_core_configfs.c linux-3.0.7/drivers/target/target_core_configfs.c
34791--- linux-3.0.7/drivers/target/target_core_configfs.c 2011-07-21 22:17:23.000000000 -0400
34792+++ linux-3.0.7/drivers/target/target_core_configfs.c 2011-08-23 21:48:14.000000000 -0400
34793@@ -1276,6 +1276,8 @@ static ssize_t target_core_dev_pr_show_a
34794 ssize_t len = 0;
34795 int reg_count = 0, prf_isid;
34796
34797+ pax_track_stack();
34798+
34799 if (!(su_dev->se_dev_ptr))
34800 return -ENODEV;
34801
34802diff -urNp linux-3.0.7/drivers/target/target_core_pr.c linux-3.0.7/drivers/target/target_core_pr.c
34803--- linux-3.0.7/drivers/target/target_core_pr.c 2011-07-21 22:17:23.000000000 -0400
34804+++ linux-3.0.7/drivers/target/target_core_pr.c 2011-08-23 21:48:14.000000000 -0400
34805@@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
34806 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
34807 u16 tpgt;
34808
34809+ pax_track_stack();
34810+
34811 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
34812 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
34813 /*
34814@@ -1861,6 +1863,8 @@ static int __core_scsi3_update_aptpl_buf
34815 ssize_t len = 0;
34816 int reg_count = 0;
34817
34818+ pax_track_stack();
34819+
34820 memset(buf, 0, pr_aptpl_buf_len);
34821 /*
34822 * Called to clear metadata once APTPL has been deactivated.
34823@@ -1983,6 +1987,8 @@ static int __core_scsi3_write_aptpl_to_f
34824 char path[512];
34825 int ret;
34826
34827+ pax_track_stack();
34828+
34829 memset(iov, 0, sizeof(struct iovec));
34830 memset(path, 0, 512);
34831
34832diff -urNp linux-3.0.7/drivers/target/target_core_tmr.c linux-3.0.7/drivers/target/target_core_tmr.c
34833--- linux-3.0.7/drivers/target/target_core_tmr.c 2011-07-21 22:17:23.000000000 -0400
34834+++ linux-3.0.7/drivers/target/target_core_tmr.c 2011-08-23 21:47:56.000000000 -0400
34835@@ -269,7 +269,7 @@ int core_tmr_lun_reset(
34836 CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
34837 T_TASK(cmd)->t_task_cdbs,
34838 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
34839- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
34840+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
34841 atomic_read(&T_TASK(cmd)->t_transport_active),
34842 atomic_read(&T_TASK(cmd)->t_transport_stop),
34843 atomic_read(&T_TASK(cmd)->t_transport_sent));
34844@@ -311,7 +311,7 @@ int core_tmr_lun_reset(
34845 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
34846 " task: %p, t_fe_count: %d dev: %p\n", task,
34847 fe_count, dev);
34848- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
34849+ atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
34850 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
34851 flags);
34852 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
34853@@ -321,7 +321,7 @@ int core_tmr_lun_reset(
34854 }
34855 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
34856 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
34857- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
34858+ atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
34859 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
34860 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
34861
34862diff -urNp linux-3.0.7/drivers/target/target_core_transport.c linux-3.0.7/drivers/target/target_core_transport.c
34863--- linux-3.0.7/drivers/target/target_core_transport.c 2011-07-21 22:17:23.000000000 -0400
34864+++ linux-3.0.7/drivers/target/target_core_transport.c 2011-08-23 21:47:56.000000000 -0400
34865@@ -1681,7 +1681,7 @@ struct se_device *transport_add_device_t
34866
34867 dev->queue_depth = dev_limits->queue_depth;
34868 atomic_set(&dev->depth_left, dev->queue_depth);
34869- atomic_set(&dev->dev_ordered_id, 0);
34870+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
34871
34872 se_dev_set_default_attribs(dev, dev_limits);
34873
34874@@ -1882,7 +1882,7 @@ static int transport_check_alloc_task_at
34875 * Used to determine when ORDERED commands should go from
34876 * Dormant to Active status.
34877 */
34878- cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
34879+ cmd->se_ordered_id = atomic_inc_return_unchecked(&SE_DEV(cmd)->dev_ordered_id);
34880 smp_mb__after_atomic_inc();
34881 DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
34882 cmd->se_ordered_id, cmd->sam_task_attr,
34883@@ -2169,7 +2169,7 @@ static void transport_generic_request_fa
34884 " t_transport_active: %d t_transport_stop: %d"
34885 " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
34886 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
34887- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
34888+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
34889 atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
34890 atomic_read(&T_TASK(cmd)->t_transport_active),
34891 atomic_read(&T_TASK(cmd)->t_transport_stop),
34892@@ -2673,9 +2673,9 @@ check_depth:
34893 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
34894 atomic_set(&task->task_active, 1);
34895 atomic_set(&task->task_sent, 1);
34896- atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
34897+ atomic_inc_unchecked(&T_TASK(cmd)->t_task_cdbs_sent);
34898
34899- if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
34900+ if (atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent) ==
34901 T_TASK(cmd)->t_task_cdbs)
34902 atomic_set(&cmd->transport_sent, 1);
34903
34904@@ -5568,7 +5568,7 @@ static void transport_generic_wait_for_t
34905 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
34906 }
34907 if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
34908- atomic_read(&T_TASK(cmd)->t_transport_aborted))
34909+ atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted))
34910 goto remove;
34911
34912 atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
34913@@ -5797,7 +5797,7 @@ int transport_check_aborted_status(struc
34914 {
34915 int ret = 0;
34916
34917- if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
34918+ if (atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted) != 0) {
34919 if (!(send_status) ||
34920 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
34921 return 1;
34922@@ -5825,7 +5825,7 @@ void transport_send_task_abort(struct se
34923 */
34924 if (cmd->data_direction == DMA_TO_DEVICE) {
34925 if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
34926- atomic_inc(&T_TASK(cmd)->t_transport_aborted);
34927+ atomic_inc_unchecked(&T_TASK(cmd)->t_transport_aborted);
34928 smp_mb__after_atomic_inc();
34929 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
34930 transport_new_cmd_failure(cmd);
34931@@ -5949,7 +5949,7 @@ static void transport_processing_shutdow
34932 CMD_TFO(cmd)->get_task_tag(cmd),
34933 T_TASK(cmd)->t_task_cdbs,
34934 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
34935- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
34936+ atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
34937 atomic_read(&T_TASK(cmd)->t_transport_active),
34938 atomic_read(&T_TASK(cmd)->t_transport_stop),
34939 atomic_read(&T_TASK(cmd)->t_transport_sent));
34940diff -urNp linux-3.0.7/drivers/telephony/ixj.c linux-3.0.7/drivers/telephony/ixj.c
34941--- linux-3.0.7/drivers/telephony/ixj.c 2011-07-21 22:17:23.000000000 -0400
34942+++ linux-3.0.7/drivers/telephony/ixj.c 2011-08-23 21:48:14.000000000 -0400
34943@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
34944 bool mContinue;
34945 char *pIn, *pOut;
34946
34947+ pax_track_stack();
34948+
34949 if (!SCI_Prepare(j))
34950 return 0;
34951
34952diff -urNp linux-3.0.7/drivers/tty/hvc/hvcs.c linux-3.0.7/drivers/tty/hvc/hvcs.c
34953--- linux-3.0.7/drivers/tty/hvc/hvcs.c 2011-07-21 22:17:23.000000000 -0400
34954+++ linux-3.0.7/drivers/tty/hvc/hvcs.c 2011-08-23 21:47:56.000000000 -0400
34955@@ -83,6 +83,7 @@
34956 #include <asm/hvcserver.h>
34957 #include <asm/uaccess.h>
34958 #include <asm/vio.h>
34959+#include <asm/local.h>
34960
34961 /*
34962 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
34963@@ -270,7 +271,7 @@ struct hvcs_struct {
34964 unsigned int index;
34965
34966 struct tty_struct *tty;
34967- int open_count;
34968+ local_t open_count;
34969
34970 /*
34971 * Used to tell the driver kernel_thread what operations need to take
34972@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
34973
34974 spin_lock_irqsave(&hvcsd->lock, flags);
34975
34976- if (hvcsd->open_count > 0) {
34977+ if (local_read(&hvcsd->open_count) > 0) {
34978 spin_unlock_irqrestore(&hvcsd->lock, flags);
34979 printk(KERN_INFO "HVCS: vterm state unchanged. "
34980 "The hvcs device node is still in use.\n");
34981@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
34982 if ((retval = hvcs_partner_connect(hvcsd)))
34983 goto error_release;
34984
34985- hvcsd->open_count = 1;
34986+ local_set(&hvcsd->open_count, 1);
34987 hvcsd->tty = tty;
34988 tty->driver_data = hvcsd;
34989
34990@@ -1179,7 +1180,7 @@ fast_open:
34991
34992 spin_lock_irqsave(&hvcsd->lock, flags);
34993 kref_get(&hvcsd->kref);
34994- hvcsd->open_count++;
34995+ local_inc(&hvcsd->open_count);
34996 hvcsd->todo_mask |= HVCS_SCHED_READ;
34997 spin_unlock_irqrestore(&hvcsd->lock, flags);
34998
34999@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
35000 hvcsd = tty->driver_data;
35001
35002 spin_lock_irqsave(&hvcsd->lock, flags);
35003- if (--hvcsd->open_count == 0) {
35004+ if (local_dec_and_test(&hvcsd->open_count)) {
35005
35006 vio_disable_interrupts(hvcsd->vdev);
35007
35008@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
35009 free_irq(irq, hvcsd);
35010 kref_put(&hvcsd->kref, destroy_hvcs_struct);
35011 return;
35012- } else if (hvcsd->open_count < 0) {
35013+ } else if (local_read(&hvcsd->open_count) < 0) {
35014 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
35015 " is missmanaged.\n",
35016- hvcsd->vdev->unit_address, hvcsd->open_count);
35017+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
35018 }
35019
35020 spin_unlock_irqrestore(&hvcsd->lock, flags);
35021@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
35022
35023 spin_lock_irqsave(&hvcsd->lock, flags);
35024 /* Preserve this so that we know how many kref refs to put */
35025- temp_open_count = hvcsd->open_count;
35026+ temp_open_count = local_read(&hvcsd->open_count);
35027
35028 /*
35029 * Don't kref put inside the spinlock because the destruction
35030@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
35031 hvcsd->tty->driver_data = NULL;
35032 hvcsd->tty = NULL;
35033
35034- hvcsd->open_count = 0;
35035+ local_set(&hvcsd->open_count, 0);
35036
35037 /* This will drop any buffered data on the floor which is OK in a hangup
35038 * scenario. */
35039@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct
35040 * the middle of a write operation? This is a crummy place to do this
35041 * but we want to keep it all in the spinlock.
35042 */
35043- if (hvcsd->open_count <= 0) {
35044+ if (local_read(&hvcsd->open_count) <= 0) {
35045 spin_unlock_irqrestore(&hvcsd->lock, flags);
35046 return -ENODEV;
35047 }
35048@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
35049 {
35050 struct hvcs_struct *hvcsd = tty->driver_data;
35051
35052- if (!hvcsd || hvcsd->open_count <= 0)
35053+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
35054 return 0;
35055
35056 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
35057diff -urNp linux-3.0.7/drivers/tty/ipwireless/tty.c linux-3.0.7/drivers/tty/ipwireless/tty.c
35058--- linux-3.0.7/drivers/tty/ipwireless/tty.c 2011-07-21 22:17:23.000000000 -0400
35059+++ linux-3.0.7/drivers/tty/ipwireless/tty.c 2011-08-23 21:47:56.000000000 -0400
35060@@ -29,6 +29,7 @@
35061 #include <linux/tty_driver.h>
35062 #include <linux/tty_flip.h>
35063 #include <linux/uaccess.h>
35064+#include <asm/local.h>
35065
35066 #include "tty.h"
35067 #include "network.h"
35068@@ -51,7 +52,7 @@ struct ipw_tty {
35069 int tty_type;
35070 struct ipw_network *network;
35071 struct tty_struct *linux_tty;
35072- int open_count;
35073+ local_t open_count;
35074 unsigned int control_lines;
35075 struct mutex ipw_tty_mutex;
35076 int tx_bytes_queued;
35077@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
35078 mutex_unlock(&tty->ipw_tty_mutex);
35079 return -ENODEV;
35080 }
35081- if (tty->open_count == 0)
35082+ if (local_read(&tty->open_count) == 0)
35083 tty->tx_bytes_queued = 0;
35084
35085- tty->open_count++;
35086+ local_inc(&tty->open_count);
35087
35088 tty->linux_tty = linux_tty;
35089 linux_tty->driver_data = tty;
35090@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
35091
35092 static void do_ipw_close(struct ipw_tty *tty)
35093 {
35094- tty->open_count--;
35095-
35096- if (tty->open_count == 0) {
35097+ if (local_dec_return(&tty->open_count) == 0) {
35098 struct tty_struct *linux_tty = tty->linux_tty;
35099
35100 if (linux_tty != NULL) {
35101@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
35102 return;
35103
35104 mutex_lock(&tty->ipw_tty_mutex);
35105- if (tty->open_count == 0) {
35106+ if (local_read(&tty->open_count) == 0) {
35107 mutex_unlock(&tty->ipw_tty_mutex);
35108 return;
35109 }
35110@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
35111 return;
35112 }
35113
35114- if (!tty->open_count) {
35115+ if (!local_read(&tty->open_count)) {
35116 mutex_unlock(&tty->ipw_tty_mutex);
35117 return;
35118 }
35119@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
35120 return -ENODEV;
35121
35122 mutex_lock(&tty->ipw_tty_mutex);
35123- if (!tty->open_count) {
35124+ if (!local_read(&tty->open_count)) {
35125 mutex_unlock(&tty->ipw_tty_mutex);
35126 return -EINVAL;
35127 }
35128@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
35129 if (!tty)
35130 return -ENODEV;
35131
35132- if (!tty->open_count)
35133+ if (!local_read(&tty->open_count))
35134 return -EINVAL;
35135
35136 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
35137@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
35138 if (!tty)
35139 return 0;
35140
35141- if (!tty->open_count)
35142+ if (!local_read(&tty->open_count))
35143 return 0;
35144
35145 return tty->tx_bytes_queued;
35146@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
35147 if (!tty)
35148 return -ENODEV;
35149
35150- if (!tty->open_count)
35151+ if (!local_read(&tty->open_count))
35152 return -EINVAL;
35153
35154 return get_control_lines(tty);
35155@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
35156 if (!tty)
35157 return -ENODEV;
35158
35159- if (!tty->open_count)
35160+ if (!local_read(&tty->open_count))
35161 return -EINVAL;
35162
35163 return set_control_lines(tty, set, clear);
35164@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
35165 if (!tty)
35166 return -ENODEV;
35167
35168- if (!tty->open_count)
35169+ if (!local_read(&tty->open_count))
35170 return -EINVAL;
35171
35172 /* FIXME: Exactly how is the tty object locked here .. */
35173@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty
35174 against a parallel ioctl etc */
35175 mutex_lock(&ttyj->ipw_tty_mutex);
35176 }
35177- while (ttyj->open_count)
35178+ while (local_read(&ttyj->open_count))
35179 do_ipw_close(ttyj);
35180 ipwireless_disassociate_network_ttys(network,
35181 ttyj->channel_idx);
35182diff -urNp linux-3.0.7/drivers/tty/n_gsm.c linux-3.0.7/drivers/tty/n_gsm.c
35183--- linux-3.0.7/drivers/tty/n_gsm.c 2011-09-02 18:11:21.000000000 -0400
35184+++ linux-3.0.7/drivers/tty/n_gsm.c 2011-08-23 21:47:56.000000000 -0400
35185@@ -1589,7 +1589,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
35186 return NULL;
35187 spin_lock_init(&dlci->lock);
35188 dlci->fifo = &dlci->_fifo;
35189- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
35190+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
35191 kfree(dlci);
35192 return NULL;
35193 }
35194diff -urNp linux-3.0.7/drivers/tty/n_tty.c linux-3.0.7/drivers/tty/n_tty.c
35195--- linux-3.0.7/drivers/tty/n_tty.c 2011-07-21 22:17:23.000000000 -0400
35196+++ linux-3.0.7/drivers/tty/n_tty.c 2011-08-23 21:47:56.000000000 -0400
35197@@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
35198 {
35199 *ops = tty_ldisc_N_TTY;
35200 ops->owner = NULL;
35201- ops->refcount = ops->flags = 0;
35202+ atomic_set(&ops->refcount, 0);
35203+ ops->flags = 0;
35204 }
35205 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
35206diff -urNp linux-3.0.7/drivers/tty/pty.c linux-3.0.7/drivers/tty/pty.c
35207--- linux-3.0.7/drivers/tty/pty.c 2011-10-16 21:54:54.000000000 -0400
35208+++ linux-3.0.7/drivers/tty/pty.c 2011-10-16 21:55:28.000000000 -0400
35209@@ -767,8 +767,10 @@ static void __init unix98_pty_init(void)
35210 register_sysctl_table(pty_root_table);
35211
35212 /* Now create the /dev/ptmx special device */
35213+ pax_open_kernel();
35214 tty_default_fops(&ptmx_fops);
35215- ptmx_fops.open = ptmx_open;
35216+ *(void **)&ptmx_fops.open = ptmx_open;
35217+ pax_close_kernel();
35218
35219 cdev_init(&ptmx_cdev, &ptmx_fops);
35220 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
35221diff -urNp linux-3.0.7/drivers/tty/rocket.c linux-3.0.7/drivers/tty/rocket.c
35222--- linux-3.0.7/drivers/tty/rocket.c 2011-07-21 22:17:23.000000000 -0400
35223+++ linux-3.0.7/drivers/tty/rocket.c 2011-08-23 21:48:14.000000000 -0400
35224@@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
35225 struct rocket_ports tmp;
35226 int board;
35227
35228+ pax_track_stack();
35229+
35230 if (!retports)
35231 return -EFAULT;
35232 memset(&tmp, 0, sizeof (tmp));
35233diff -urNp linux-3.0.7/drivers/tty/serial/kgdboc.c linux-3.0.7/drivers/tty/serial/kgdboc.c
35234--- linux-3.0.7/drivers/tty/serial/kgdboc.c 2011-07-21 22:17:23.000000000 -0400
35235+++ linux-3.0.7/drivers/tty/serial/kgdboc.c 2011-08-23 21:47:56.000000000 -0400
35236@@ -23,8 +23,9 @@
35237 #define MAX_CONFIG_LEN 40
35238
35239 static struct kgdb_io kgdboc_io_ops;
35240+static struct kgdb_io kgdboc_io_ops_console;
35241
35242-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
35243+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
35244 static int configured = -1;
35245
35246 static char config[MAX_CONFIG_LEN];
35247@@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
35248 kgdboc_unregister_kbd();
35249 if (configured == 1)
35250 kgdb_unregister_io_module(&kgdboc_io_ops);
35251+ else if (configured == 2)
35252+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
35253 }
35254
35255 static int configure_kgdboc(void)
35256@@ -156,13 +159,13 @@ static int configure_kgdboc(void)
35257 int err;
35258 char *cptr = config;
35259 struct console *cons;
35260+ int is_console = 0;
35261
35262 err = kgdboc_option_setup(config);
35263 if (err || !strlen(config) || isspace(config[0]))
35264 goto noconfig;
35265
35266 err = -ENODEV;
35267- kgdboc_io_ops.is_console = 0;
35268 kgdb_tty_driver = NULL;
35269
35270 kgdboc_use_kms = 0;
35271@@ -183,7 +186,7 @@ static int configure_kgdboc(void)
35272 int idx;
35273 if (cons->device && cons->device(cons, &idx) == p &&
35274 idx == tty_line) {
35275- kgdboc_io_ops.is_console = 1;
35276+ is_console = 1;
35277 break;
35278 }
35279 cons = cons->next;
35280@@ -193,12 +196,16 @@ static int configure_kgdboc(void)
35281 kgdb_tty_line = tty_line;
35282
35283 do_register:
35284- err = kgdb_register_io_module(&kgdboc_io_ops);
35285+ if (is_console) {
35286+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
35287+ configured = 2;
35288+ } else {
35289+ err = kgdb_register_io_module(&kgdboc_io_ops);
35290+ configured = 1;
35291+ }
35292 if (err)
35293 goto noconfig;
35294
35295- configured = 1;
35296-
35297 return 0;
35298
35299 noconfig:
35300@@ -212,7 +219,7 @@ noconfig:
35301 static int __init init_kgdboc(void)
35302 {
35303 /* Already configured? */
35304- if (configured == 1)
35305+ if (configured >= 1)
35306 return 0;
35307
35308 return configure_kgdboc();
35309@@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const ch
35310 if (config[len - 1] == '\n')
35311 config[len - 1] = '\0';
35312
35313- if (configured == 1)
35314+ if (configured >= 1)
35315 cleanup_kgdboc();
35316
35317 /* Go and configure with the new params. */
35318@@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
35319 .post_exception = kgdboc_post_exp_handler,
35320 };
35321
35322+static struct kgdb_io kgdboc_io_ops_console = {
35323+ .name = "kgdboc",
35324+ .read_char = kgdboc_get_char,
35325+ .write_char = kgdboc_put_char,
35326+ .pre_exception = kgdboc_pre_exp_handler,
35327+ .post_exception = kgdboc_post_exp_handler,
35328+ .is_console = 1
35329+};
35330+
35331 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
35332 /* This is only available if kgdboc is a built in for early debugging */
35333 static int __init kgdboc_early_init(char *opt)
35334diff -urNp linux-3.0.7/drivers/tty/serial/mfd.c linux-3.0.7/drivers/tty/serial/mfd.c
35335--- linux-3.0.7/drivers/tty/serial/mfd.c 2011-07-21 22:17:23.000000000 -0400
35336+++ linux-3.0.7/drivers/tty/serial/mfd.c 2011-10-11 10:44:33.000000000 -0400
35337@@ -1423,7 +1423,7 @@ static void serial_hsu_remove(struct pci
35338 }
35339
35340 /* First 3 are UART ports, and the 4th is the DMA */
35341-static const struct pci_device_id pci_ids[] __devinitdata = {
35342+static const struct pci_device_id pci_ids[] __devinitconst = {
35343 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081B) },
35344 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081C) },
35345 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081D) },
35346diff -urNp linux-3.0.7/drivers/tty/serial/mrst_max3110.c linux-3.0.7/drivers/tty/serial/mrst_max3110.c
35347--- linux-3.0.7/drivers/tty/serial/mrst_max3110.c 2011-10-16 21:54:54.000000000 -0400
35348+++ linux-3.0.7/drivers/tty/serial/mrst_max3110.c 2011-10-16 21:55:28.000000000 -0400
35349@@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
35350 int loop = 1, num, total = 0;
35351 u8 recv_buf[512], *pbuf;
35352
35353+ pax_track_stack();
35354+
35355 pbuf = recv_buf;
35356 do {
35357 num = max3110_read_multi(max, pbuf);
35358diff -urNp linux-3.0.7/drivers/tty/tty_io.c linux-3.0.7/drivers/tty/tty_io.c
35359--- linux-3.0.7/drivers/tty/tty_io.c 2011-10-16 21:54:54.000000000 -0400
35360+++ linux-3.0.7/drivers/tty/tty_io.c 2011-10-16 21:55:28.000000000 -0400
35361@@ -3214,7 +3214,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
35362
35363 void tty_default_fops(struct file_operations *fops)
35364 {
35365- *fops = tty_fops;
35366+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
35367 }
35368
35369 /*
35370diff -urNp linux-3.0.7/drivers/tty/tty_ldisc.c linux-3.0.7/drivers/tty/tty_ldisc.c
35371--- linux-3.0.7/drivers/tty/tty_ldisc.c 2011-07-21 22:17:23.000000000 -0400
35372+++ linux-3.0.7/drivers/tty/tty_ldisc.c 2011-08-23 21:47:56.000000000 -0400
35373@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
35374 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
35375 struct tty_ldisc_ops *ldo = ld->ops;
35376
35377- ldo->refcount--;
35378+ atomic_dec(&ldo->refcount);
35379 module_put(ldo->owner);
35380 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35381
35382@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
35383 spin_lock_irqsave(&tty_ldisc_lock, flags);
35384 tty_ldiscs[disc] = new_ldisc;
35385 new_ldisc->num = disc;
35386- new_ldisc->refcount = 0;
35387+ atomic_set(&new_ldisc->refcount, 0);
35388 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35389
35390 return ret;
35391@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
35392 return -EINVAL;
35393
35394 spin_lock_irqsave(&tty_ldisc_lock, flags);
35395- if (tty_ldiscs[disc]->refcount)
35396+ if (atomic_read(&tty_ldiscs[disc]->refcount))
35397 ret = -EBUSY;
35398 else
35399 tty_ldiscs[disc] = NULL;
35400@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
35401 if (ldops) {
35402 ret = ERR_PTR(-EAGAIN);
35403 if (try_module_get(ldops->owner)) {
35404- ldops->refcount++;
35405+ atomic_inc(&ldops->refcount);
35406 ret = ldops;
35407 }
35408 }
35409@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
35410 unsigned long flags;
35411
35412 spin_lock_irqsave(&tty_ldisc_lock, flags);
35413- ldops->refcount--;
35414+ atomic_dec(&ldops->refcount);
35415 module_put(ldops->owner);
35416 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35417 }
35418diff -urNp linux-3.0.7/drivers/tty/vt/keyboard.c linux-3.0.7/drivers/tty/vt/keyboard.c
35419--- linux-3.0.7/drivers/tty/vt/keyboard.c 2011-07-21 22:17:23.000000000 -0400
35420+++ linux-3.0.7/drivers/tty/vt/keyboard.c 2011-08-23 21:48:14.000000000 -0400
35421@@ -656,6 +656,16 @@ static void k_spec(struct vc_data *vc, u
35422 kbd->kbdmode == VC_OFF) &&
35423 value != KVAL(K_SAK))
35424 return; /* SAK is allowed even in raw mode */
35425+
35426+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
35427+ {
35428+ void *func = fn_handler[value];
35429+ if (func == fn_show_state || func == fn_show_ptregs ||
35430+ func == fn_show_mem)
35431+ return;
35432+ }
35433+#endif
35434+
35435 fn_handler[value](vc);
35436 }
35437
35438diff -urNp linux-3.0.7/drivers/tty/vt/vt.c linux-3.0.7/drivers/tty/vt/vt.c
35439--- linux-3.0.7/drivers/tty/vt/vt.c 2011-07-21 22:17:23.000000000 -0400
35440+++ linux-3.0.7/drivers/tty/vt/vt.c 2011-08-23 21:47:56.000000000 -0400
35441@@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
35442
35443 static void notify_write(struct vc_data *vc, unsigned int unicode)
35444 {
35445- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
35446+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
35447 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
35448 }
35449
35450diff -urNp linux-3.0.7/drivers/tty/vt/vt_ioctl.c linux-3.0.7/drivers/tty/vt/vt_ioctl.c
35451--- linux-3.0.7/drivers/tty/vt/vt_ioctl.c 2011-07-21 22:17:23.000000000 -0400
35452+++ linux-3.0.7/drivers/tty/vt/vt_ioctl.c 2011-08-23 21:48:14.000000000 -0400
35453@@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
35454 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
35455 return -EFAULT;
35456
35457- if (!capable(CAP_SYS_TTY_CONFIG))
35458- perm = 0;
35459-
35460 switch (cmd) {
35461 case KDGKBENT:
35462 key_map = key_maps[s];
35463@@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
35464 val = (i ? K_HOLE : K_NOSUCHMAP);
35465 return put_user(val, &user_kbe->kb_value);
35466 case KDSKBENT:
35467+ if (!capable(CAP_SYS_TTY_CONFIG))
35468+ perm = 0;
35469+
35470 if (!perm)
35471 return -EPERM;
35472 if (!i && v == K_NOSUCHMAP) {
35473@@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
35474 int i, j, k;
35475 int ret;
35476
35477- if (!capable(CAP_SYS_TTY_CONFIG))
35478- perm = 0;
35479-
35480 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
35481 if (!kbs) {
35482 ret = -ENOMEM;
35483@@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
35484 kfree(kbs);
35485 return ((p && *p) ? -EOVERFLOW : 0);
35486 case KDSKBSENT:
35487+ if (!capable(CAP_SYS_TTY_CONFIG))
35488+ perm = 0;
35489+
35490 if (!perm) {
35491 ret = -EPERM;
35492 goto reterr;
35493diff -urNp linux-3.0.7/drivers/uio/uio.c linux-3.0.7/drivers/uio/uio.c
35494--- linux-3.0.7/drivers/uio/uio.c 2011-07-21 22:17:23.000000000 -0400
35495+++ linux-3.0.7/drivers/uio/uio.c 2011-08-23 21:47:56.000000000 -0400
35496@@ -25,6 +25,7 @@
35497 #include <linux/kobject.h>
35498 #include <linux/cdev.h>
35499 #include <linux/uio_driver.h>
35500+#include <asm/local.h>
35501
35502 #define UIO_MAX_DEVICES (1U << MINORBITS)
35503
35504@@ -32,10 +33,10 @@ struct uio_device {
35505 struct module *owner;
35506 struct device *dev;
35507 int minor;
35508- atomic_t event;
35509+ atomic_unchecked_t event;
35510 struct fasync_struct *async_queue;
35511 wait_queue_head_t wait;
35512- int vma_count;
35513+ local_t vma_count;
35514 struct uio_info *info;
35515 struct kobject *map_dir;
35516 struct kobject *portio_dir;
35517@@ -242,7 +243,7 @@ static ssize_t show_event(struct device
35518 struct device_attribute *attr, char *buf)
35519 {
35520 struct uio_device *idev = dev_get_drvdata(dev);
35521- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
35522+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
35523 }
35524
35525 static struct device_attribute uio_class_attributes[] = {
35526@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *i
35527 {
35528 struct uio_device *idev = info->uio_dev;
35529
35530- atomic_inc(&idev->event);
35531+ atomic_inc_unchecked(&idev->event);
35532 wake_up_interruptible(&idev->wait);
35533 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
35534 }
35535@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode,
35536 }
35537
35538 listener->dev = idev;
35539- listener->event_count = atomic_read(&idev->event);
35540+ listener->event_count = atomic_read_unchecked(&idev->event);
35541 filep->private_data = listener;
35542
35543 if (idev->info->open) {
35544@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file
35545 return -EIO;
35546
35547 poll_wait(filep, &idev->wait, wait);
35548- if (listener->event_count != atomic_read(&idev->event))
35549+ if (listener->event_count != atomic_read_unchecked(&idev->event))
35550 return POLLIN | POLLRDNORM;
35551 return 0;
35552 }
35553@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *fil
35554 do {
35555 set_current_state(TASK_INTERRUPTIBLE);
35556
35557- event_count = atomic_read(&idev->event);
35558+ event_count = atomic_read_unchecked(&idev->event);
35559 if (event_count != listener->event_count) {
35560 if (copy_to_user(buf, &event_count, count))
35561 retval = -EFAULT;
35562@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_
35563 static void uio_vma_open(struct vm_area_struct *vma)
35564 {
35565 struct uio_device *idev = vma->vm_private_data;
35566- idev->vma_count++;
35567+ local_inc(&idev->vma_count);
35568 }
35569
35570 static void uio_vma_close(struct vm_area_struct *vma)
35571 {
35572 struct uio_device *idev = vma->vm_private_data;
35573- idev->vma_count--;
35574+ local_dec(&idev->vma_count);
35575 }
35576
35577 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
35578@@ -823,7 +824,7 @@ int __uio_register_device(struct module
35579 idev->owner = owner;
35580 idev->info = info;
35581 init_waitqueue_head(&idev->wait);
35582- atomic_set(&idev->event, 0);
35583+ atomic_set_unchecked(&idev->event, 0);
35584
35585 ret = uio_get_minor(idev);
35586 if (ret)
35587diff -urNp linux-3.0.7/drivers/usb/atm/cxacru.c linux-3.0.7/drivers/usb/atm/cxacru.c
35588--- linux-3.0.7/drivers/usb/atm/cxacru.c 2011-07-21 22:17:23.000000000 -0400
35589+++ linux-3.0.7/drivers/usb/atm/cxacru.c 2011-08-23 21:47:56.000000000 -0400
35590@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
35591 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
35592 if (ret < 2)
35593 return -EINVAL;
35594- if (index < 0 || index > 0x7f)
35595+ if (index > 0x7f)
35596 return -EINVAL;
35597 pos += tmp;
35598
35599diff -urNp linux-3.0.7/drivers/usb/atm/usbatm.c linux-3.0.7/drivers/usb/atm/usbatm.c
35600--- linux-3.0.7/drivers/usb/atm/usbatm.c 2011-07-21 22:17:23.000000000 -0400
35601+++ linux-3.0.7/drivers/usb/atm/usbatm.c 2011-08-23 21:47:56.000000000 -0400
35602@@ -332,7 +332,7 @@ static void usbatm_extract_one_cell(stru
35603 if (printk_ratelimit())
35604 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
35605 __func__, vpi, vci);
35606- atomic_inc(&vcc->stats->rx_err);
35607+ atomic_inc_unchecked(&vcc->stats->rx_err);
35608 return;
35609 }
35610
35611@@ -360,7 +360,7 @@ static void usbatm_extract_one_cell(stru
35612 if (length > ATM_MAX_AAL5_PDU) {
35613 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
35614 __func__, length, vcc);
35615- atomic_inc(&vcc->stats->rx_err);
35616+ atomic_inc_unchecked(&vcc->stats->rx_err);
35617 goto out;
35618 }
35619
35620@@ -369,14 +369,14 @@ static void usbatm_extract_one_cell(stru
35621 if (sarb->len < pdu_length) {
35622 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
35623 __func__, pdu_length, sarb->len, vcc);
35624- atomic_inc(&vcc->stats->rx_err);
35625+ atomic_inc_unchecked(&vcc->stats->rx_err);
35626 goto out;
35627 }
35628
35629 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
35630 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
35631 __func__, vcc);
35632- atomic_inc(&vcc->stats->rx_err);
35633+ atomic_inc_unchecked(&vcc->stats->rx_err);
35634 goto out;
35635 }
35636
35637@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(stru
35638 if (printk_ratelimit())
35639 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
35640 __func__, length);
35641- atomic_inc(&vcc->stats->rx_drop);
35642+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35643 goto out;
35644 }
35645
35646@@ -411,7 +411,7 @@ static void usbatm_extract_one_cell(stru
35647
35648 vcc->push(vcc, skb);
35649
35650- atomic_inc(&vcc->stats->rx);
35651+ atomic_inc_unchecked(&vcc->stats->rx);
35652 out:
35653 skb_trim(sarb, 0);
35654 }
35655@@ -614,7 +614,7 @@ static void usbatm_tx_process(unsigned l
35656 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
35657
35658 usbatm_pop(vcc, skb);
35659- atomic_inc(&vcc->stats->tx);
35660+ atomic_inc_unchecked(&vcc->stats->tx);
35661
35662 skb = skb_dequeue(&instance->sndqueue);
35663 }
35664@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
35665 if (!left--)
35666 return sprintf(page,
35667 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
35668- atomic_read(&atm_dev->stats.aal5.tx),
35669- atomic_read(&atm_dev->stats.aal5.tx_err),
35670- atomic_read(&atm_dev->stats.aal5.rx),
35671- atomic_read(&atm_dev->stats.aal5.rx_err),
35672- atomic_read(&atm_dev->stats.aal5.rx_drop));
35673+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
35674+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
35675+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
35676+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
35677+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
35678
35679 if (!left--) {
35680 if (instance->disconnected)
35681diff -urNp linux-3.0.7/drivers/usb/core/devices.c linux-3.0.7/drivers/usb/core/devices.c
35682--- linux-3.0.7/drivers/usb/core/devices.c 2011-07-21 22:17:23.000000000 -0400
35683+++ linux-3.0.7/drivers/usb/core/devices.c 2011-08-23 21:47:56.000000000 -0400
35684@@ -126,7 +126,7 @@ static const char format_endpt[] =
35685 * time it gets called.
35686 */
35687 static struct device_connect_event {
35688- atomic_t count;
35689+ atomic_unchecked_t count;
35690 wait_queue_head_t wait;
35691 } device_event = {
35692 .count = ATOMIC_INIT(1),
35693@@ -164,7 +164,7 @@ static const struct class_info clas_info
35694
35695 void usbfs_conn_disc_event(void)
35696 {
35697- atomic_add(2, &device_event.count);
35698+ atomic_add_unchecked(2, &device_event.count);
35699 wake_up(&device_event.wait);
35700 }
35701
35702@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
35703
35704 poll_wait(file, &device_event.wait, wait);
35705
35706- event_count = atomic_read(&device_event.count);
35707+ event_count = atomic_read_unchecked(&device_event.count);
35708 if (file->f_version != event_count) {
35709 file->f_version = event_count;
35710 return POLLIN | POLLRDNORM;
35711diff -urNp linux-3.0.7/drivers/usb/core/message.c linux-3.0.7/drivers/usb/core/message.c
35712--- linux-3.0.7/drivers/usb/core/message.c 2011-07-21 22:17:23.000000000 -0400
35713+++ linux-3.0.7/drivers/usb/core/message.c 2011-08-23 21:47:56.000000000 -0400
35714@@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
35715 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
35716 if (buf) {
35717 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
35718- if (len > 0) {
35719- smallbuf = kmalloc(++len, GFP_NOIO);
35720+ if (len++ > 0) {
35721+ smallbuf = kmalloc(len, GFP_NOIO);
35722 if (!smallbuf)
35723 return buf;
35724 memcpy(smallbuf, buf, len);
35725diff -urNp linux-3.0.7/drivers/usb/early/ehci-dbgp.c linux-3.0.7/drivers/usb/early/ehci-dbgp.c
35726--- linux-3.0.7/drivers/usb/early/ehci-dbgp.c 2011-07-21 22:17:23.000000000 -0400
35727+++ linux-3.0.7/drivers/usb/early/ehci-dbgp.c 2011-08-23 21:47:56.000000000 -0400
35728@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
35729
35730 #ifdef CONFIG_KGDB
35731 static struct kgdb_io kgdbdbgp_io_ops;
35732-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
35733+static struct kgdb_io kgdbdbgp_io_ops_console;
35734+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
35735 #else
35736 #define dbgp_kgdb_mode (0)
35737 #endif
35738@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
35739 .write_char = kgdbdbgp_write_char,
35740 };
35741
35742+static struct kgdb_io kgdbdbgp_io_ops_console = {
35743+ .name = "kgdbdbgp",
35744+ .read_char = kgdbdbgp_read_char,
35745+ .write_char = kgdbdbgp_write_char,
35746+ .is_console = 1
35747+};
35748+
35749 static int kgdbdbgp_wait_time;
35750
35751 static int __init kgdbdbgp_parse_config(char *str)
35752@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(
35753 ptr++;
35754 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
35755 }
35756- kgdb_register_io_module(&kgdbdbgp_io_ops);
35757- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
35758+ if (early_dbgp_console.index != -1)
35759+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
35760+ else
35761+ kgdb_register_io_module(&kgdbdbgp_io_ops);
35762
35763 return 0;
35764 }
35765diff -urNp linux-3.0.7/drivers/usb/host/xhci-mem.c linux-3.0.7/drivers/usb/host/xhci-mem.c
35766--- linux-3.0.7/drivers/usb/host/xhci-mem.c 2011-07-21 22:17:23.000000000 -0400
35767+++ linux-3.0.7/drivers/usb/host/xhci-mem.c 2011-08-23 21:48:14.000000000 -0400
35768@@ -1685,6 +1685,8 @@ static int xhci_check_trb_in_td_math(str
35769 unsigned int num_tests;
35770 int i, ret;
35771
35772+ pax_track_stack();
35773+
35774 num_tests = ARRAY_SIZE(simple_test_vector);
35775 for (i = 0; i < num_tests; i++) {
35776 ret = xhci_test_trb_in_td(xhci,
35777diff -urNp linux-3.0.7/drivers/usb/wusbcore/wa-hc.h linux-3.0.7/drivers/usb/wusbcore/wa-hc.h
35778--- linux-3.0.7/drivers/usb/wusbcore/wa-hc.h 2011-07-21 22:17:23.000000000 -0400
35779+++ linux-3.0.7/drivers/usb/wusbcore/wa-hc.h 2011-08-23 21:47:56.000000000 -0400
35780@@ -192,7 +192,7 @@ struct wahc {
35781 struct list_head xfer_delayed_list;
35782 spinlock_t xfer_list_lock;
35783 struct work_struct xfer_work;
35784- atomic_t xfer_id_count;
35785+ atomic_unchecked_t xfer_id_count;
35786 };
35787
35788
35789@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
35790 INIT_LIST_HEAD(&wa->xfer_delayed_list);
35791 spin_lock_init(&wa->xfer_list_lock);
35792 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
35793- atomic_set(&wa->xfer_id_count, 1);
35794+ atomic_set_unchecked(&wa->xfer_id_count, 1);
35795 }
35796
35797 /**
35798diff -urNp linux-3.0.7/drivers/usb/wusbcore/wa-xfer.c linux-3.0.7/drivers/usb/wusbcore/wa-xfer.c
35799--- linux-3.0.7/drivers/usb/wusbcore/wa-xfer.c 2011-07-21 22:17:23.000000000 -0400
35800+++ linux-3.0.7/drivers/usb/wusbcore/wa-xfer.c 2011-08-23 21:47:56.000000000 -0400
35801@@ -294,7 +294,7 @@ out:
35802 */
35803 static void wa_xfer_id_init(struct wa_xfer *xfer)
35804 {
35805- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
35806+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
35807 }
35808
35809 /*
35810diff -urNp linux-3.0.7/drivers/vhost/vhost.c linux-3.0.7/drivers/vhost/vhost.c
35811--- linux-3.0.7/drivers/vhost/vhost.c 2011-07-21 22:17:23.000000000 -0400
35812+++ linux-3.0.7/drivers/vhost/vhost.c 2011-08-23 21:47:56.000000000 -0400
35813@@ -589,7 +589,7 @@ static int init_used(struct vhost_virtqu
35814 return get_user(vq->last_used_idx, &used->idx);
35815 }
35816
35817-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
35818+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
35819 {
35820 struct file *eventfp, *filep = NULL,
35821 *pollstart = NULL, *pollstop = NULL;
35822diff -urNp linux-3.0.7/drivers/video/aty/aty128fb.c linux-3.0.7/drivers/video/aty/aty128fb.c
35823--- linux-3.0.7/drivers/video/aty/aty128fb.c 2011-07-21 22:17:23.000000000 -0400
35824+++ linux-3.0.7/drivers/video/aty/aty128fb.c 2011-10-11 10:44:33.000000000 -0400
35825@@ -148,7 +148,7 @@ enum {
35826 };
35827
35828 /* Must match above enum */
35829-static const char *r128_family[] __devinitdata = {
35830+static const char *r128_family[] __devinitconst = {
35831 "AGP",
35832 "PCI",
35833 "PRO AGP",
35834diff -urNp linux-3.0.7/drivers/video/fbcmap.c linux-3.0.7/drivers/video/fbcmap.c
35835--- linux-3.0.7/drivers/video/fbcmap.c 2011-07-21 22:17:23.000000000 -0400
35836+++ linux-3.0.7/drivers/video/fbcmap.c 2011-08-23 21:47:56.000000000 -0400
35837@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
35838 rc = -ENODEV;
35839 goto out;
35840 }
35841- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
35842- !info->fbops->fb_setcmap)) {
35843+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
35844 rc = -EINVAL;
35845 goto out1;
35846 }
35847diff -urNp linux-3.0.7/drivers/video/fbmem.c linux-3.0.7/drivers/video/fbmem.c
35848--- linux-3.0.7/drivers/video/fbmem.c 2011-07-21 22:17:23.000000000 -0400
35849+++ linux-3.0.7/drivers/video/fbmem.c 2011-08-23 21:48:14.000000000 -0400
35850@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
35851 image->dx += image->width + 8;
35852 }
35853 } else if (rotate == FB_ROTATE_UD) {
35854- for (x = 0; x < num && image->dx >= 0; x++) {
35855+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
35856 info->fbops->fb_imageblit(info, image);
35857 image->dx -= image->width + 8;
35858 }
35859@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
35860 image->dy += image->height + 8;
35861 }
35862 } else if (rotate == FB_ROTATE_CCW) {
35863- for (x = 0; x < num && image->dy >= 0; x++) {
35864+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
35865 info->fbops->fb_imageblit(info, image);
35866 image->dy -= image->height + 8;
35867 }
35868@@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct
35869 int flags = info->flags;
35870 int ret = 0;
35871
35872+ pax_track_stack();
35873+
35874 if (var->activate & FB_ACTIVATE_INV_MODE) {
35875 struct fb_videomode mode1, mode2;
35876
35877@@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
35878 void __user *argp = (void __user *)arg;
35879 long ret = 0;
35880
35881+ pax_track_stack();
35882+
35883 switch (cmd) {
35884 case FBIOGET_VSCREENINFO:
35885 if (!lock_fb_info(info))
35886@@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
35887 return -EFAULT;
35888 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
35889 return -EINVAL;
35890- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
35891+ if (con2fb.framebuffer >= FB_MAX)
35892 return -EINVAL;
35893 if (!registered_fb[con2fb.framebuffer])
35894 request_module("fb%d", con2fb.framebuffer);
35895diff -urNp linux-3.0.7/drivers/video/geode/gx1fb_core.c linux-3.0.7/drivers/video/geode/gx1fb_core.c
35896--- linux-3.0.7/drivers/video/geode/gx1fb_core.c 2011-07-21 22:17:23.000000000 -0400
35897+++ linux-3.0.7/drivers/video/geode/gx1fb_core.c 2011-10-11 10:44:33.000000000 -0400
35898@@ -29,7 +29,7 @@ static int crt_option = 1;
35899 static char panel_option[32] = "";
35900
35901 /* Modes relevant to the GX1 (taken from modedb.c) */
35902-static const struct fb_videomode __devinitdata gx1_modedb[] = {
35903+static const struct fb_videomode __devinitconst gx1_modedb[] = {
35904 /* 640x480-60 VESA */
35905 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
35906 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
35907diff -urNp linux-3.0.7/drivers/video/gxt4500.c linux-3.0.7/drivers/video/gxt4500.c
35908--- linux-3.0.7/drivers/video/gxt4500.c 2011-07-21 22:17:23.000000000 -0400
35909+++ linux-3.0.7/drivers/video/gxt4500.c 2011-10-11 10:44:33.000000000 -0400
35910@@ -156,7 +156,7 @@ struct gxt4500_par {
35911 static char *mode_option;
35912
35913 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
35914-static const struct fb_videomode defaultmode __devinitdata = {
35915+static const struct fb_videomode defaultmode __devinitconst = {
35916 .refresh = 60,
35917 .xres = 1280,
35918 .yres = 1024,
35919@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, stru
35920 return 0;
35921 }
35922
35923-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
35924+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
35925 .id = "IBM GXT4500P",
35926 .type = FB_TYPE_PACKED_PIXELS,
35927 .visual = FB_VISUAL_PSEUDOCOLOR,
35928diff -urNp linux-3.0.7/drivers/video/i810/i810_accel.c linux-3.0.7/drivers/video/i810/i810_accel.c
35929--- linux-3.0.7/drivers/video/i810/i810_accel.c 2011-07-21 22:17:23.000000000 -0400
35930+++ linux-3.0.7/drivers/video/i810/i810_accel.c 2011-08-23 21:47:56.000000000 -0400
35931@@ -73,6 +73,7 @@ static inline int wait_for_space(struct
35932 }
35933 }
35934 printk("ringbuffer lockup!!!\n");
35935+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
35936 i810_report_error(mmio);
35937 par->dev_flags |= LOCKUP;
35938 info->pixmap.scan_align = 1;
35939diff -urNp linux-3.0.7/drivers/video/i810/i810_main.c linux-3.0.7/drivers/video/i810/i810_main.c
35940--- linux-3.0.7/drivers/video/i810/i810_main.c 2011-07-21 22:17:23.000000000 -0400
35941+++ linux-3.0.7/drivers/video/i810/i810_main.c 2011-10-11 10:44:33.000000000 -0400
35942@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_
35943 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
35944
35945 /* PCI */
35946-static const char *i810_pci_list[] __devinitdata = {
35947+static const char *i810_pci_list[] __devinitconst = {
35948 "Intel(R) 810 Framebuffer Device" ,
35949 "Intel(R) 810-DC100 Framebuffer Device" ,
35950 "Intel(R) 810E Framebuffer Device" ,
35951diff -urNp linux-3.0.7/drivers/video/jz4740_fb.c linux-3.0.7/drivers/video/jz4740_fb.c
35952--- linux-3.0.7/drivers/video/jz4740_fb.c 2011-07-21 22:17:23.000000000 -0400
35953+++ linux-3.0.7/drivers/video/jz4740_fb.c 2011-10-11 10:44:33.000000000 -0400
35954@@ -136,7 +136,7 @@ struct jzfb {
35955 uint32_t pseudo_palette[16];
35956 };
35957
35958-static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
35959+static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
35960 .id = "JZ4740 FB",
35961 .type = FB_TYPE_PACKED_PIXELS,
35962 .visual = FB_VISUAL_TRUECOLOR,
35963diff -urNp linux-3.0.7/drivers/video/logo/logo_linux_clut224.ppm linux-3.0.7/drivers/video/logo/logo_linux_clut224.ppm
35964--- linux-3.0.7/drivers/video/logo/logo_linux_clut224.ppm 2011-07-21 22:17:23.000000000 -0400
35965+++ linux-3.0.7/drivers/video/logo/logo_linux_clut224.ppm 2011-08-29 23:49:40.000000000 -0400
35966@@ -1,1604 +1,1123 @@
35967 P3
35968-# Standard 224-color Linux logo
35969 80 80
35970 255
35971- 0 0 0 0 0 0 0 0 0 0 0 0
35972- 0 0 0 0 0 0 0 0 0 0 0 0
35973- 0 0 0 0 0 0 0 0 0 0 0 0
35974- 0 0 0 0 0 0 0 0 0 0 0 0
35975- 0 0 0 0 0 0 0 0 0 0 0 0
35976- 0 0 0 0 0 0 0 0 0 0 0 0
35977- 0 0 0 0 0 0 0 0 0 0 0 0
35978- 0 0 0 0 0 0 0 0 0 0 0 0
35979- 0 0 0 0 0 0 0 0 0 0 0 0
35980- 6 6 6 6 6 6 10 10 10 10 10 10
35981- 10 10 10 6 6 6 6 6 6 6 6 6
35982- 0 0 0 0 0 0 0 0 0 0 0 0
35983- 0 0 0 0 0 0 0 0 0 0 0 0
35984- 0 0 0 0 0 0 0 0 0 0 0 0
35985- 0 0 0 0 0 0 0 0 0 0 0 0
35986- 0 0 0 0 0 0 0 0 0 0 0 0
35987- 0 0 0 0 0 0 0 0 0 0 0 0
35988- 0 0 0 0 0 0 0 0 0 0 0 0
35989- 0 0 0 0 0 0 0 0 0 0 0 0
35990- 0 0 0 0 0 0 0 0 0 0 0 0
35991- 0 0 0 0 0 0 0 0 0 0 0 0
35992- 0 0 0 0 0 0 0 0 0 0 0 0
35993- 0 0 0 0 0 0 0 0 0 0 0 0
35994- 0 0 0 0 0 0 0 0 0 0 0 0
35995- 0 0 0 0 0 0 0 0 0 0 0 0
35996- 0 0 0 0 0 0 0 0 0 0 0 0
35997- 0 0 0 0 0 0 0 0 0 0 0 0
35998- 0 0 0 0 0 0 0 0 0 0 0 0
35999- 0 0 0 6 6 6 10 10 10 14 14 14
36000- 22 22 22 26 26 26 30 30 30 34 34 34
36001- 30 30 30 30 30 30 26 26 26 18 18 18
36002- 14 14 14 10 10 10 6 6 6 0 0 0
36003- 0 0 0 0 0 0 0 0 0 0 0 0
36004- 0 0 0 0 0 0 0 0 0 0 0 0
36005- 0 0 0 0 0 0 0 0 0 0 0 0
36006- 0 0 0 0 0 0 0 0 0 0 0 0
36007- 0 0 0 0 0 0 0 0 0 0 0 0
36008- 0 0 0 0 0 0 0 0 0 0 0 0
36009- 0 0 0 0 0 0 0 0 0 0 0 0
36010- 0 0 0 0 0 0 0 0 0 0 0 0
36011- 0 0 0 0 0 0 0 0 0 0 0 0
36012- 0 0 0 0 0 1 0 0 1 0 0 0
36013- 0 0 0 0 0 0 0 0 0 0 0 0
36014- 0 0 0 0 0 0 0 0 0 0 0 0
36015- 0 0 0 0 0 0 0 0 0 0 0 0
36016- 0 0 0 0 0 0 0 0 0 0 0 0
36017- 0 0 0 0 0 0 0 0 0 0 0 0
36018- 0 0 0 0 0 0 0 0 0 0 0 0
36019- 6 6 6 14 14 14 26 26 26 42 42 42
36020- 54 54 54 66 66 66 78 78 78 78 78 78
36021- 78 78 78 74 74 74 66 66 66 54 54 54
36022- 42 42 42 26 26 26 18 18 18 10 10 10
36023- 6 6 6 0 0 0 0 0 0 0 0 0
36024- 0 0 0 0 0 0 0 0 0 0 0 0
36025- 0 0 0 0 0 0 0 0 0 0 0 0
36026- 0 0 0 0 0 0 0 0 0 0 0 0
36027- 0 0 0 0 0 0 0 0 0 0 0 0
36028- 0 0 0 0 0 0 0 0 0 0 0 0
36029- 0 0 0 0 0 0 0 0 0 0 0 0
36030- 0 0 0 0 0 0 0 0 0 0 0 0
36031- 0 0 0 0 0 0 0 0 0 0 0 0
36032- 0 0 1 0 0 0 0 0 0 0 0 0
36033- 0 0 0 0 0 0 0 0 0 0 0 0
36034- 0 0 0 0 0 0 0 0 0 0 0 0
36035- 0 0 0 0 0 0 0 0 0 0 0 0
36036- 0 0 0 0 0 0 0 0 0 0 0 0
36037- 0 0 0 0 0 0 0 0 0 0 0 0
36038- 0 0 0 0 0 0 0 0 0 10 10 10
36039- 22 22 22 42 42 42 66 66 66 86 86 86
36040- 66 66 66 38 38 38 38 38 38 22 22 22
36041- 26 26 26 34 34 34 54 54 54 66 66 66
36042- 86 86 86 70 70 70 46 46 46 26 26 26
36043- 14 14 14 6 6 6 0 0 0 0 0 0
36044- 0 0 0 0 0 0 0 0 0 0 0 0
36045- 0 0 0 0 0 0 0 0 0 0 0 0
36046- 0 0 0 0 0 0 0 0 0 0 0 0
36047- 0 0 0 0 0 0 0 0 0 0 0 0
36048- 0 0 0 0 0 0 0 0 0 0 0 0
36049- 0 0 0 0 0 0 0 0 0 0 0 0
36050- 0 0 0 0 0 0 0 0 0 0 0 0
36051- 0 0 0 0 0 0 0 0 0 0 0 0
36052- 0 0 1 0 0 1 0 0 1 0 0 0
36053- 0 0 0 0 0 0 0 0 0 0 0 0
36054- 0 0 0 0 0 0 0 0 0 0 0 0
36055- 0 0 0 0 0 0 0 0 0 0 0 0
36056- 0 0 0 0 0 0 0 0 0 0 0 0
36057- 0 0 0 0 0 0 0 0 0 0 0 0
36058- 0 0 0 0 0 0 10 10 10 26 26 26
36059- 50 50 50 82 82 82 58 58 58 6 6 6
36060- 2 2 6 2 2 6 2 2 6 2 2 6
36061- 2 2 6 2 2 6 2 2 6 2 2 6
36062- 6 6 6 54 54 54 86 86 86 66 66 66
36063- 38 38 38 18 18 18 6 6 6 0 0 0
36064- 0 0 0 0 0 0 0 0 0 0 0 0
36065- 0 0 0 0 0 0 0 0 0 0 0 0
36066- 0 0 0 0 0 0 0 0 0 0 0 0
36067- 0 0 0 0 0 0 0 0 0 0 0 0
36068- 0 0 0 0 0 0 0 0 0 0 0 0
36069- 0 0 0 0 0 0 0 0 0 0 0 0
36070- 0 0 0 0 0 0 0 0 0 0 0 0
36071- 0 0 0 0 0 0 0 0 0 0 0 0
36072- 0 0 0 0 0 0 0 0 0 0 0 0
36073- 0 0 0 0 0 0 0 0 0 0 0 0
36074- 0 0 0 0 0 0 0 0 0 0 0 0
36075- 0 0 0 0 0 0 0 0 0 0 0 0
36076- 0 0 0 0 0 0 0 0 0 0 0 0
36077- 0 0 0 0 0 0 0 0 0 0 0 0
36078- 0 0 0 6 6 6 22 22 22 50 50 50
36079- 78 78 78 34 34 34 2 2 6 2 2 6
36080- 2 2 6 2 2 6 2 2 6 2 2 6
36081- 2 2 6 2 2 6 2 2 6 2 2 6
36082- 2 2 6 2 2 6 6 6 6 70 70 70
36083- 78 78 78 46 46 46 22 22 22 6 6 6
36084- 0 0 0 0 0 0 0 0 0 0 0 0
36085- 0 0 0 0 0 0 0 0 0 0 0 0
36086- 0 0 0 0 0 0 0 0 0 0 0 0
36087- 0 0 0 0 0 0 0 0 0 0 0 0
36088- 0 0 0 0 0 0 0 0 0 0 0 0
36089- 0 0 0 0 0 0 0 0 0 0 0 0
36090- 0 0 0 0 0 0 0 0 0 0 0 0
36091- 0 0 0 0 0 0 0 0 0 0 0 0
36092- 0 0 1 0 0 1 0 0 1 0 0 0
36093- 0 0 0 0 0 0 0 0 0 0 0 0
36094- 0 0 0 0 0 0 0 0 0 0 0 0
36095- 0 0 0 0 0 0 0 0 0 0 0 0
36096- 0 0 0 0 0 0 0 0 0 0 0 0
36097- 0 0 0 0 0 0 0 0 0 0 0 0
36098- 6 6 6 18 18 18 42 42 42 82 82 82
36099- 26 26 26 2 2 6 2 2 6 2 2 6
36100- 2 2 6 2 2 6 2 2 6 2 2 6
36101- 2 2 6 2 2 6 2 2 6 14 14 14
36102- 46 46 46 34 34 34 6 6 6 2 2 6
36103- 42 42 42 78 78 78 42 42 42 18 18 18
36104- 6 6 6 0 0 0 0 0 0 0 0 0
36105- 0 0 0 0 0 0 0 0 0 0 0 0
36106- 0 0 0 0 0 0 0 0 0 0 0 0
36107- 0 0 0 0 0 0 0 0 0 0 0 0
36108- 0 0 0 0 0 0 0 0 0 0 0 0
36109- 0 0 0 0 0 0 0 0 0 0 0 0
36110- 0 0 0 0 0 0 0 0 0 0 0 0
36111- 0 0 0 0 0 0 0 0 0 0 0 0
36112- 0 0 1 0 0 0 0 0 1 0 0 0
36113- 0 0 0 0 0 0 0 0 0 0 0 0
36114- 0 0 0 0 0 0 0 0 0 0 0 0
36115- 0 0 0 0 0 0 0 0 0 0 0 0
36116- 0 0 0 0 0 0 0 0 0 0 0 0
36117- 0 0 0 0 0 0 0 0 0 0 0 0
36118- 10 10 10 30 30 30 66 66 66 58 58 58
36119- 2 2 6 2 2 6 2 2 6 2 2 6
36120- 2 2 6 2 2 6 2 2 6 2 2 6
36121- 2 2 6 2 2 6 2 2 6 26 26 26
36122- 86 86 86 101 101 101 46 46 46 10 10 10
36123- 2 2 6 58 58 58 70 70 70 34 34 34
36124- 10 10 10 0 0 0 0 0 0 0 0 0
36125- 0 0 0 0 0 0 0 0 0 0 0 0
36126- 0 0 0 0 0 0 0 0 0 0 0 0
36127- 0 0 0 0 0 0 0 0 0 0 0 0
36128- 0 0 0 0 0 0 0 0 0 0 0 0
36129- 0 0 0 0 0 0 0 0 0 0 0 0
36130- 0 0 0 0 0 0 0 0 0 0 0 0
36131- 0 0 0 0 0 0 0 0 0 0 0 0
36132- 0 0 1 0 0 1 0 0 1 0 0 0
36133- 0 0 0 0 0 0 0 0 0 0 0 0
36134- 0 0 0 0 0 0 0 0 0 0 0 0
36135- 0 0 0 0 0 0 0 0 0 0 0 0
36136- 0 0 0 0 0 0 0 0 0 0 0 0
36137- 0 0 0 0 0 0 0 0 0 0 0 0
36138- 14 14 14 42 42 42 86 86 86 10 10 10
36139- 2 2 6 2 2 6 2 2 6 2 2 6
36140- 2 2 6 2 2 6 2 2 6 2 2 6
36141- 2 2 6 2 2 6 2 2 6 30 30 30
36142- 94 94 94 94 94 94 58 58 58 26 26 26
36143- 2 2 6 6 6 6 78 78 78 54 54 54
36144- 22 22 22 6 6 6 0 0 0 0 0 0
36145- 0 0 0 0 0 0 0 0 0 0 0 0
36146- 0 0 0 0 0 0 0 0 0 0 0 0
36147- 0 0 0 0 0 0 0 0 0 0 0 0
36148- 0 0 0 0 0 0 0 0 0 0 0 0
36149- 0 0 0 0 0 0 0 0 0 0 0 0
36150- 0 0 0 0 0 0 0 0 0 0 0 0
36151- 0 0 0 0 0 0 0 0 0 0 0 0
36152- 0 0 0 0 0 0 0 0 0 0 0 0
36153- 0 0 0 0 0 0 0 0 0 0 0 0
36154- 0 0 0 0 0 0 0 0 0 0 0 0
36155- 0 0 0 0 0 0 0 0 0 0 0 0
36156- 0 0 0 0 0 0 0 0 0 0 0 0
36157- 0 0 0 0 0 0 0 0 0 6 6 6
36158- 22 22 22 62 62 62 62 62 62 2 2 6
36159- 2 2 6 2 2 6 2 2 6 2 2 6
36160- 2 2 6 2 2 6 2 2 6 2 2 6
36161- 2 2 6 2 2 6 2 2 6 26 26 26
36162- 54 54 54 38 38 38 18 18 18 10 10 10
36163- 2 2 6 2 2 6 34 34 34 82 82 82
36164- 38 38 38 14 14 14 0 0 0 0 0 0
36165- 0 0 0 0 0 0 0 0 0 0 0 0
36166- 0 0 0 0 0 0 0 0 0 0 0 0
36167- 0 0 0 0 0 0 0 0 0 0 0 0
36168- 0 0 0 0 0 0 0 0 0 0 0 0
36169- 0 0 0 0 0 0 0 0 0 0 0 0
36170- 0 0 0 0 0 0 0 0 0 0 0 0
36171- 0 0 0 0 0 0 0 0 0 0 0 0
36172- 0 0 0 0 0 1 0 0 1 0 0 0
36173- 0 0 0 0 0 0 0 0 0 0 0 0
36174- 0 0 0 0 0 0 0 0 0 0 0 0
36175- 0 0 0 0 0 0 0 0 0 0 0 0
36176- 0 0 0 0 0 0 0 0 0 0 0 0
36177- 0 0 0 0 0 0 0 0 0 6 6 6
36178- 30 30 30 78 78 78 30 30 30 2 2 6
36179- 2 2 6 2 2 6 2 2 6 2 2 6
36180- 2 2 6 2 2 6 2 2 6 2 2 6
36181- 2 2 6 2 2 6 2 2 6 10 10 10
36182- 10 10 10 2 2 6 2 2 6 2 2 6
36183- 2 2 6 2 2 6 2 2 6 78 78 78
36184- 50 50 50 18 18 18 6 6 6 0 0 0
36185- 0 0 0 0 0 0 0 0 0 0 0 0
36186- 0 0 0 0 0 0 0 0 0 0 0 0
36187- 0 0 0 0 0 0 0 0 0 0 0 0
36188- 0 0 0 0 0 0 0 0 0 0 0 0
36189- 0 0 0 0 0 0 0 0 0 0 0 0
36190- 0 0 0 0 0 0 0 0 0 0 0 0
36191- 0 0 0 0 0 0 0 0 0 0 0 0
36192- 0 0 1 0 0 0 0 0 0 0 0 0
36193- 0 0 0 0 0 0 0 0 0 0 0 0
36194- 0 0 0 0 0 0 0 0 0 0 0 0
36195- 0 0 0 0 0 0 0 0 0 0 0 0
36196- 0 0 0 0 0 0 0 0 0 0 0 0
36197- 0 0 0 0 0 0 0 0 0 10 10 10
36198- 38 38 38 86 86 86 14 14 14 2 2 6
36199- 2 2 6 2 2 6 2 2 6 2 2 6
36200- 2 2 6 2 2 6 2 2 6 2 2 6
36201- 2 2 6 2 2 6 2 2 6 2 2 6
36202- 2 2 6 2 2 6 2 2 6 2 2 6
36203- 2 2 6 2 2 6 2 2 6 54 54 54
36204- 66 66 66 26 26 26 6 6 6 0 0 0
36205- 0 0 0 0 0 0 0 0 0 0 0 0
36206- 0 0 0 0 0 0 0 0 0 0 0 0
36207- 0 0 0 0 0 0 0 0 0 0 0 0
36208- 0 0 0 0 0 0 0 0 0 0 0 0
36209- 0 0 0 0 0 0 0 0 0 0 0 0
36210- 0 0 0 0 0 0 0 0 0 0 0 0
36211- 0 0 0 0 0 0 0 0 0 0 0 0
36212- 0 0 0 0 0 1 0 0 1 0 0 0
36213- 0 0 0 0 0 0 0 0 0 0 0 0
36214- 0 0 0 0 0 0 0 0 0 0 0 0
36215- 0 0 0 0 0 0 0 0 0 0 0 0
36216- 0 0 0 0 0 0 0 0 0 0 0 0
36217- 0 0 0 0 0 0 0 0 0 14 14 14
36218- 42 42 42 82 82 82 2 2 6 2 2 6
36219- 2 2 6 6 6 6 10 10 10 2 2 6
36220- 2 2 6 2 2 6 2 2 6 2 2 6
36221- 2 2 6 2 2 6 2 2 6 6 6 6
36222- 14 14 14 10 10 10 2 2 6 2 2 6
36223- 2 2 6 2 2 6 2 2 6 18 18 18
36224- 82 82 82 34 34 34 10 10 10 0 0 0
36225- 0 0 0 0 0 0 0 0 0 0 0 0
36226- 0 0 0 0 0 0 0 0 0 0 0 0
36227- 0 0 0 0 0 0 0 0 0 0 0 0
36228- 0 0 0 0 0 0 0 0 0 0 0 0
36229- 0 0 0 0 0 0 0 0 0 0 0 0
36230- 0 0 0 0 0 0 0 0 0 0 0 0
36231- 0 0 0 0 0 0 0 0 0 0 0 0
36232- 0 0 1 0 0 0 0 0 0 0 0 0
36233- 0 0 0 0 0 0 0 0 0 0 0 0
36234- 0 0 0 0 0 0 0 0 0 0 0 0
36235- 0 0 0 0 0 0 0 0 0 0 0 0
36236- 0 0 0 0 0 0 0 0 0 0 0 0
36237- 0 0 0 0 0 0 0 0 0 14 14 14
36238- 46 46 46 86 86 86 2 2 6 2 2 6
36239- 6 6 6 6 6 6 22 22 22 34 34 34
36240- 6 6 6 2 2 6 2 2 6 2 2 6
36241- 2 2 6 2 2 6 18 18 18 34 34 34
36242- 10 10 10 50 50 50 22 22 22 2 2 6
36243- 2 2 6 2 2 6 2 2 6 10 10 10
36244- 86 86 86 42 42 42 14 14 14 0 0 0
36245- 0 0 0 0 0 0 0 0 0 0 0 0
36246- 0 0 0 0 0 0 0 0 0 0 0 0
36247- 0 0 0 0 0 0 0 0 0 0 0 0
36248- 0 0 0 0 0 0 0 0 0 0 0 0
36249- 0 0 0 0 0 0 0 0 0 0 0 0
36250- 0 0 0 0 0 0 0 0 0 0 0 0
36251- 0 0 0 0 0 0 0 0 0 0 0 0
36252- 0 0 1 0 0 1 0 0 1 0 0 0
36253- 0 0 0 0 0 0 0 0 0 0 0 0
36254- 0 0 0 0 0 0 0 0 0 0 0 0
36255- 0 0 0 0 0 0 0 0 0 0 0 0
36256- 0 0 0 0 0 0 0 0 0 0 0 0
36257- 0 0 0 0 0 0 0 0 0 14 14 14
36258- 46 46 46 86 86 86 2 2 6 2 2 6
36259- 38 38 38 116 116 116 94 94 94 22 22 22
36260- 22 22 22 2 2 6 2 2 6 2 2 6
36261- 14 14 14 86 86 86 138 138 138 162 162 162
36262-154 154 154 38 38 38 26 26 26 6 6 6
36263- 2 2 6 2 2 6 2 2 6 2 2 6
36264- 86 86 86 46 46 46 14 14 14 0 0 0
36265- 0 0 0 0 0 0 0 0 0 0 0 0
36266- 0 0 0 0 0 0 0 0 0 0 0 0
36267- 0 0 0 0 0 0 0 0 0 0 0 0
36268- 0 0 0 0 0 0 0 0 0 0 0 0
36269- 0 0 0 0 0 0 0 0 0 0 0 0
36270- 0 0 0 0 0 0 0 0 0 0 0 0
36271- 0 0 0 0 0 0 0 0 0 0 0 0
36272- 0 0 0 0 0 0 0 0 0 0 0 0
36273- 0 0 0 0 0 0 0 0 0 0 0 0
36274- 0 0 0 0 0 0 0 0 0 0 0 0
36275- 0 0 0 0 0 0 0 0 0 0 0 0
36276- 0 0 0 0 0 0 0 0 0 0 0 0
36277- 0 0 0 0 0 0 0 0 0 14 14 14
36278- 46 46 46 86 86 86 2 2 6 14 14 14
36279-134 134 134 198 198 198 195 195 195 116 116 116
36280- 10 10 10 2 2 6 2 2 6 6 6 6
36281-101 98 89 187 187 187 210 210 210 218 218 218
36282-214 214 214 134 134 134 14 14 14 6 6 6
36283- 2 2 6 2 2 6 2 2 6 2 2 6
36284- 86 86 86 50 50 50 18 18 18 6 6 6
36285- 0 0 0 0 0 0 0 0 0 0 0 0
36286- 0 0 0 0 0 0 0 0 0 0 0 0
36287- 0 0 0 0 0 0 0 0 0 0 0 0
36288- 0 0 0 0 0 0 0 0 0 0 0 0
36289- 0 0 0 0 0 0 0 0 0 0 0 0
36290- 0 0 0 0 0 0 0 0 0 0 0 0
36291- 0 0 0 0 0 0 0 0 1 0 0 0
36292- 0 0 1 0 0 1 0 0 1 0 0 0
36293- 0 0 0 0 0 0 0 0 0 0 0 0
36294- 0 0 0 0 0 0 0 0 0 0 0 0
36295- 0 0 0 0 0 0 0 0 0 0 0 0
36296- 0 0 0 0 0 0 0 0 0 0 0 0
36297- 0 0 0 0 0 0 0 0 0 14 14 14
36298- 46 46 46 86 86 86 2 2 6 54 54 54
36299-218 218 218 195 195 195 226 226 226 246 246 246
36300- 58 58 58 2 2 6 2 2 6 30 30 30
36301-210 210 210 253 253 253 174 174 174 123 123 123
36302-221 221 221 234 234 234 74 74 74 2 2 6
36303- 2 2 6 2 2 6 2 2 6 2 2 6
36304- 70 70 70 58 58 58 22 22 22 6 6 6
36305- 0 0 0 0 0 0 0 0 0 0 0 0
36306- 0 0 0 0 0 0 0 0 0 0 0 0
36307- 0 0 0 0 0 0 0 0 0 0 0 0
36308- 0 0 0 0 0 0 0 0 0 0 0 0
36309- 0 0 0 0 0 0 0 0 0 0 0 0
36310- 0 0 0 0 0 0 0 0 0 0 0 0
36311- 0 0 0 0 0 0 0 0 0 0 0 0
36312- 0 0 0 0 0 0 0 0 0 0 0 0
36313- 0 0 0 0 0 0 0 0 0 0 0 0
36314- 0 0 0 0 0 0 0 0 0 0 0 0
36315- 0 0 0 0 0 0 0 0 0 0 0 0
36316- 0 0 0 0 0 0 0 0 0 0 0 0
36317- 0 0 0 0 0 0 0 0 0 14 14 14
36318- 46 46 46 82 82 82 2 2 6 106 106 106
36319-170 170 170 26 26 26 86 86 86 226 226 226
36320-123 123 123 10 10 10 14 14 14 46 46 46
36321-231 231 231 190 190 190 6 6 6 70 70 70
36322- 90 90 90 238 238 238 158 158 158 2 2 6
36323- 2 2 6 2 2 6 2 2 6 2 2 6
36324- 70 70 70 58 58 58 22 22 22 6 6 6
36325- 0 0 0 0 0 0 0 0 0 0 0 0
36326- 0 0 0 0 0 0 0 0 0 0 0 0
36327- 0 0 0 0 0 0 0 0 0 0 0 0
36328- 0 0 0 0 0 0 0 0 0 0 0 0
36329- 0 0 0 0 0 0 0 0 0 0 0 0
36330- 0 0 0 0 0 0 0 0 0 0 0 0
36331- 0 0 0 0 0 0 0 0 1 0 0 0
36332- 0 0 1 0 0 1 0 0 1 0 0 0
36333- 0 0 0 0 0 0 0 0 0 0 0 0
36334- 0 0 0 0 0 0 0 0 0 0 0 0
36335- 0 0 0 0 0 0 0 0 0 0 0 0
36336- 0 0 0 0 0 0 0 0 0 0 0 0
36337- 0 0 0 0 0 0 0 0 0 14 14 14
36338- 42 42 42 86 86 86 6 6 6 116 116 116
36339-106 106 106 6 6 6 70 70 70 149 149 149
36340-128 128 128 18 18 18 38 38 38 54 54 54
36341-221 221 221 106 106 106 2 2 6 14 14 14
36342- 46 46 46 190 190 190 198 198 198 2 2 6
36343- 2 2 6 2 2 6 2 2 6 2 2 6
36344- 74 74 74 62 62 62 22 22 22 6 6 6
36345- 0 0 0 0 0 0 0 0 0 0 0 0
36346- 0 0 0 0 0 0 0 0 0 0 0 0
36347- 0 0 0 0 0 0 0 0 0 0 0 0
36348- 0 0 0 0 0 0 0 0 0 0 0 0
36349- 0 0 0 0 0 0 0 0 0 0 0 0
36350- 0 0 0 0 0 0 0 0 0 0 0 0
36351- 0 0 0 0 0 0 0 0 1 0 0 0
36352- 0 0 1 0 0 0 0 0 1 0 0 0
36353- 0 0 0 0 0 0 0 0 0 0 0 0
36354- 0 0 0 0 0 0 0 0 0 0 0 0
36355- 0 0 0 0 0 0 0 0 0 0 0 0
36356- 0 0 0 0 0 0 0 0 0 0 0 0
36357- 0 0 0 0 0 0 0 0 0 14 14 14
36358- 42 42 42 94 94 94 14 14 14 101 101 101
36359-128 128 128 2 2 6 18 18 18 116 116 116
36360-118 98 46 121 92 8 121 92 8 98 78 10
36361-162 162 162 106 106 106 2 2 6 2 2 6
36362- 2 2 6 195 195 195 195 195 195 6 6 6
36363- 2 2 6 2 2 6 2 2 6 2 2 6
36364- 74 74 74 62 62 62 22 22 22 6 6 6
36365- 0 0 0 0 0 0 0 0 0 0 0 0
36366- 0 0 0 0 0 0 0 0 0 0 0 0
36367- 0 0 0 0 0 0 0 0 0 0 0 0
36368- 0 0 0 0 0 0 0 0 0 0 0 0
36369- 0 0 0 0 0 0 0 0 0 0 0 0
36370- 0 0 0 0 0 0 0 0 0 0 0 0
36371- 0 0 0 0 0 0 0 0 1 0 0 1
36372- 0 0 1 0 0 0 0 0 1 0 0 0
36373- 0 0 0 0 0 0 0 0 0 0 0 0
36374- 0 0 0 0 0 0 0 0 0 0 0 0
36375- 0 0 0 0 0 0 0 0 0 0 0 0
36376- 0 0 0 0 0 0 0 0 0 0 0 0
36377- 0 0 0 0 0 0 0 0 0 10 10 10
36378- 38 38 38 90 90 90 14 14 14 58 58 58
36379-210 210 210 26 26 26 54 38 6 154 114 10
36380-226 170 11 236 186 11 225 175 15 184 144 12
36381-215 174 15 175 146 61 37 26 9 2 2 6
36382- 70 70 70 246 246 246 138 138 138 2 2 6
36383- 2 2 6 2 2 6 2 2 6 2 2 6
36384- 70 70 70 66 66 66 26 26 26 6 6 6
36385- 0 0 0 0 0 0 0 0 0 0 0 0
36386- 0 0 0 0 0 0 0 0 0 0 0 0
36387- 0 0 0 0 0 0 0 0 0 0 0 0
36388- 0 0 0 0 0 0 0 0 0 0 0 0
36389- 0 0 0 0 0 0 0 0 0 0 0 0
36390- 0 0 0 0 0 0 0 0 0 0 0 0
36391- 0 0 0 0 0 0 0 0 0 0 0 0
36392- 0 0 0 0 0 0 0 0 0 0 0 0
36393- 0 0 0 0 0 0 0 0 0 0 0 0
36394- 0 0 0 0 0 0 0 0 0 0 0 0
36395- 0 0 0 0 0 0 0 0 0 0 0 0
36396- 0 0 0 0 0 0 0 0 0 0 0 0
36397- 0 0 0 0 0 0 0 0 0 10 10 10
36398- 38 38 38 86 86 86 14 14 14 10 10 10
36399-195 195 195 188 164 115 192 133 9 225 175 15
36400-239 182 13 234 190 10 232 195 16 232 200 30
36401-245 207 45 241 208 19 232 195 16 184 144 12
36402-218 194 134 211 206 186 42 42 42 2 2 6
36403- 2 2 6 2 2 6 2 2 6 2 2 6
36404- 50 50 50 74 74 74 30 30 30 6 6 6
36405- 0 0 0 0 0 0 0 0 0 0 0 0
36406- 0 0 0 0 0 0 0 0 0 0 0 0
36407- 0 0 0 0 0 0 0 0 0 0 0 0
36408- 0 0 0 0 0 0 0 0 0 0 0 0
36409- 0 0 0 0 0 0 0 0 0 0 0 0
36410- 0 0 0 0 0 0 0 0 0 0 0 0
36411- 0 0 0 0 0 0 0 0 0 0 0 0
36412- 0 0 0 0 0 0 0 0 0 0 0 0
36413- 0 0 0 0 0 0 0 0 0 0 0 0
36414- 0 0 0 0 0 0 0 0 0 0 0 0
36415- 0 0 0 0 0 0 0 0 0 0 0 0
36416- 0 0 0 0 0 0 0 0 0 0 0 0
36417- 0 0 0 0 0 0 0 0 0 10 10 10
36418- 34 34 34 86 86 86 14 14 14 2 2 6
36419-121 87 25 192 133 9 219 162 10 239 182 13
36420-236 186 11 232 195 16 241 208 19 244 214 54
36421-246 218 60 246 218 38 246 215 20 241 208 19
36422-241 208 19 226 184 13 121 87 25 2 2 6
36423- 2 2 6 2 2 6 2 2 6 2 2 6
36424- 50 50 50 82 82 82 34 34 34 10 10 10
36425- 0 0 0 0 0 0 0 0 0 0 0 0
36426- 0 0 0 0 0 0 0 0 0 0 0 0
36427- 0 0 0 0 0 0 0 0 0 0 0 0
36428- 0 0 0 0 0 0 0 0 0 0 0 0
36429- 0 0 0 0 0 0 0 0 0 0 0 0
36430- 0 0 0 0 0 0 0 0 0 0 0 0
36431- 0 0 0 0 0 0 0 0 0 0 0 0
36432- 0 0 0 0 0 0 0 0 0 0 0 0
36433- 0 0 0 0 0 0 0 0 0 0 0 0
36434- 0 0 0 0 0 0 0 0 0 0 0 0
36435- 0 0 0 0 0 0 0 0 0 0 0 0
36436- 0 0 0 0 0 0 0 0 0 0 0 0
36437- 0 0 0 0 0 0 0 0 0 10 10 10
36438- 34 34 34 82 82 82 30 30 30 61 42 6
36439-180 123 7 206 145 10 230 174 11 239 182 13
36440-234 190 10 238 202 15 241 208 19 246 218 74
36441-246 218 38 246 215 20 246 215 20 246 215 20
36442-226 184 13 215 174 15 184 144 12 6 6 6
36443- 2 2 6 2 2 6 2 2 6 2 2 6
36444- 26 26 26 94 94 94 42 42 42 14 14 14
36445- 0 0 0 0 0 0 0 0 0 0 0 0
36446- 0 0 0 0 0 0 0 0 0 0 0 0
36447- 0 0 0 0 0 0 0 0 0 0 0 0
36448- 0 0 0 0 0 0 0 0 0 0 0 0
36449- 0 0 0 0 0 0 0 0 0 0 0 0
36450- 0 0 0 0 0 0 0 0 0 0 0 0
36451- 0 0 0 0 0 0 0 0 0 0 0 0
36452- 0 0 0 0 0 0 0 0 0 0 0 0
36453- 0 0 0 0 0 0 0 0 0 0 0 0
36454- 0 0 0 0 0 0 0 0 0 0 0 0
36455- 0 0 0 0 0 0 0 0 0 0 0 0
36456- 0 0 0 0 0 0 0 0 0 0 0 0
36457- 0 0 0 0 0 0 0 0 0 10 10 10
36458- 30 30 30 78 78 78 50 50 50 104 69 6
36459-192 133 9 216 158 10 236 178 12 236 186 11
36460-232 195 16 241 208 19 244 214 54 245 215 43
36461-246 215 20 246 215 20 241 208 19 198 155 10
36462-200 144 11 216 158 10 156 118 10 2 2 6
36463- 2 2 6 2 2 6 2 2 6 2 2 6
36464- 6 6 6 90 90 90 54 54 54 18 18 18
36465- 6 6 6 0 0 0 0 0 0 0 0 0
36466- 0 0 0 0 0 0 0 0 0 0 0 0
36467- 0 0 0 0 0 0 0 0 0 0 0 0
36468- 0 0 0 0 0 0 0 0 0 0 0 0
36469- 0 0 0 0 0 0 0 0 0 0 0 0
36470- 0 0 0 0 0 0 0 0 0 0 0 0
36471- 0 0 0 0 0 0 0 0 0 0 0 0
36472- 0 0 0 0 0 0 0 0 0 0 0 0
36473- 0 0 0 0 0 0 0 0 0 0 0 0
36474- 0 0 0 0 0 0 0 0 0 0 0 0
36475- 0 0 0 0 0 0 0 0 0 0 0 0
36476- 0 0 0 0 0 0 0 0 0 0 0 0
36477- 0 0 0 0 0 0 0 0 0 10 10 10
36478- 30 30 30 78 78 78 46 46 46 22 22 22
36479-137 92 6 210 162 10 239 182 13 238 190 10
36480-238 202 15 241 208 19 246 215 20 246 215 20
36481-241 208 19 203 166 17 185 133 11 210 150 10
36482-216 158 10 210 150 10 102 78 10 2 2 6
36483- 6 6 6 54 54 54 14 14 14 2 2 6
36484- 2 2 6 62 62 62 74 74 74 30 30 30
36485- 10 10 10 0 0 0 0 0 0 0 0 0
36486- 0 0 0 0 0 0 0 0 0 0 0 0
36487- 0 0 0 0 0 0 0 0 0 0 0 0
36488- 0 0 0 0 0 0 0 0 0 0 0 0
36489- 0 0 0 0 0 0 0 0 0 0 0 0
36490- 0 0 0 0 0 0 0 0 0 0 0 0
36491- 0 0 0 0 0 0 0 0 0 0 0 0
36492- 0 0 0 0 0 0 0 0 0 0 0 0
36493- 0 0 0 0 0 0 0 0 0 0 0 0
36494- 0 0 0 0 0 0 0 0 0 0 0 0
36495- 0 0 0 0 0 0 0 0 0 0 0 0
36496- 0 0 0 0 0 0 0 0 0 0 0 0
36497- 0 0 0 0 0 0 0 0 0 10 10 10
36498- 34 34 34 78 78 78 50 50 50 6 6 6
36499- 94 70 30 139 102 15 190 146 13 226 184 13
36500-232 200 30 232 195 16 215 174 15 190 146 13
36501-168 122 10 192 133 9 210 150 10 213 154 11
36502-202 150 34 182 157 106 101 98 89 2 2 6
36503- 2 2 6 78 78 78 116 116 116 58 58 58
36504- 2 2 6 22 22 22 90 90 90 46 46 46
36505- 18 18 18 6 6 6 0 0 0 0 0 0
36506- 0 0 0 0 0 0 0 0 0 0 0 0
36507- 0 0 0 0 0 0 0 0 0 0 0 0
36508- 0 0 0 0 0 0 0 0 0 0 0 0
36509- 0 0 0 0 0 0 0 0 0 0 0 0
36510- 0 0 0 0 0 0 0 0 0 0 0 0
36511- 0 0 0 0 0 0 0 0 0 0 0 0
36512- 0 0 0 0 0 0 0 0 0 0 0 0
36513- 0 0 0 0 0 0 0 0 0 0 0 0
36514- 0 0 0 0 0 0 0 0 0 0 0 0
36515- 0 0 0 0 0 0 0 0 0 0 0 0
36516- 0 0 0 0 0 0 0 0 0 0 0 0
36517- 0 0 0 0 0 0 0 0 0 10 10 10
36518- 38 38 38 86 86 86 50 50 50 6 6 6
36519-128 128 128 174 154 114 156 107 11 168 122 10
36520-198 155 10 184 144 12 197 138 11 200 144 11
36521-206 145 10 206 145 10 197 138 11 188 164 115
36522-195 195 195 198 198 198 174 174 174 14 14 14
36523- 2 2 6 22 22 22 116 116 116 116 116 116
36524- 22 22 22 2 2 6 74 74 74 70 70 70
36525- 30 30 30 10 10 10 0 0 0 0 0 0
36526- 0 0 0 0 0 0 0 0 0 0 0 0
36527- 0 0 0 0 0 0 0 0 0 0 0 0
36528- 0 0 0 0 0 0 0 0 0 0 0 0
36529- 0 0 0 0 0 0 0 0 0 0 0 0
36530- 0 0 0 0 0 0 0 0 0 0 0 0
36531- 0 0 0 0 0 0 0 0 0 0 0 0
36532- 0 0 0 0 0 0 0 0 0 0 0 0
36533- 0 0 0 0 0 0 0 0 0 0 0 0
36534- 0 0 0 0 0 0 0 0 0 0 0 0
36535- 0 0 0 0 0 0 0 0 0 0 0 0
36536- 0 0 0 0 0 0 0 0 0 0 0 0
36537- 0 0 0 0 0 0 6 6 6 18 18 18
36538- 50 50 50 101 101 101 26 26 26 10 10 10
36539-138 138 138 190 190 190 174 154 114 156 107 11
36540-197 138 11 200 144 11 197 138 11 192 133 9
36541-180 123 7 190 142 34 190 178 144 187 187 187
36542-202 202 202 221 221 221 214 214 214 66 66 66
36543- 2 2 6 2 2 6 50 50 50 62 62 62
36544- 6 6 6 2 2 6 10 10 10 90 90 90
36545- 50 50 50 18 18 18 6 6 6 0 0 0
36546- 0 0 0 0 0 0 0 0 0 0 0 0
36547- 0 0 0 0 0 0 0 0 0 0 0 0
36548- 0 0 0 0 0 0 0 0 0 0 0 0
36549- 0 0 0 0 0 0 0 0 0 0 0 0
36550- 0 0 0 0 0 0 0 0 0 0 0 0
36551- 0 0 0 0 0 0 0 0 0 0 0 0
36552- 0 0 0 0 0 0 0 0 0 0 0 0
36553- 0 0 0 0 0 0 0 0 0 0 0 0
36554- 0 0 0 0 0 0 0 0 0 0 0 0
36555- 0 0 0 0 0 0 0 0 0 0 0 0
36556- 0 0 0 0 0 0 0 0 0 0 0 0
36557- 0 0 0 0 0 0 10 10 10 34 34 34
36558- 74 74 74 74 74 74 2 2 6 6 6 6
36559-144 144 144 198 198 198 190 190 190 178 166 146
36560-154 121 60 156 107 11 156 107 11 168 124 44
36561-174 154 114 187 187 187 190 190 190 210 210 210
36562-246 246 246 253 253 253 253 253 253 182 182 182
36563- 6 6 6 2 2 6 2 2 6 2 2 6
36564- 2 2 6 2 2 6 2 2 6 62 62 62
36565- 74 74 74 34 34 34 14 14 14 0 0 0
36566- 0 0 0 0 0 0 0 0 0 0 0 0
36567- 0 0 0 0 0 0 0 0 0 0 0 0
36568- 0 0 0 0 0 0 0 0 0 0 0 0
36569- 0 0 0 0 0 0 0 0 0 0 0 0
36570- 0 0 0 0 0 0 0 0 0 0 0 0
36571- 0 0 0 0 0 0 0 0 0 0 0 0
36572- 0 0 0 0 0 0 0 0 0 0 0 0
36573- 0 0 0 0 0 0 0 0 0 0 0 0
36574- 0 0 0 0 0 0 0 0 0 0 0 0
36575- 0 0 0 0 0 0 0 0 0 0 0 0
36576- 0 0 0 0 0 0 0 0 0 0 0 0
36577- 0 0 0 10 10 10 22 22 22 54 54 54
36578- 94 94 94 18 18 18 2 2 6 46 46 46
36579-234 234 234 221 221 221 190 190 190 190 190 190
36580-190 190 190 187 187 187 187 187 187 190 190 190
36581-190 190 190 195 195 195 214 214 214 242 242 242
36582-253 253 253 253 253 253 253 253 253 253 253 253
36583- 82 82 82 2 2 6 2 2 6 2 2 6
36584- 2 2 6 2 2 6 2 2 6 14 14 14
36585- 86 86 86 54 54 54 22 22 22 6 6 6
36586- 0 0 0 0 0 0 0 0 0 0 0 0
36587- 0 0 0 0 0 0 0 0 0 0 0 0
36588- 0 0 0 0 0 0 0 0 0 0 0 0
36589- 0 0 0 0 0 0 0 0 0 0 0 0
36590- 0 0 0 0 0 0 0 0 0 0 0 0
36591- 0 0 0 0 0 0 0 0 0 0 0 0
36592- 0 0 0 0 0 0 0 0 0 0 0 0
36593- 0 0 0 0 0 0 0 0 0 0 0 0
36594- 0 0 0 0 0 0 0 0 0 0 0 0
36595- 0 0 0 0 0 0 0 0 0 0 0 0
36596- 0 0 0 0 0 0 0 0 0 0 0 0
36597- 6 6 6 18 18 18 46 46 46 90 90 90
36598- 46 46 46 18 18 18 6 6 6 182 182 182
36599-253 253 253 246 246 246 206 206 206 190 190 190
36600-190 190 190 190 190 190 190 190 190 190 190 190
36601-206 206 206 231 231 231 250 250 250 253 253 253
36602-253 253 253 253 253 253 253 253 253 253 253 253
36603-202 202 202 14 14 14 2 2 6 2 2 6
36604- 2 2 6 2 2 6 2 2 6 2 2 6
36605- 42 42 42 86 86 86 42 42 42 18 18 18
36606- 6 6 6 0 0 0 0 0 0 0 0 0
36607- 0 0 0 0 0 0 0 0 0 0 0 0
36608- 0 0 0 0 0 0 0 0 0 0 0 0
36609- 0 0 0 0 0 0 0 0 0 0 0 0
36610- 0 0 0 0 0 0 0 0 0 0 0 0
36611- 0 0 0 0 0 0 0 0 0 0 0 0
36612- 0 0 0 0 0 0 0 0 0 0 0 0
36613- 0 0 0 0 0 0 0 0 0 0 0 0
36614- 0 0 0 0 0 0 0 0 0 0 0 0
36615- 0 0 0 0 0 0 0 0 0 0 0 0
36616- 0 0 0 0 0 0 0 0 0 6 6 6
36617- 14 14 14 38 38 38 74 74 74 66 66 66
36618- 2 2 6 6 6 6 90 90 90 250 250 250
36619-253 253 253 253 253 253 238 238 238 198 198 198
36620-190 190 190 190 190 190 195 195 195 221 221 221
36621-246 246 246 253 253 253 253 253 253 253 253 253
36622-253 253 253 253 253 253 253 253 253 253 253 253
36623-253 253 253 82 82 82 2 2 6 2 2 6
36624- 2 2 6 2 2 6 2 2 6 2 2 6
36625- 2 2 6 78 78 78 70 70 70 34 34 34
36626- 14 14 14 6 6 6 0 0 0 0 0 0
36627- 0 0 0 0 0 0 0 0 0 0 0 0
36628- 0 0 0 0 0 0 0 0 0 0 0 0
36629- 0 0 0 0 0 0 0 0 0 0 0 0
36630- 0 0 0 0 0 0 0 0 0 0 0 0
36631- 0 0 0 0 0 0 0 0 0 0 0 0
36632- 0 0 0 0 0 0 0 0 0 0 0 0
36633- 0 0 0 0 0 0 0 0 0 0 0 0
36634- 0 0 0 0 0 0 0 0 0 0 0 0
36635- 0 0 0 0 0 0 0 0 0 0 0 0
36636- 0 0 0 0 0 0 0 0 0 14 14 14
36637- 34 34 34 66 66 66 78 78 78 6 6 6
36638- 2 2 6 18 18 18 218 218 218 253 253 253
36639-253 253 253 253 253 253 253 253 253 246 246 246
36640-226 226 226 231 231 231 246 246 246 253 253 253
36641-253 253 253 253 253 253 253 253 253 253 253 253
36642-253 253 253 253 253 253 253 253 253 253 253 253
36643-253 253 253 178 178 178 2 2 6 2 2 6
36644- 2 2 6 2 2 6 2 2 6 2 2 6
36645- 2 2 6 18 18 18 90 90 90 62 62 62
36646- 30 30 30 10 10 10 0 0 0 0 0 0
36647- 0 0 0 0 0 0 0 0 0 0 0 0
36648- 0 0 0 0 0 0 0 0 0 0 0 0
36649- 0 0 0 0 0 0 0 0 0 0 0 0
36650- 0 0 0 0 0 0 0 0 0 0 0 0
36651- 0 0 0 0 0 0 0 0 0 0 0 0
36652- 0 0 0 0 0 0 0 0 0 0 0 0
36653- 0 0 0 0 0 0 0 0 0 0 0 0
36654- 0 0 0 0 0 0 0 0 0 0 0 0
36655- 0 0 0 0 0 0 0 0 0 0 0 0
36656- 0 0 0 0 0 0 10 10 10 26 26 26
36657- 58 58 58 90 90 90 18 18 18 2 2 6
36658- 2 2 6 110 110 110 253 253 253 253 253 253
36659-253 253 253 253 253 253 253 253 253 253 253 253
36660-250 250 250 253 253 253 253 253 253 253 253 253
36661-253 253 253 253 253 253 253 253 253 253 253 253
36662-253 253 253 253 253 253 253 253 253 253 253 253
36663-253 253 253 231 231 231 18 18 18 2 2 6
36664- 2 2 6 2 2 6 2 2 6 2 2 6
36665- 2 2 6 2 2 6 18 18 18 94 94 94
36666- 54 54 54 26 26 26 10 10 10 0 0 0
36667- 0 0 0 0 0 0 0 0 0 0 0 0
36668- 0 0 0 0 0 0 0 0 0 0 0 0
36669- 0 0 0 0 0 0 0 0 0 0 0 0
36670- 0 0 0 0 0 0 0 0 0 0 0 0
36671- 0 0 0 0 0 0 0 0 0 0 0 0
36672- 0 0 0 0 0 0 0 0 0 0 0 0
36673- 0 0 0 0 0 0 0 0 0 0 0 0
36674- 0 0 0 0 0 0 0 0 0 0 0 0
36675- 0 0 0 0 0 0 0 0 0 0 0 0
36676- 0 0 0 6 6 6 22 22 22 50 50 50
36677- 90 90 90 26 26 26 2 2 6 2 2 6
36678- 14 14 14 195 195 195 250 250 250 253 253 253
36679-253 253 253 253 253 253 253 253 253 253 253 253
36680-253 253 253 253 253 253 253 253 253 253 253 253
36681-253 253 253 253 253 253 253 253 253 253 253 253
36682-253 253 253 253 253 253 253 253 253 253 253 253
36683-250 250 250 242 242 242 54 54 54 2 2 6
36684- 2 2 6 2 2 6 2 2 6 2 2 6
36685- 2 2 6 2 2 6 2 2 6 38 38 38
36686- 86 86 86 50 50 50 22 22 22 6 6 6
36687- 0 0 0 0 0 0 0 0 0 0 0 0
36688- 0 0 0 0 0 0 0 0 0 0 0 0
36689- 0 0 0 0 0 0 0 0 0 0 0 0
36690- 0 0 0 0 0 0 0 0 0 0 0 0
36691- 0 0 0 0 0 0 0 0 0 0 0 0
36692- 0 0 0 0 0 0 0 0 0 0 0 0
36693- 0 0 0 0 0 0 0 0 0 0 0 0
36694- 0 0 0 0 0 0 0 0 0 0 0 0
36695- 0 0 0 0 0 0 0 0 0 0 0 0
36696- 6 6 6 14 14 14 38 38 38 82 82 82
36697- 34 34 34 2 2 6 2 2 6 2 2 6
36698- 42 42 42 195 195 195 246 246 246 253 253 253
36699-253 253 253 253 253 253 253 253 253 250 250 250
36700-242 242 242 242 242 242 250 250 250 253 253 253
36701-253 253 253 253 253 253 253 253 253 253 253 253
36702-253 253 253 250 250 250 246 246 246 238 238 238
36703-226 226 226 231 231 231 101 101 101 6 6 6
36704- 2 2 6 2 2 6 2 2 6 2 2 6
36705- 2 2 6 2 2 6 2 2 6 2 2 6
36706- 38 38 38 82 82 82 42 42 42 14 14 14
36707- 6 6 6 0 0 0 0 0 0 0 0 0
36708- 0 0 0 0 0 0 0 0 0 0 0 0
36709- 0 0 0 0 0 0 0 0 0 0 0 0
36710- 0 0 0 0 0 0 0 0 0 0 0 0
36711- 0 0 0 0 0 0 0 0 0 0 0 0
36712- 0 0 0 0 0 0 0 0 0 0 0 0
36713- 0 0 0 0 0 0 0 0 0 0 0 0
36714- 0 0 0 0 0 0 0 0 0 0 0 0
36715- 0 0 0 0 0 0 0 0 0 0 0 0
36716- 10 10 10 26 26 26 62 62 62 66 66 66
36717- 2 2 6 2 2 6 2 2 6 6 6 6
36718- 70 70 70 170 170 170 206 206 206 234 234 234
36719-246 246 246 250 250 250 250 250 250 238 238 238
36720-226 226 226 231 231 231 238 238 238 250 250 250
36721-250 250 250 250 250 250 246 246 246 231 231 231
36722-214 214 214 206 206 206 202 202 202 202 202 202
36723-198 198 198 202 202 202 182 182 182 18 18 18
36724- 2 2 6 2 2 6 2 2 6 2 2 6
36725- 2 2 6 2 2 6 2 2 6 2 2 6
36726- 2 2 6 62 62 62 66 66 66 30 30 30
36727- 10 10 10 0 0 0 0 0 0 0 0 0
36728- 0 0 0 0 0 0 0 0 0 0 0 0
36729- 0 0 0 0 0 0 0 0 0 0 0 0
36730- 0 0 0 0 0 0 0 0 0 0 0 0
36731- 0 0 0 0 0 0 0 0 0 0 0 0
36732- 0 0 0 0 0 0 0 0 0 0 0 0
36733- 0 0 0 0 0 0 0 0 0 0 0 0
36734- 0 0 0 0 0 0 0 0 0 0 0 0
36735- 0 0 0 0 0 0 0 0 0 0 0 0
36736- 14 14 14 42 42 42 82 82 82 18 18 18
36737- 2 2 6 2 2 6 2 2 6 10 10 10
36738- 94 94 94 182 182 182 218 218 218 242 242 242
36739-250 250 250 253 253 253 253 253 253 250 250 250
36740-234 234 234 253 253 253 253 253 253 253 253 253
36741-253 253 253 253 253 253 253 253 253 246 246 246
36742-238 238 238 226 226 226 210 210 210 202 202 202
36743-195 195 195 195 195 195 210 210 210 158 158 158
36744- 6 6 6 14 14 14 50 50 50 14 14 14
36745- 2 2 6 2 2 6 2 2 6 2 2 6
36746- 2 2 6 6 6 6 86 86 86 46 46 46
36747- 18 18 18 6 6 6 0 0 0 0 0 0
36748- 0 0 0 0 0 0 0 0 0 0 0 0
36749- 0 0 0 0 0 0 0 0 0 0 0 0
36750- 0 0 0 0 0 0 0 0 0 0 0 0
36751- 0 0 0 0 0 0 0 0 0 0 0 0
36752- 0 0 0 0 0 0 0 0 0 0 0 0
36753- 0 0 0 0 0 0 0 0 0 0 0 0
36754- 0 0 0 0 0 0 0 0 0 0 0 0
36755- 0 0 0 0 0 0 0 0 0 6 6 6
36756- 22 22 22 54 54 54 70 70 70 2 2 6
36757- 2 2 6 10 10 10 2 2 6 22 22 22
36758-166 166 166 231 231 231 250 250 250 253 253 253
36759-253 253 253 253 253 253 253 253 253 250 250 250
36760-242 242 242 253 253 253 253 253 253 253 253 253
36761-253 253 253 253 253 253 253 253 253 253 253 253
36762-253 253 253 253 253 253 253 253 253 246 246 246
36763-231 231 231 206 206 206 198 198 198 226 226 226
36764- 94 94 94 2 2 6 6 6 6 38 38 38
36765- 30 30 30 2 2 6 2 2 6 2 2 6
36766- 2 2 6 2 2 6 62 62 62 66 66 66
36767- 26 26 26 10 10 10 0 0 0 0 0 0
36768- 0 0 0 0 0 0 0 0 0 0 0 0
36769- 0 0 0 0 0 0 0 0 0 0 0 0
36770- 0 0 0 0 0 0 0 0 0 0 0 0
36771- 0 0 0 0 0 0 0 0 0 0 0 0
36772- 0 0 0 0 0 0 0 0 0 0 0 0
36773- 0 0 0 0 0 0 0 0 0 0 0 0
36774- 0 0 0 0 0 0 0 0 0 0 0 0
36775- 0 0 0 0 0 0 0 0 0 10 10 10
36776- 30 30 30 74 74 74 50 50 50 2 2 6
36777- 26 26 26 26 26 26 2 2 6 106 106 106
36778-238 238 238 253 253 253 253 253 253 253 253 253
36779-253 253 253 253 253 253 253 253 253 253 253 253
36780-253 253 253 253 253 253 253 253 253 253 253 253
36781-253 253 253 253 253 253 253 253 253 253 253 253
36782-253 253 253 253 253 253 253 253 253 253 253 253
36783-253 253 253 246 246 246 218 218 218 202 202 202
36784-210 210 210 14 14 14 2 2 6 2 2 6
36785- 30 30 30 22 22 22 2 2 6 2 2 6
36786- 2 2 6 2 2 6 18 18 18 86 86 86
36787- 42 42 42 14 14 14 0 0 0 0 0 0
36788- 0 0 0 0 0 0 0 0 0 0 0 0
36789- 0 0 0 0 0 0 0 0 0 0 0 0
36790- 0 0 0 0 0 0 0 0 0 0 0 0
36791- 0 0 0 0 0 0 0 0 0 0 0 0
36792- 0 0 0 0 0 0 0 0 0 0 0 0
36793- 0 0 0 0 0 0 0 0 0 0 0 0
36794- 0 0 0 0 0 0 0 0 0 0 0 0
36795- 0 0 0 0 0 0 0 0 0 14 14 14
36796- 42 42 42 90 90 90 22 22 22 2 2 6
36797- 42 42 42 2 2 6 18 18 18 218 218 218
36798-253 253 253 253 253 253 253 253 253 253 253 253
36799-253 253 253 253 253 253 253 253 253 253 253 253
36800-253 253 253 253 253 253 253 253 253 253 253 253
36801-253 253 253 253 253 253 253 253 253 253 253 253
36802-253 253 253 253 253 253 253 253 253 253 253 253
36803-253 253 253 253 253 253 250 250 250 221 221 221
36804-218 218 218 101 101 101 2 2 6 14 14 14
36805- 18 18 18 38 38 38 10 10 10 2 2 6
36806- 2 2 6 2 2 6 2 2 6 78 78 78
36807- 58 58 58 22 22 22 6 6 6 0 0 0
36808- 0 0 0 0 0 0 0 0 0 0 0 0
36809- 0 0 0 0 0 0 0 0 0 0 0 0
36810- 0 0 0 0 0 0 0 0 0 0 0 0
36811- 0 0 0 0 0 0 0 0 0 0 0 0
36812- 0 0 0 0 0 0 0 0 0 0 0 0
36813- 0 0 0 0 0 0 0 0 0 0 0 0
36814- 0 0 0 0 0 0 0 0 0 0 0 0
36815- 0 0 0 0 0 0 6 6 6 18 18 18
36816- 54 54 54 82 82 82 2 2 6 26 26 26
36817- 22 22 22 2 2 6 123 123 123 253 253 253
36818-253 253 253 253 253 253 253 253 253 253 253 253
36819-253 253 253 253 253 253 253 253 253 253 253 253
36820-253 253 253 253 253 253 253 253 253 253 253 253
36821-253 253 253 253 253 253 253 253 253 253 253 253
36822-253 253 253 253 253 253 253 253 253 253 253 253
36823-253 253 253 253 253 253 253 253 253 250 250 250
36824-238 238 238 198 198 198 6 6 6 38 38 38
36825- 58 58 58 26 26 26 38 38 38 2 2 6
36826- 2 2 6 2 2 6 2 2 6 46 46 46
36827- 78 78 78 30 30 30 10 10 10 0 0 0
36828- 0 0 0 0 0 0 0 0 0 0 0 0
36829- 0 0 0 0 0 0 0 0 0 0 0 0
36830- 0 0 0 0 0 0 0 0 0 0 0 0
36831- 0 0 0 0 0 0 0 0 0 0 0 0
36832- 0 0 0 0 0 0 0 0 0 0 0 0
36833- 0 0 0 0 0 0 0 0 0 0 0 0
36834- 0 0 0 0 0 0 0 0 0 0 0 0
36835- 0 0 0 0 0 0 10 10 10 30 30 30
36836- 74 74 74 58 58 58 2 2 6 42 42 42
36837- 2 2 6 22 22 22 231 231 231 253 253 253
36838-253 253 253 253 253 253 253 253 253 253 253 253
36839-253 253 253 253 253 253 253 253 253 250 250 250
36840-253 253 253 253 253 253 253 253 253 253 253 253
36841-253 253 253 253 253 253 253 253 253 253 253 253
36842-253 253 253 253 253 253 253 253 253 253 253 253
36843-253 253 253 253 253 253 253 253 253 253 253 253
36844-253 253 253 246 246 246 46 46 46 38 38 38
36845- 42 42 42 14 14 14 38 38 38 14 14 14
36846- 2 2 6 2 2 6 2 2 6 6 6 6
36847- 86 86 86 46 46 46 14 14 14 0 0 0
36848- 0 0 0 0 0 0 0 0 0 0 0 0
36849- 0 0 0 0 0 0 0 0 0 0 0 0
36850- 0 0 0 0 0 0 0 0 0 0 0 0
36851- 0 0 0 0 0 0 0 0 0 0 0 0
36852- 0 0 0 0 0 0 0 0 0 0 0 0
36853- 0 0 0 0 0 0 0 0 0 0 0 0
36854- 0 0 0 0 0 0 0 0 0 0 0 0
36855- 0 0 0 6 6 6 14 14 14 42 42 42
36856- 90 90 90 18 18 18 18 18 18 26 26 26
36857- 2 2 6 116 116 116 253 253 253 253 253 253
36858-253 253 253 253 253 253 253 253 253 253 253 253
36859-253 253 253 253 253 253 250 250 250 238 238 238
36860-253 253 253 253 253 253 253 253 253 253 253 253
36861-253 253 253 253 253 253 253 253 253 253 253 253
36862-253 253 253 253 253 253 253 253 253 253 253 253
36863-253 253 253 253 253 253 253 253 253 253 253 253
36864-253 253 253 253 253 253 94 94 94 6 6 6
36865- 2 2 6 2 2 6 10 10 10 34 34 34
36866- 2 2 6 2 2 6 2 2 6 2 2 6
36867- 74 74 74 58 58 58 22 22 22 6 6 6
36868- 0 0 0 0 0 0 0 0 0 0 0 0
36869- 0 0 0 0 0 0 0 0 0 0 0 0
36870- 0 0 0 0 0 0 0 0 0 0 0 0
36871- 0 0 0 0 0 0 0 0 0 0 0 0
36872- 0 0 0 0 0 0 0 0 0 0 0 0
36873- 0 0 0 0 0 0 0 0 0 0 0 0
36874- 0 0 0 0 0 0 0 0 0 0 0 0
36875- 0 0 0 10 10 10 26 26 26 66 66 66
36876- 82 82 82 2 2 6 38 38 38 6 6 6
36877- 14 14 14 210 210 210 253 253 253 253 253 253
36878-253 253 253 253 253 253 253 253 253 253 253 253
36879-253 253 253 253 253 253 246 246 246 242 242 242
36880-253 253 253 253 253 253 253 253 253 253 253 253
36881-253 253 253 253 253 253 253 253 253 253 253 253
36882-253 253 253 253 253 253 253 253 253 253 253 253
36883-253 253 253 253 253 253 253 253 253 253 253 253
36884-253 253 253 253 253 253 144 144 144 2 2 6
36885- 2 2 6 2 2 6 2 2 6 46 46 46
36886- 2 2 6 2 2 6 2 2 6 2 2 6
36887- 42 42 42 74 74 74 30 30 30 10 10 10
36888- 0 0 0 0 0 0 0 0 0 0 0 0
36889- 0 0 0 0 0 0 0 0 0 0 0 0
36890- 0 0 0 0 0 0 0 0 0 0 0 0
36891- 0 0 0 0 0 0 0 0 0 0 0 0
36892- 0 0 0 0 0 0 0 0 0 0 0 0
36893- 0 0 0 0 0 0 0 0 0 0 0 0
36894- 0 0 0 0 0 0 0 0 0 0 0 0
36895- 6 6 6 14 14 14 42 42 42 90 90 90
36896- 26 26 26 6 6 6 42 42 42 2 2 6
36897- 74 74 74 250 250 250 253 253 253 253 253 253
36898-253 253 253 253 253 253 253 253 253 253 253 253
36899-253 253 253 253 253 253 242 242 242 242 242 242
36900-253 253 253 253 253 253 253 253 253 253 253 253
36901-253 253 253 253 253 253 253 253 253 253 253 253
36902-253 253 253 253 253 253 253 253 253 253 253 253
36903-253 253 253 253 253 253 253 253 253 253 253 253
36904-253 253 253 253 253 253 182 182 182 2 2 6
36905- 2 2 6 2 2 6 2 2 6 46 46 46
36906- 2 2 6 2 2 6 2 2 6 2 2 6
36907- 10 10 10 86 86 86 38 38 38 10 10 10
36908- 0 0 0 0 0 0 0 0 0 0 0 0
36909- 0 0 0 0 0 0 0 0 0 0 0 0
36910- 0 0 0 0 0 0 0 0 0 0 0 0
36911- 0 0 0 0 0 0 0 0 0 0 0 0
36912- 0 0 0 0 0 0 0 0 0 0 0 0
36913- 0 0 0 0 0 0 0 0 0 0 0 0
36914- 0 0 0 0 0 0 0 0 0 0 0 0
36915- 10 10 10 26 26 26 66 66 66 82 82 82
36916- 2 2 6 22 22 22 18 18 18 2 2 6
36917-149 149 149 253 253 253 253 253 253 253 253 253
36918-253 253 253 253 253 253 253 253 253 253 253 253
36919-253 253 253 253 253 253 234 234 234 242 242 242
36920-253 253 253 253 253 253 253 253 253 253 253 253
36921-253 253 253 253 253 253 253 253 253 253 253 253
36922-253 253 253 253 253 253 253 253 253 253 253 253
36923-253 253 253 253 253 253 253 253 253 253 253 253
36924-253 253 253 253 253 253 206 206 206 2 2 6
36925- 2 2 6 2 2 6 2 2 6 38 38 38
36926- 2 2 6 2 2 6 2 2 6 2 2 6
36927- 6 6 6 86 86 86 46 46 46 14 14 14
36928- 0 0 0 0 0 0 0 0 0 0 0 0
36929- 0 0 0 0 0 0 0 0 0 0 0 0
36930- 0 0 0 0 0 0 0 0 0 0 0 0
36931- 0 0 0 0 0 0 0 0 0 0 0 0
36932- 0 0 0 0 0 0 0 0 0 0 0 0
36933- 0 0 0 0 0 0 0 0 0 0 0 0
36934- 0 0 0 0 0 0 0 0 0 6 6 6
36935- 18 18 18 46 46 46 86 86 86 18 18 18
36936- 2 2 6 34 34 34 10 10 10 6 6 6
36937-210 210 210 253 253 253 253 253 253 253 253 253
36938-253 253 253 253 253 253 253 253 253 253 253 253
36939-253 253 253 253 253 253 234 234 234 242 242 242
36940-253 253 253 253 253 253 253 253 253 253 253 253
36941-253 253 253 253 253 253 253 253 253 253 253 253
36942-253 253 253 253 253 253 253 253 253 253 253 253
36943-253 253 253 253 253 253 253 253 253 253 253 253
36944-253 253 253 253 253 253 221 221 221 6 6 6
36945- 2 2 6 2 2 6 6 6 6 30 30 30
36946- 2 2 6 2 2 6 2 2 6 2 2 6
36947- 2 2 6 82 82 82 54 54 54 18 18 18
36948- 6 6 6 0 0 0 0 0 0 0 0 0
36949- 0 0 0 0 0 0 0 0 0 0 0 0
36950- 0 0 0 0 0 0 0 0 0 0 0 0
36951- 0 0 0 0 0 0 0 0 0 0 0 0
36952- 0 0 0 0 0 0 0 0 0 0 0 0
36953- 0 0 0 0 0 0 0 0 0 0 0 0
36954- 0 0 0 0 0 0 0 0 0 10 10 10
36955- 26 26 26 66 66 66 62 62 62 2 2 6
36956- 2 2 6 38 38 38 10 10 10 26 26 26
36957-238 238 238 253 253 253 253 253 253 253 253 253
36958-253 253 253 253 253 253 253 253 253 253 253 253
36959-253 253 253 253 253 253 231 231 231 238 238 238
36960-253 253 253 253 253 253 253 253 253 253 253 253
36961-253 253 253 253 253 253 253 253 253 253 253 253
36962-253 253 253 253 253 253 253 253 253 253 253 253
36963-253 253 253 253 253 253 253 253 253 253 253 253
36964-253 253 253 253 253 253 231 231 231 6 6 6
36965- 2 2 6 2 2 6 10 10 10 30 30 30
36966- 2 2 6 2 2 6 2 2 6 2 2 6
36967- 2 2 6 66 66 66 58 58 58 22 22 22
36968- 6 6 6 0 0 0 0 0 0 0 0 0
36969- 0 0 0 0 0 0 0 0 0 0 0 0
36970- 0 0 0 0 0 0 0 0 0 0 0 0
36971- 0 0 0 0 0 0 0 0 0 0 0 0
36972- 0 0 0 0 0 0 0 0 0 0 0 0
36973- 0 0 0 0 0 0 0 0 0 0 0 0
36974- 0 0 0 0 0 0 0 0 0 10 10 10
36975- 38 38 38 78 78 78 6 6 6 2 2 6
36976- 2 2 6 46 46 46 14 14 14 42 42 42
36977-246 246 246 253 253 253 253 253 253 253 253 253
36978-253 253 253 253 253 253 253 253 253 253 253 253
36979-253 253 253 253 253 253 231 231 231 242 242 242
36980-253 253 253 253 253 253 253 253 253 253 253 253
36981-253 253 253 253 253 253 253 253 253 253 253 253
36982-253 253 253 253 253 253 253 253 253 253 253 253
36983-253 253 253 253 253 253 253 253 253 253 253 253
36984-253 253 253 253 253 253 234 234 234 10 10 10
36985- 2 2 6 2 2 6 22 22 22 14 14 14
36986- 2 2 6 2 2 6 2 2 6 2 2 6
36987- 2 2 6 66 66 66 62 62 62 22 22 22
36988- 6 6 6 0 0 0 0 0 0 0 0 0
36989- 0 0 0 0 0 0 0 0 0 0 0 0
36990- 0 0 0 0 0 0 0 0 0 0 0 0
36991- 0 0 0 0 0 0 0 0 0 0 0 0
36992- 0 0 0 0 0 0 0 0 0 0 0 0
36993- 0 0 0 0 0 0 0 0 0 0 0 0
36994- 0 0 0 0 0 0 6 6 6 18 18 18
36995- 50 50 50 74 74 74 2 2 6 2 2 6
36996- 14 14 14 70 70 70 34 34 34 62 62 62
36997-250 250 250 253 253 253 253 253 253 253 253 253
36998-253 253 253 253 253 253 253 253 253 253 253 253
36999-253 253 253 253 253 253 231 231 231 246 246 246
37000-253 253 253 253 253 253 253 253 253 253 253 253
37001-253 253 253 253 253 253 253 253 253 253 253 253
37002-253 253 253 253 253 253 253 253 253 253 253 253
37003-253 253 253 253 253 253 253 253 253 253 253 253
37004-253 253 253 253 253 253 234 234 234 14 14 14
37005- 2 2 6 2 2 6 30 30 30 2 2 6
37006- 2 2 6 2 2 6 2 2 6 2 2 6
37007- 2 2 6 66 66 66 62 62 62 22 22 22
37008- 6 6 6 0 0 0 0 0 0 0 0 0
37009- 0 0 0 0 0 0 0 0 0 0 0 0
37010- 0 0 0 0 0 0 0 0 0 0 0 0
37011- 0 0 0 0 0 0 0 0 0 0 0 0
37012- 0 0 0 0 0 0 0 0 0 0 0 0
37013- 0 0 0 0 0 0 0 0 0 0 0 0
37014- 0 0 0 0 0 0 6 6 6 18 18 18
37015- 54 54 54 62 62 62 2 2 6 2 2 6
37016- 2 2 6 30 30 30 46 46 46 70 70 70
37017-250 250 250 253 253 253 253 253 253 253 253 253
37018-253 253 253 253 253 253 253 253 253 253 253 253
37019-253 253 253 253 253 253 231 231 231 246 246 246
37020-253 253 253 253 253 253 253 253 253 253 253 253
37021-253 253 253 253 253 253 253 253 253 253 253 253
37022-253 253 253 253 253 253 253 253 253 253 253 253
37023-253 253 253 253 253 253 253 253 253 253 253 253
37024-253 253 253 253 253 253 226 226 226 10 10 10
37025- 2 2 6 6 6 6 30 30 30 2 2 6
37026- 2 2 6 2 2 6 2 2 6 2 2 6
37027- 2 2 6 66 66 66 58 58 58 22 22 22
37028- 6 6 6 0 0 0 0 0 0 0 0 0
37029- 0 0 0 0 0 0 0 0 0 0 0 0
37030- 0 0 0 0 0 0 0 0 0 0 0 0
37031- 0 0 0 0 0 0 0 0 0 0 0 0
37032- 0 0 0 0 0 0 0 0 0 0 0 0
37033- 0 0 0 0 0 0 0 0 0 0 0 0
37034- 0 0 0 0 0 0 6 6 6 22 22 22
37035- 58 58 58 62 62 62 2 2 6 2 2 6
37036- 2 2 6 2 2 6 30 30 30 78 78 78
37037-250 250 250 253 253 253 253 253 253 253 253 253
37038-253 253 253 253 253 253 253 253 253 253 253 253
37039-253 253 253 253 253 253 231 231 231 246 246 246
37040-253 253 253 253 253 253 253 253 253 253 253 253
37041-253 253 253 253 253 253 253 253 253 253 253 253
37042-253 253 253 253 253 253 253 253 253 253 253 253
37043-253 253 253 253 253 253 253 253 253 253 253 253
37044-253 253 253 253 253 253 206 206 206 2 2 6
37045- 22 22 22 34 34 34 18 14 6 22 22 22
37046- 26 26 26 18 18 18 6 6 6 2 2 6
37047- 2 2 6 82 82 82 54 54 54 18 18 18
37048- 6 6 6 0 0 0 0 0 0 0 0 0
37049- 0 0 0 0 0 0 0 0 0 0 0 0
37050- 0 0 0 0 0 0 0 0 0 0 0 0
37051- 0 0 0 0 0 0 0 0 0 0 0 0
37052- 0 0 0 0 0 0 0 0 0 0 0 0
37053- 0 0 0 0 0 0 0 0 0 0 0 0
37054- 0 0 0 0 0 0 6 6 6 26 26 26
37055- 62 62 62 106 106 106 74 54 14 185 133 11
37056-210 162 10 121 92 8 6 6 6 62 62 62
37057-238 238 238 253 253 253 253 253 253 253 253 253
37058-253 253 253 253 253 253 253 253 253 253 253 253
37059-253 253 253 253 253 253 231 231 231 246 246 246
37060-253 253 253 253 253 253 253 253 253 253 253 253
37061-253 253 253 253 253 253 253 253 253 253 253 253
37062-253 253 253 253 253 253 253 253 253 253 253 253
37063-253 253 253 253 253 253 253 253 253 253 253 253
37064-253 253 253 253 253 253 158 158 158 18 18 18
37065- 14 14 14 2 2 6 2 2 6 2 2 6
37066- 6 6 6 18 18 18 66 66 66 38 38 38
37067- 6 6 6 94 94 94 50 50 50 18 18 18
37068- 6 6 6 0 0 0 0 0 0 0 0 0
37069- 0 0 0 0 0 0 0 0 0 0 0 0
37070- 0 0 0 0 0 0 0 0 0 0 0 0
37071- 0 0 0 0 0 0 0 0 0 0 0 0
37072- 0 0 0 0 0 0 0 0 0 0 0 0
37073- 0 0 0 0 0 0 0 0 0 6 6 6
37074- 10 10 10 10 10 10 18 18 18 38 38 38
37075- 78 78 78 142 134 106 216 158 10 242 186 14
37076-246 190 14 246 190 14 156 118 10 10 10 10
37077- 90 90 90 238 238 238 253 253 253 253 253 253
37078-253 253 253 253 253 253 253 253 253 253 253 253
37079-253 253 253 253 253 253 231 231 231 250 250 250
37080-253 253 253 253 253 253 253 253 253 253 253 253
37081-253 253 253 253 253 253 253 253 253 253 253 253
37082-253 253 253 253 253 253 253 253 253 253 253 253
37083-253 253 253 253 253 253 253 253 253 246 230 190
37084-238 204 91 238 204 91 181 142 44 37 26 9
37085- 2 2 6 2 2 6 2 2 6 2 2 6
37086- 2 2 6 2 2 6 38 38 38 46 46 46
37087- 26 26 26 106 106 106 54 54 54 18 18 18
37088- 6 6 6 0 0 0 0 0 0 0 0 0
37089- 0 0 0 0 0 0 0 0 0 0 0 0
37090- 0 0 0 0 0 0 0 0 0 0 0 0
37091- 0 0 0 0 0 0 0 0 0 0 0 0
37092- 0 0 0 0 0 0 0 0 0 0 0 0
37093- 0 0 0 6 6 6 14 14 14 22 22 22
37094- 30 30 30 38 38 38 50 50 50 70 70 70
37095-106 106 106 190 142 34 226 170 11 242 186 14
37096-246 190 14 246 190 14 246 190 14 154 114 10
37097- 6 6 6 74 74 74 226 226 226 253 253 253
37098-253 253 253 253 253 253 253 253 253 253 253 253
37099-253 253 253 253 253 253 231 231 231 250 250 250
37100-253 253 253 253 253 253 253 253 253 253 253 253
37101-253 253 253 253 253 253 253 253 253 253 253 253
37102-253 253 253 253 253 253 253 253 253 253 253 253
37103-253 253 253 253 253 253 253 253 253 228 184 62
37104-241 196 14 241 208 19 232 195 16 38 30 10
37105- 2 2 6 2 2 6 2 2 6 2 2 6
37106- 2 2 6 6 6 6 30 30 30 26 26 26
37107-203 166 17 154 142 90 66 66 66 26 26 26
37108- 6 6 6 0 0 0 0 0 0 0 0 0
37109- 0 0 0 0 0 0 0 0 0 0 0 0
37110- 0 0 0 0 0 0 0 0 0 0 0 0
37111- 0 0 0 0 0 0 0 0 0 0 0 0
37112- 0 0 0 0 0 0 0 0 0 0 0 0
37113- 6 6 6 18 18 18 38 38 38 58 58 58
37114- 78 78 78 86 86 86 101 101 101 123 123 123
37115-175 146 61 210 150 10 234 174 13 246 186 14
37116-246 190 14 246 190 14 246 190 14 238 190 10
37117-102 78 10 2 2 6 46 46 46 198 198 198
37118-253 253 253 253 253 253 253 253 253 253 253 253
37119-253 253 253 253 253 253 234 234 234 242 242 242
37120-253 253 253 253 253 253 253 253 253 253 253 253
37121-253 253 253 253 253 253 253 253 253 253 253 253
37122-253 253 253 253 253 253 253 253 253 253 253 253
37123-253 253 253 253 253 253 253 253 253 224 178 62
37124-242 186 14 241 196 14 210 166 10 22 18 6
37125- 2 2 6 2 2 6 2 2 6 2 2 6
37126- 2 2 6 2 2 6 6 6 6 121 92 8
37127-238 202 15 232 195 16 82 82 82 34 34 34
37128- 10 10 10 0 0 0 0 0 0 0 0 0
37129- 0 0 0 0 0 0 0 0 0 0 0 0
37130- 0 0 0 0 0 0 0 0 0 0 0 0
37131- 0 0 0 0 0 0 0 0 0 0 0 0
37132- 0 0 0 0 0 0 0 0 0 0 0 0
37133- 14 14 14 38 38 38 70 70 70 154 122 46
37134-190 142 34 200 144 11 197 138 11 197 138 11
37135-213 154 11 226 170 11 242 186 14 246 190 14
37136-246 190 14 246 190 14 246 190 14 246 190 14
37137-225 175 15 46 32 6 2 2 6 22 22 22
37138-158 158 158 250 250 250 253 253 253 253 253 253
37139-253 253 253 253 253 253 253 253 253 253 253 253
37140-253 253 253 253 253 253 253 253 253 253 253 253
37141-253 253 253 253 253 253 253 253 253 253 253 253
37142-253 253 253 253 253 253 253 253 253 253 253 253
37143-253 253 253 250 250 250 242 242 242 224 178 62
37144-239 182 13 236 186 11 213 154 11 46 32 6
37145- 2 2 6 2 2 6 2 2 6 2 2 6
37146- 2 2 6 2 2 6 61 42 6 225 175 15
37147-238 190 10 236 186 11 112 100 78 42 42 42
37148- 14 14 14 0 0 0 0 0 0 0 0 0
37149- 0 0 0 0 0 0 0 0 0 0 0 0
37150- 0 0 0 0 0 0 0 0 0 0 0 0
37151- 0 0 0 0 0 0 0 0 0 0 0 0
37152- 0 0 0 0 0 0 0 0 0 6 6 6
37153- 22 22 22 54 54 54 154 122 46 213 154 11
37154-226 170 11 230 174 11 226 170 11 226 170 11
37155-236 178 12 242 186 14 246 190 14 246 190 14
37156-246 190 14 246 190 14 246 190 14 246 190 14
37157-241 196 14 184 144 12 10 10 10 2 2 6
37158- 6 6 6 116 116 116 242 242 242 253 253 253
37159-253 253 253 253 253 253 253 253 253 253 253 253
37160-253 253 253 253 253 253 253 253 253 253 253 253
37161-253 253 253 253 253 253 253 253 253 253 253 253
37162-253 253 253 253 253 253 253 253 253 253 253 253
37163-253 253 253 231 231 231 198 198 198 214 170 54
37164-236 178 12 236 178 12 210 150 10 137 92 6
37165- 18 14 6 2 2 6 2 2 6 2 2 6
37166- 6 6 6 70 47 6 200 144 11 236 178 12
37167-239 182 13 239 182 13 124 112 88 58 58 58
37168- 22 22 22 6 6 6 0 0 0 0 0 0
37169- 0 0 0 0 0 0 0 0 0 0 0 0
37170- 0 0 0 0 0 0 0 0 0 0 0 0
37171- 0 0 0 0 0 0 0 0 0 0 0 0
37172- 0 0 0 0 0 0 0 0 0 10 10 10
37173- 30 30 30 70 70 70 180 133 36 226 170 11
37174-239 182 13 242 186 14 242 186 14 246 186 14
37175-246 190 14 246 190 14 246 190 14 246 190 14
37176-246 190 14 246 190 14 246 190 14 246 190 14
37177-246 190 14 232 195 16 98 70 6 2 2 6
37178- 2 2 6 2 2 6 66 66 66 221 221 221
37179-253 253 253 253 253 253 253 253 253 253 253 253
37180-253 253 253 253 253 253 253 253 253 253 253 253
37181-253 253 253 253 253 253 253 253 253 253 253 253
37182-253 253 253 253 253 253 253 253 253 253 253 253
37183-253 253 253 206 206 206 198 198 198 214 166 58
37184-230 174 11 230 174 11 216 158 10 192 133 9
37185-163 110 8 116 81 8 102 78 10 116 81 8
37186-167 114 7 197 138 11 226 170 11 239 182 13
37187-242 186 14 242 186 14 162 146 94 78 78 78
37188- 34 34 34 14 14 14 6 6 6 0 0 0
37189- 0 0 0 0 0 0 0 0 0 0 0 0
37190- 0 0 0 0 0 0 0 0 0 0 0 0
37191- 0 0 0 0 0 0 0 0 0 0 0 0
37192- 0 0 0 0 0 0 0 0 0 6 6 6
37193- 30 30 30 78 78 78 190 142 34 226 170 11
37194-239 182 13 246 190 14 246 190 14 246 190 14
37195-246 190 14 246 190 14 246 190 14 246 190 14
37196-246 190 14 246 190 14 246 190 14 246 190 14
37197-246 190 14 241 196 14 203 166 17 22 18 6
37198- 2 2 6 2 2 6 2 2 6 38 38 38
37199-218 218 218 253 253 253 253 253 253 253 253 253
37200-253 253 253 253 253 253 253 253 253 253 253 253
37201-253 253 253 253 253 253 253 253 253 253 253 253
37202-253 253 253 253 253 253 253 253 253 253 253 253
37203-250 250 250 206 206 206 198 198 198 202 162 69
37204-226 170 11 236 178 12 224 166 10 210 150 10
37205-200 144 11 197 138 11 192 133 9 197 138 11
37206-210 150 10 226 170 11 242 186 14 246 190 14
37207-246 190 14 246 186 14 225 175 15 124 112 88
37208- 62 62 62 30 30 30 14 14 14 6 6 6
37209- 0 0 0 0 0 0 0 0 0 0 0 0
37210- 0 0 0 0 0 0 0 0 0 0 0 0
37211- 0 0 0 0 0 0 0 0 0 0 0 0
37212- 0 0 0 0 0 0 0 0 0 10 10 10
37213- 30 30 30 78 78 78 174 135 50 224 166 10
37214-239 182 13 246 190 14 246 190 14 246 190 14
37215-246 190 14 246 190 14 246 190 14 246 190 14
37216-246 190 14 246 190 14 246 190 14 246 190 14
37217-246 190 14 246 190 14 241 196 14 139 102 15
37218- 2 2 6 2 2 6 2 2 6 2 2 6
37219- 78 78 78 250 250 250 253 253 253 253 253 253
37220-253 253 253 253 253 253 253 253 253 253 253 253
37221-253 253 253 253 253 253 253 253 253 253 253 253
37222-253 253 253 253 253 253 253 253 253 253 253 253
37223-250 250 250 214 214 214 198 198 198 190 150 46
37224-219 162 10 236 178 12 234 174 13 224 166 10
37225-216 158 10 213 154 11 213 154 11 216 158 10
37226-226 170 11 239 182 13 246 190 14 246 190 14
37227-246 190 14 246 190 14 242 186 14 206 162 42
37228-101 101 101 58 58 58 30 30 30 14 14 14
37229- 6 6 6 0 0 0 0 0 0 0 0 0
37230- 0 0 0 0 0 0 0 0 0 0 0 0
37231- 0 0 0 0 0 0 0 0 0 0 0 0
37232- 0 0 0 0 0 0 0 0 0 10 10 10
37233- 30 30 30 74 74 74 174 135 50 216 158 10
37234-236 178 12 246 190 14 246 190 14 246 190 14
37235-246 190 14 246 190 14 246 190 14 246 190 14
37236-246 190 14 246 190 14 246 190 14 246 190 14
37237-246 190 14 246 190 14 241 196 14 226 184 13
37238- 61 42 6 2 2 6 2 2 6 2 2 6
37239- 22 22 22 238 238 238 253 253 253 253 253 253
37240-253 253 253 253 253 253 253 253 253 253 253 253
37241-253 253 253 253 253 253 253 253 253 253 253 253
37242-253 253 253 253 253 253 253 253 253 253 253 253
37243-253 253 253 226 226 226 187 187 187 180 133 36
37244-216 158 10 236 178 12 239 182 13 236 178 12
37245-230 174 11 226 170 11 226 170 11 230 174 11
37246-236 178 12 242 186 14 246 190 14 246 190 14
37247-246 190 14 246 190 14 246 186 14 239 182 13
37248-206 162 42 106 106 106 66 66 66 34 34 34
37249- 14 14 14 6 6 6 0 0 0 0 0 0
37250- 0 0 0 0 0 0 0 0 0 0 0 0
37251- 0 0 0 0 0 0 0 0 0 0 0 0
37252- 0 0 0 0 0 0 0 0 0 6 6 6
37253- 26 26 26 70 70 70 163 133 67 213 154 11
37254-236 178 12 246 190 14 246 190 14 246 190 14
37255-246 190 14 246 190 14 246 190 14 246 190 14
37256-246 190 14 246 190 14 246 190 14 246 190 14
37257-246 190 14 246 190 14 246 190 14 241 196 14
37258-190 146 13 18 14 6 2 2 6 2 2 6
37259- 46 46 46 246 246 246 253 253 253 253 253 253
37260-253 253 253 253 253 253 253 253 253 253 253 253
37261-253 253 253 253 253 253 253 253 253 253 253 253
37262-253 253 253 253 253 253 253 253 253 253 253 253
37263-253 253 253 221 221 221 86 86 86 156 107 11
37264-216 158 10 236 178 12 242 186 14 246 186 14
37265-242 186 14 239 182 13 239 182 13 242 186 14
37266-242 186 14 246 186 14 246 190 14 246 190 14
37267-246 190 14 246 190 14 246 190 14 246 190 14
37268-242 186 14 225 175 15 142 122 72 66 66 66
37269- 30 30 30 10 10 10 0 0 0 0 0 0
37270- 0 0 0 0 0 0 0 0 0 0 0 0
37271- 0 0 0 0 0 0 0 0 0 0 0 0
37272- 0 0 0 0 0 0 0 0 0 6 6 6
37273- 26 26 26 70 70 70 163 133 67 210 150 10
37274-236 178 12 246 190 14 246 190 14 246 190 14
37275-246 190 14 246 190 14 246 190 14 246 190 14
37276-246 190 14 246 190 14 246 190 14 246 190 14
37277-246 190 14 246 190 14 246 190 14 246 190 14
37278-232 195 16 121 92 8 34 34 34 106 106 106
37279-221 221 221 253 253 253 253 253 253 253 253 253
37280-253 253 253 253 253 253 253 253 253 253 253 253
37281-253 253 253 253 253 253 253 253 253 253 253 253
37282-253 253 253 253 253 253 253 253 253 253 253 253
37283-242 242 242 82 82 82 18 14 6 163 110 8
37284-216 158 10 236 178 12 242 186 14 246 190 14
37285-246 190 14 246 190 14 246 190 14 246 190 14
37286-246 190 14 246 190 14 246 190 14 246 190 14
37287-246 190 14 246 190 14 246 190 14 246 190 14
37288-246 190 14 246 190 14 242 186 14 163 133 67
37289- 46 46 46 18 18 18 6 6 6 0 0 0
37290- 0 0 0 0 0 0 0 0 0 0 0 0
37291- 0 0 0 0 0 0 0 0 0 0 0 0
37292- 0 0 0 0 0 0 0 0 0 10 10 10
37293- 30 30 30 78 78 78 163 133 67 210 150 10
37294-236 178 12 246 186 14 246 190 14 246 190 14
37295-246 190 14 246 190 14 246 190 14 246 190 14
37296-246 190 14 246 190 14 246 190 14 246 190 14
37297-246 190 14 246 190 14 246 190 14 246 190 14
37298-241 196 14 215 174 15 190 178 144 253 253 253
37299-253 253 253 253 253 253 253 253 253 253 253 253
37300-253 253 253 253 253 253 253 253 253 253 253 253
37301-253 253 253 253 253 253 253 253 253 253 253 253
37302-253 253 253 253 253 253 253 253 253 218 218 218
37303- 58 58 58 2 2 6 22 18 6 167 114 7
37304-216 158 10 236 178 12 246 186 14 246 190 14
37305-246 190 14 246 190 14 246 190 14 246 190 14
37306-246 190 14 246 190 14 246 190 14 246 190 14
37307-246 190 14 246 190 14 246 190 14 246 190 14
37308-246 190 14 246 186 14 242 186 14 190 150 46
37309- 54 54 54 22 22 22 6 6 6 0 0 0
37310- 0 0 0 0 0 0 0 0 0 0 0 0
37311- 0 0 0 0 0 0 0 0 0 0 0 0
37312- 0 0 0 0 0 0 0 0 0 14 14 14
37313- 38 38 38 86 86 86 180 133 36 213 154 11
37314-236 178 12 246 186 14 246 190 14 246 190 14
37315-246 190 14 246 190 14 246 190 14 246 190 14
37316-246 190 14 246 190 14 246 190 14 246 190 14
37317-246 190 14 246 190 14 246 190 14 246 190 14
37318-246 190 14 232 195 16 190 146 13 214 214 214
37319-253 253 253 253 253 253 253 253 253 253 253 253
37320-253 253 253 253 253 253 253 253 253 253 253 253
37321-253 253 253 253 253 253 253 253 253 253 253 253
37322-253 253 253 250 250 250 170 170 170 26 26 26
37323- 2 2 6 2 2 6 37 26 9 163 110 8
37324-219 162 10 239 182 13 246 186 14 246 190 14
37325-246 190 14 246 190 14 246 190 14 246 190 14
37326-246 190 14 246 190 14 246 190 14 246 190 14
37327-246 190 14 246 190 14 246 190 14 246 190 14
37328-246 186 14 236 178 12 224 166 10 142 122 72
37329- 46 46 46 18 18 18 6 6 6 0 0 0
37330- 0 0 0 0 0 0 0 0 0 0 0 0
37331- 0 0 0 0 0 0 0 0 0 0 0 0
37332- 0 0 0 0 0 0 6 6 6 18 18 18
37333- 50 50 50 109 106 95 192 133 9 224 166 10
37334-242 186 14 246 190 14 246 190 14 246 190 14
37335-246 190 14 246 190 14 246 190 14 246 190 14
37336-246 190 14 246 190 14 246 190 14 246 190 14
37337-246 190 14 246 190 14 246 190 14 246 190 14
37338-242 186 14 226 184 13 210 162 10 142 110 46
37339-226 226 226 253 253 253 253 253 253 253 253 253
37340-253 253 253 253 253 253 253 253 253 253 253 253
37341-253 253 253 253 253 253 253 253 253 253 253 253
37342-198 198 198 66 66 66 2 2 6 2 2 6
37343- 2 2 6 2 2 6 50 34 6 156 107 11
37344-219 162 10 239 182 13 246 186 14 246 190 14
37345-246 190 14 246 190 14 246 190 14 246 190 14
37346-246 190 14 246 190 14 246 190 14 246 190 14
37347-246 190 14 246 190 14 246 190 14 242 186 14
37348-234 174 13 213 154 11 154 122 46 66 66 66
37349- 30 30 30 10 10 10 0 0 0 0 0 0
37350- 0 0 0 0 0 0 0 0 0 0 0 0
37351- 0 0 0 0 0 0 0 0 0 0 0 0
37352- 0 0 0 0 0 0 6 6 6 22 22 22
37353- 58 58 58 154 121 60 206 145 10 234 174 13
37354-242 186 14 246 186 14 246 190 14 246 190 14
37355-246 190 14 246 190 14 246 190 14 246 190 14
37356-246 190 14 246 190 14 246 190 14 246 190 14
37357-246 190 14 246 190 14 246 190 14 246 190 14
37358-246 186 14 236 178 12 210 162 10 163 110 8
37359- 61 42 6 138 138 138 218 218 218 250 250 250
37360-253 253 253 253 253 253 253 253 253 250 250 250
37361-242 242 242 210 210 210 144 144 144 66 66 66
37362- 6 6 6 2 2 6 2 2 6 2 2 6
37363- 2 2 6 2 2 6 61 42 6 163 110 8
37364-216 158 10 236 178 12 246 190 14 246 190 14
37365-246 190 14 246 190 14 246 190 14 246 190 14
37366-246 190 14 246 190 14 246 190 14 246 190 14
37367-246 190 14 239 182 13 230 174 11 216 158 10
37368-190 142 34 124 112 88 70 70 70 38 38 38
37369- 18 18 18 6 6 6 0 0 0 0 0 0
37370- 0 0 0 0 0 0 0 0 0 0 0 0
37371- 0 0 0 0 0 0 0 0 0 0 0 0
37372- 0 0 0 0 0 0 6 6 6 22 22 22
37373- 62 62 62 168 124 44 206 145 10 224 166 10
37374-236 178 12 239 182 13 242 186 14 242 186 14
37375-246 186 14 246 190 14 246 190 14 246 190 14
37376-246 190 14 246 190 14 246 190 14 246 190 14
37377-246 190 14 246 190 14 246 190 14 246 190 14
37378-246 190 14 236 178 12 216 158 10 175 118 6
37379- 80 54 7 2 2 6 6 6 6 30 30 30
37380- 54 54 54 62 62 62 50 50 50 38 38 38
37381- 14 14 14 2 2 6 2 2 6 2 2 6
37382- 2 2 6 2 2 6 2 2 6 2 2 6
37383- 2 2 6 6 6 6 80 54 7 167 114 7
37384-213 154 11 236 178 12 246 190 14 246 190 14
37385-246 190 14 246 190 14 246 190 14 246 190 14
37386-246 190 14 242 186 14 239 182 13 239 182 13
37387-230 174 11 210 150 10 174 135 50 124 112 88
37388- 82 82 82 54 54 54 34 34 34 18 18 18
37389- 6 6 6 0 0 0 0 0 0 0 0 0
37390- 0 0 0 0 0 0 0 0 0 0 0 0
37391- 0 0 0 0 0 0 0 0 0 0 0 0
37392- 0 0 0 0 0 0 6 6 6 18 18 18
37393- 50 50 50 158 118 36 192 133 9 200 144 11
37394-216 158 10 219 162 10 224 166 10 226 170 11
37395-230 174 11 236 178 12 239 182 13 239 182 13
37396-242 186 14 246 186 14 246 190 14 246 190 14
37397-246 190 14 246 190 14 246 190 14 246 190 14
37398-246 186 14 230 174 11 210 150 10 163 110 8
37399-104 69 6 10 10 10 2 2 6 2 2 6
37400- 2 2 6 2 2 6 2 2 6 2 2 6
37401- 2 2 6 2 2 6 2 2 6 2 2 6
37402- 2 2 6 2 2 6 2 2 6 2 2 6
37403- 2 2 6 6 6 6 91 60 6 167 114 7
37404-206 145 10 230 174 11 242 186 14 246 190 14
37405-246 190 14 246 190 14 246 186 14 242 186 14
37406-239 182 13 230 174 11 224 166 10 213 154 11
37407-180 133 36 124 112 88 86 86 86 58 58 58
37408- 38 38 38 22 22 22 10 10 10 6 6 6
37409- 0 0 0 0 0 0 0 0 0 0 0 0
37410- 0 0 0 0 0 0 0 0 0 0 0 0
37411- 0 0 0 0 0 0 0 0 0 0 0 0
37412- 0 0 0 0 0 0 0 0 0 14 14 14
37413- 34 34 34 70 70 70 138 110 50 158 118 36
37414-167 114 7 180 123 7 192 133 9 197 138 11
37415-200 144 11 206 145 10 213 154 11 219 162 10
37416-224 166 10 230 174 11 239 182 13 242 186 14
37417-246 186 14 246 186 14 246 186 14 246 186 14
37418-239 182 13 216 158 10 185 133 11 152 99 6
37419-104 69 6 18 14 6 2 2 6 2 2 6
37420- 2 2 6 2 2 6 2 2 6 2 2 6
37421- 2 2 6 2 2 6 2 2 6 2 2 6
37422- 2 2 6 2 2 6 2 2 6 2 2 6
37423- 2 2 6 6 6 6 80 54 7 152 99 6
37424-192 133 9 219 162 10 236 178 12 239 182 13
37425-246 186 14 242 186 14 239 182 13 236 178 12
37426-224 166 10 206 145 10 192 133 9 154 121 60
37427- 94 94 94 62 62 62 42 42 42 22 22 22
37428- 14 14 14 6 6 6 0 0 0 0 0 0
37429- 0 0 0 0 0 0 0 0 0 0 0 0
37430- 0 0 0 0 0 0 0 0 0 0 0 0
37431- 0 0 0 0 0 0 0 0 0 0 0 0
37432- 0 0 0 0 0 0 0 0 0 6 6 6
37433- 18 18 18 34 34 34 58 58 58 78 78 78
37434-101 98 89 124 112 88 142 110 46 156 107 11
37435-163 110 8 167 114 7 175 118 6 180 123 7
37436-185 133 11 197 138 11 210 150 10 219 162 10
37437-226 170 11 236 178 12 236 178 12 234 174 13
37438-219 162 10 197 138 11 163 110 8 130 83 6
37439- 91 60 6 10 10 10 2 2 6 2 2 6
37440- 18 18 18 38 38 38 38 38 38 38 38 38
37441- 38 38 38 38 38 38 38 38 38 38 38 38
37442- 38 38 38 38 38 38 26 26 26 2 2 6
37443- 2 2 6 6 6 6 70 47 6 137 92 6
37444-175 118 6 200 144 11 219 162 10 230 174 11
37445-234 174 13 230 174 11 219 162 10 210 150 10
37446-192 133 9 163 110 8 124 112 88 82 82 82
37447- 50 50 50 30 30 30 14 14 14 6 6 6
37448- 0 0 0 0 0 0 0 0 0 0 0 0
37449- 0 0 0 0 0 0 0 0 0 0 0 0
37450- 0 0 0 0 0 0 0 0 0 0 0 0
37451- 0 0 0 0 0 0 0 0 0 0 0 0
37452- 0 0 0 0 0 0 0 0 0 0 0 0
37453- 6 6 6 14 14 14 22 22 22 34 34 34
37454- 42 42 42 58 58 58 74 74 74 86 86 86
37455-101 98 89 122 102 70 130 98 46 121 87 25
37456-137 92 6 152 99 6 163 110 8 180 123 7
37457-185 133 11 197 138 11 206 145 10 200 144 11
37458-180 123 7 156 107 11 130 83 6 104 69 6
37459- 50 34 6 54 54 54 110 110 110 101 98 89
37460- 86 86 86 82 82 82 78 78 78 78 78 78
37461- 78 78 78 78 78 78 78 78 78 78 78 78
37462- 78 78 78 82 82 82 86 86 86 94 94 94
37463-106 106 106 101 101 101 86 66 34 124 80 6
37464-156 107 11 180 123 7 192 133 9 200 144 11
37465-206 145 10 200 144 11 192 133 9 175 118 6
37466-139 102 15 109 106 95 70 70 70 42 42 42
37467- 22 22 22 10 10 10 0 0 0 0 0 0
37468- 0 0 0 0 0 0 0 0 0 0 0 0
37469- 0 0 0 0 0 0 0 0 0 0 0 0
37470- 0 0 0 0 0 0 0 0 0 0 0 0
37471- 0 0 0 0 0 0 0 0 0 0 0 0
37472- 0 0 0 0 0 0 0 0 0 0 0 0
37473- 0 0 0 0 0 0 6 6 6 10 10 10
37474- 14 14 14 22 22 22 30 30 30 38 38 38
37475- 50 50 50 62 62 62 74 74 74 90 90 90
37476-101 98 89 112 100 78 121 87 25 124 80 6
37477-137 92 6 152 99 6 152 99 6 152 99 6
37478-138 86 6 124 80 6 98 70 6 86 66 30
37479-101 98 89 82 82 82 58 58 58 46 46 46
37480- 38 38 38 34 34 34 34 34 34 34 34 34
37481- 34 34 34 34 34 34 34 34 34 34 34 34
37482- 34 34 34 34 34 34 38 38 38 42 42 42
37483- 54 54 54 82 82 82 94 86 76 91 60 6
37484-134 86 6 156 107 11 167 114 7 175 118 6
37485-175 118 6 167 114 7 152 99 6 121 87 25
37486-101 98 89 62 62 62 34 34 34 18 18 18
37487- 6 6 6 0 0 0 0 0 0 0 0 0
37488- 0 0 0 0 0 0 0 0 0 0 0 0
37489- 0 0 0 0 0 0 0 0 0 0 0 0
37490- 0 0 0 0 0 0 0 0 0 0 0 0
37491- 0 0 0 0 0 0 0 0 0 0 0 0
37492- 0 0 0 0 0 0 0 0 0 0 0 0
37493- 0 0 0 0 0 0 0 0 0 0 0 0
37494- 0 0 0 6 6 6 6 6 6 10 10 10
37495- 18 18 18 22 22 22 30 30 30 42 42 42
37496- 50 50 50 66 66 66 86 86 86 101 98 89
37497-106 86 58 98 70 6 104 69 6 104 69 6
37498-104 69 6 91 60 6 82 62 34 90 90 90
37499- 62 62 62 38 38 38 22 22 22 14 14 14
37500- 10 10 10 10 10 10 10 10 10 10 10 10
37501- 10 10 10 10 10 10 6 6 6 10 10 10
37502- 10 10 10 10 10 10 10 10 10 14 14 14
37503- 22 22 22 42 42 42 70 70 70 89 81 66
37504- 80 54 7 104 69 6 124 80 6 137 92 6
37505-134 86 6 116 81 8 100 82 52 86 86 86
37506- 58 58 58 30 30 30 14 14 14 6 6 6
37507- 0 0 0 0 0 0 0 0 0 0 0 0
37508- 0 0 0 0 0 0 0 0 0 0 0 0
37509- 0 0 0 0 0 0 0 0 0 0 0 0
37510- 0 0 0 0 0 0 0 0 0 0 0 0
37511- 0 0 0 0 0 0 0 0 0 0 0 0
37512- 0 0 0 0 0 0 0 0 0 0 0 0
37513- 0 0 0 0 0 0 0 0 0 0 0 0
37514- 0 0 0 0 0 0 0 0 0 0 0 0
37515- 0 0 0 6 6 6 10 10 10 14 14 14
37516- 18 18 18 26 26 26 38 38 38 54 54 54
37517- 70 70 70 86 86 86 94 86 76 89 81 66
37518- 89 81 66 86 86 86 74 74 74 50 50 50
37519- 30 30 30 14 14 14 6 6 6 0 0 0
37520- 0 0 0 0 0 0 0 0 0 0 0 0
37521- 0 0 0 0 0 0 0 0 0 0 0 0
37522- 0 0 0 0 0 0 0 0 0 0 0 0
37523- 6 6 6 18 18 18 34 34 34 58 58 58
37524- 82 82 82 89 81 66 89 81 66 89 81 66
37525- 94 86 66 94 86 76 74 74 74 50 50 50
37526- 26 26 26 14 14 14 6 6 6 0 0 0
37527- 0 0 0 0 0 0 0 0 0 0 0 0
37528- 0 0 0 0 0 0 0 0 0 0 0 0
37529- 0 0 0 0 0 0 0 0 0 0 0 0
37530- 0 0 0 0 0 0 0 0 0 0 0 0
37531- 0 0 0 0 0 0 0 0 0 0 0 0
37532- 0 0 0 0 0 0 0 0 0 0 0 0
37533- 0 0 0 0 0 0 0 0 0 0 0 0
37534- 0 0 0 0 0 0 0 0 0 0 0 0
37535- 0 0 0 0 0 0 0 0 0 0 0 0
37536- 6 6 6 6 6 6 14 14 14 18 18 18
37537- 30 30 30 38 38 38 46 46 46 54 54 54
37538- 50 50 50 42 42 42 30 30 30 18 18 18
37539- 10 10 10 0 0 0 0 0 0 0 0 0
37540- 0 0 0 0 0 0 0 0 0 0 0 0
37541- 0 0 0 0 0 0 0 0 0 0 0 0
37542- 0 0 0 0 0 0 0 0 0 0 0 0
37543- 0 0 0 6 6 6 14 14 14 26 26 26
37544- 38 38 38 50 50 50 58 58 58 58 58 58
37545- 54 54 54 42 42 42 30 30 30 18 18 18
37546- 10 10 10 0 0 0 0 0 0 0 0 0
37547- 0 0 0 0 0 0 0 0 0 0 0 0
37548- 0 0 0 0 0 0 0 0 0 0 0 0
37549- 0 0 0 0 0 0 0 0 0 0 0 0
37550- 0 0 0 0 0 0 0 0 0 0 0 0
37551- 0 0 0 0 0 0 0 0 0 0 0 0
37552- 0 0 0 0 0 0 0 0 0 0 0 0
37553- 0 0 0 0 0 0 0 0 0 0 0 0
37554- 0 0 0 0 0 0 0 0 0 0 0 0
37555- 0 0 0 0 0 0 0 0 0 0 0 0
37556- 0 0 0 0 0 0 0 0 0 6 6 6
37557- 6 6 6 10 10 10 14 14 14 18 18 18
37558- 18 18 18 14 14 14 10 10 10 6 6 6
37559- 0 0 0 0 0 0 0 0 0 0 0 0
37560- 0 0 0 0 0 0 0 0 0 0 0 0
37561- 0 0 0 0 0 0 0 0 0 0 0 0
37562- 0 0 0 0 0 0 0 0 0 0 0 0
37563- 0 0 0 0 0 0 0 0 0 6 6 6
37564- 14 14 14 18 18 18 22 22 22 22 22 22
37565- 18 18 18 14 14 14 10 10 10 6 6 6
37566- 0 0 0 0 0 0 0 0 0 0 0 0
37567- 0 0 0 0 0 0 0 0 0 0 0 0
37568- 0 0 0 0 0 0 0 0 0 0 0 0
37569- 0 0 0 0 0 0 0 0 0 0 0 0
37570- 0 0 0 0 0 0 0 0 0 0 0 0
37571+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37572+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37573+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37574+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37575+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37576+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37577+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37578+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37579+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37580+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37581+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37582+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37583+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37584+4 4 4 4 4 4
37585+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37586+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37587+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37588+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37589+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37590+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37591+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37592+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37593+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37594+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37595+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37596+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37597+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37598+4 4 4 4 4 4
37599+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37600+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37601+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37602+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37603+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37604+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37605+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37606+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37607+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37608+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37609+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37610+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37611+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37612+4 4 4 4 4 4
37613+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37614+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37615+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37616+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37617+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37618+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37619+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37620+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37621+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37622+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37623+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37624+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37625+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37626+4 4 4 4 4 4
37627+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37628+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37629+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37630+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37631+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37632+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37633+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37634+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37635+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37636+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37637+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37638+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37639+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37640+4 4 4 4 4 4
37641+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37642+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37643+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37644+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37645+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37646+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37647+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37648+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37649+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37650+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37651+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37652+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37653+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37654+4 4 4 4 4 4
37655+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37656+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37657+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37658+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37659+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
37660+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
37661+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37662+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37663+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37664+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
37665+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
37666+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
37667+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37668+4 4 4 4 4 4
37669+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37670+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37671+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37672+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37673+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
37674+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
37675+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37676+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37677+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37678+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
37679+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
37680+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
37681+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37682+4 4 4 4 4 4
37683+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37684+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37685+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37686+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37687+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
37688+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
37689+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
37690+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37691+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37692+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
37693+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
37694+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
37695+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
37696+4 4 4 4 4 4
37697+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37698+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37699+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37700+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
37701+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
37702+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
37703+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
37704+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37705+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
37706+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
37707+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
37708+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
37709+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
37710+4 4 4 4 4 4
37711+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37712+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37713+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37714+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
37715+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
37716+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
37717+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
37718+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
37719+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
37720+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
37721+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
37722+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
37723+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
37724+4 4 4 4 4 4
37725+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37726+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37727+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
37728+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
37729+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
37730+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
37731+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
37732+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
37733+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
37734+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
37735+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
37736+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
37737+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
37738+4 4 4 4 4 4
37739+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37740+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37741+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
37742+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
37743+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
37744+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
37745+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
37746+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
37747+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
37748+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
37749+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
37750+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
37751+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
37752+4 4 4 4 4 4
37753+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37754+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37755+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
37756+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
37757+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
37758+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
37759+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
37760+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
37761+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
37762+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
37763+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
37764+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
37765+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
37766+4 4 4 4 4 4
37767+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37768+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37769+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
37770+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
37771+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
37772+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
37773+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
37774+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
37775+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
37776+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
37777+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
37778+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
37779+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
37780+4 4 4 4 4 4
37781+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37782+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37783+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
37784+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
37785+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
37786+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
37787+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
37788+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
37789+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
37790+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
37791+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
37792+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
37793+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
37794+4 4 4 4 4 4
37795+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37796+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
37797+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
37798+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
37799+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
37800+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
37801+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
37802+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
37803+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
37804+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
37805+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
37806+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
37807+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
37808+4 4 4 4 4 4
37809+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37810+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
37811+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
37812+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
37813+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
37814+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
37815+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
37816+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
37817+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
37818+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
37819+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
37820+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
37821+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
37822+0 0 0 4 4 4
37823+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
37824+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
37825+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
37826+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
37827+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
37828+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
37829+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
37830+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
37831+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
37832+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
37833+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
37834+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
37835+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
37836+2 0 0 0 0 0
37837+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
37838+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
37839+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
37840+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
37841+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
37842+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
37843+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
37844+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
37845+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
37846+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
37847+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
37848+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
37849+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
37850+37 38 37 0 0 0
37851+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
37852+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
37853+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
37854+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
37855+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
37856+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
37857+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
37858+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
37859+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
37860+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
37861+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
37862+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
37863+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
37864+85 115 134 4 0 0
37865+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
37866+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
37867+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
37868+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
37869+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
37870+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
37871+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
37872+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
37873+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
37874+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
37875+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
37876+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
37877+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
37878+60 73 81 4 0 0
37879+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
37880+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
37881+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
37882+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
37883+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
37884+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
37885+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
37886+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
37887+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
37888+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
37889+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
37890+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
37891+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
37892+16 19 21 4 0 0
37893+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
37894+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
37895+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
37896+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
37897+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
37898+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
37899+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
37900+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
37901+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
37902+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
37903+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
37904+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
37905+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
37906+4 0 0 4 3 3
37907+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
37908+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
37909+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
37910+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
37911+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
37912+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
37913+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
37914+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
37915+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
37916+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
37917+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
37918+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
37919+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
37920+3 2 2 4 4 4
37921+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
37922+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
37923+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
37924+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
37925+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
37926+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
37927+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
37928+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
37929+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
37930+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
37931+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
37932+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
37933+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
37934+4 4 4 4 4 4
37935+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
37936+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
37937+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
37938+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
37939+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
37940+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
37941+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
37942+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
37943+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
37944+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
37945+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
37946+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
37947+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
37948+4 4 4 4 4 4
37949+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
37950+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
37951+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
37952+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
37953+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
37954+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
37955+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
37956+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
37957+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
37958+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
37959+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
37960+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
37961+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
37962+5 5 5 5 5 5
37963+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
37964+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
37965+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
37966+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
37967+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
37968+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
37969+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
37970+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
37971+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
37972+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
37973+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
37974+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
37975+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
37976+5 5 5 4 4 4
37977+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
37978+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
37979+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
37980+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
37981+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
37982+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
37983+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
37984+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
37985+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
37986+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
37987+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
37988+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
37989+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
37990+4 4 4 4 4 4
37991+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
37992+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
37993+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
37994+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
37995+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
37996+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
37997+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
37998+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
37999+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
38000+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
38001+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
38002+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
38003+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38004+4 4 4 4 4 4
38005+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
38006+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
38007+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
38008+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
38009+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38010+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
38011+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
38012+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
38013+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
38014+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
38015+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
38016+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38017+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38018+4 4 4 4 4 4
38019+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
38020+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
38021+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
38022+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
38023+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38024+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38025+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38026+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
38027+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
38028+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
38029+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
38030+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38031+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38032+4 4 4 4 4 4
38033+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
38034+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
38035+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
38036+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
38037+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38038+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
38039+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38040+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
38041+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
38042+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
38043+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38044+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38045+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38046+4 4 4 4 4 4
38047+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
38048+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
38049+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
38050+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
38051+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38052+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
38053+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
38054+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
38055+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
38056+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
38057+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
38058+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38059+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38060+4 4 4 4 4 4
38061+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
38062+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
38063+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
38064+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
38065+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38066+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
38067+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
38068+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
38069+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
38070+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
38071+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
38072+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38073+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38074+4 4 4 4 4 4
38075+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
38076+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
38077+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
38078+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38079+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
38080+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
38081+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
38082+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
38083+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
38084+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
38085+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38086+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38087+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38088+4 4 4 4 4 4
38089+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
38090+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
38091+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
38092+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38093+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38094+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
38095+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
38096+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
38097+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
38098+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
38099+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38100+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38101+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38102+4 4 4 4 4 4
38103+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
38104+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
38105+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38106+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38107+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38108+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
38109+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
38110+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
38111+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
38112+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
38113+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38114+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38115+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38116+4 4 4 4 4 4
38117+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
38118+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
38119+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38120+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38121+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38122+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
38123+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
38124+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
38125+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38126+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38127+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38128+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38129+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38130+4 4 4 4 4 4
38131+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38132+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
38133+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38134+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
38135+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
38136+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
38137+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
38138+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
38139+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38140+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38141+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38142+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38143+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38144+4 4 4 4 4 4
38145+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38146+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
38147+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38148+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
38149+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38150+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
38151+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
38152+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
38153+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38154+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38155+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38156+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38157+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38158+4 4 4 4 4 4
38159+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
38160+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
38161+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38162+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
38163+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
38164+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
38165+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
38166+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
38167+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38168+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38169+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38170+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38171+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38172+4 4 4 4 4 4
38173+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
38174+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
38175+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38176+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
38177+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
38178+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
38179+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
38180+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
38181+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38182+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38183+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38184+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38185+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38186+4 4 4 4 4 4
38187+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38188+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
38189+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38190+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
38191+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
38192+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
38193+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
38194+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
38195+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38196+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38197+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38198+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38199+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38200+4 4 4 4 4 4
38201+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
38202+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
38203+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38204+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
38205+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
38206+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
38207+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
38208+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
38209+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
38210+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38211+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38212+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38213+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38214+4 4 4 4 4 4
38215+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38216+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
38217+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
38218+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
38219+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
38220+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
38221+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
38222+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
38223+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38224+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38225+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38226+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38227+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38228+4 4 4 4 4 4
38229+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38230+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
38231+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38232+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
38233+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
38234+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
38235+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
38236+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
38237+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38238+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38239+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38240+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38241+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38242+4 4 4 4 4 4
38243+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38244+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
38245+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
38246+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
38247+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
38248+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
38249+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38250+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
38251+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38252+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38253+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38254+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38255+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38256+4 4 4 4 4 4
38257+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38258+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
38259+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
38260+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38261+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
38262+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
38263+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38264+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
38265+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38266+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38267+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38268+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38269+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38270+4 4 4 4 4 4
38271+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38272+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
38273+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
38274+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
38275+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
38276+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
38277+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
38278+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
38279+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
38280+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38281+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38282+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38283+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38284+4 4 4 4 4 4
38285+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38286+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
38287+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
38288+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
38289+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
38290+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
38291+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
38292+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
38293+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
38294+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38295+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38296+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38297+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38298+4 4 4 4 4 4
38299+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
38300+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
38301+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
38302+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
38303+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38304+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
38305+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
38306+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
38307+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
38308+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38309+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38310+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38311+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38312+4 4 4 4 4 4
38313+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38314+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
38315+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
38316+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
38317+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
38318+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
38319+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
38320+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
38321+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
38322+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38323+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38324+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38325+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38326+4 4 4 4 4 4
38327+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
38328+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
38329+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
38330+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
38331+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
38332+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
38333+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
38334+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
38335+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
38336+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
38337+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38338+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38339+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38340+4 4 4 4 4 4
38341+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
38342+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
38343+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
38344+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
38345+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
38346+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
38347+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
38348+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
38349+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
38350+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
38351+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38352+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38353+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38354+4 4 4 4 4 4
38355+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
38356+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
38357+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
38358+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
38359+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
38360+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
38361+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38362+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
38363+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
38364+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
38365+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38366+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38367+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38368+4 4 4 4 4 4
38369+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
38370+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
38371+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
38372+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
38373+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
38374+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
38375+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
38376+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
38377+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
38378+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
38379+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38380+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38381+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38382+4 4 4 4 4 4
38383+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
38384+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
38385+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
38386+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
38387+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
38388+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
38389+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
38390+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
38391+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
38392+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
38393+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38394+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38395+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38396+4 4 4 4 4 4
38397+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38398+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
38399+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
38400+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
38401+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
38402+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
38403+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
38404+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
38405+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
38406+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
38407+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38408+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38409+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38410+4 4 4 4 4 4
38411+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
38412+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
38413+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
38414+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
38415+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
38416+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
38417+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
38418+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
38419+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
38420+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38421+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38422+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38423+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38424+4 4 4 4 4 4
38425+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
38426+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
38427+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
38428+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
38429+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
38430+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
38431+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
38432+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
38433+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
38434+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38435+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38436+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38437+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38438+4 4 4 4 4 4
38439+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
38440+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38441+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
38442+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
38443+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
38444+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
38445+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
38446+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
38447+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
38448+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38449+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38450+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38451+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38452+4 4 4 4 4 4
38453+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
38454+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
38455+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
38456+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
38457+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
38458+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
38459+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
38460+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
38461+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
38462+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38463+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38464+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38465+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38466+4 4 4 4 4 4
38467+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
38468+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
38469+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
38470+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
38471+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
38472+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
38473+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
38474+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
38475+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38476+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38477+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38478+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38479+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38480+4 4 4 4 4 4
38481+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
38482+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
38483+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
38484+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
38485+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
38486+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
38487+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
38488+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
38489+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38490+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38491+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38492+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38493+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38494+4 4 4 4 4 4
38495+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
38496+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
38497+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
38498+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
38499+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
38500+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
38501+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
38502+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
38503+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38504+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38505+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38506+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38507+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38508+4 4 4 4 4 4
38509+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38510+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
38511+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
38512+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
38513+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
38514+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
38515+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
38516+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
38517+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38518+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38519+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38520+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38521+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38522+4 4 4 4 4 4
38523+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38524+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
38525+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
38526+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
38527+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
38528+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
38529+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
38530+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
38531+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38532+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38533+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38534+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38535+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38536+4 4 4 4 4 4
38537+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38538+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
38539+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
38540+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
38541+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
38542+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
38543+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
38544+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38545+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38546+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38547+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38548+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38549+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38550+4 4 4 4 4 4
38551+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38552+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38553+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
38554+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
38555+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
38556+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
38557+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
38558+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38559+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38560+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38561+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38562+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38563+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38564+4 4 4 4 4 4
38565+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38566+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38567+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38568+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
38569+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
38570+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
38571+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
38572+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38573+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38574+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38575+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38576+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38577+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38578+4 4 4 4 4 4
38579+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38580+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38581+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38582+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
38583+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
38584+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
38585+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
38586+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38587+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38588+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38589+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38590+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38591+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38592+4 4 4 4 4 4
38593+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38594+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38595+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38596+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
38597+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38598+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
38599+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
38600+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38601+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38602+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38603+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38604+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38605+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38606+4 4 4 4 4 4
38607+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38608+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38609+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38610+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
38611+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
38612+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
38613+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
38614+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38615+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38616+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38617+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38618+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38619+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38620+4 4 4 4 4 4
38621+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38622+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38623+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38624+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38625+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
38626+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
38627+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38628+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38629+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38630+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38631+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38632+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38633+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38634+4 4 4 4 4 4
38635+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38636+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38637+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38638+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38639+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
38640+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
38641+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
38642+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38643+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38644+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38645+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38646+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38647+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38648+4 4 4 4 4 4
38649+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38650+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38651+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38652+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38653+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
38654+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
38655+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38656+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38657+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38658+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38659+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38660+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38661+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38662+4 4 4 4 4 4
38663+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38664+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38665+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38666+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38667+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
38668+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
38669+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38670+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38671+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38672+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38673+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38674+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38675+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38676+4 4 4 4 4 4
38677+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38678+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38679+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38680+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38681+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
38682+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
38683+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38684+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38685+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38686+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38687+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38688+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38689+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38690+4 4 4 4 4 4
38691diff -urNp linux-3.0.7/drivers/video/udlfb.c linux-3.0.7/drivers/video/udlfb.c
38692--- linux-3.0.7/drivers/video/udlfb.c 2011-07-21 22:17:23.000000000 -0400
38693+++ linux-3.0.7/drivers/video/udlfb.c 2011-08-23 21:47:56.000000000 -0400
38694@@ -586,11 +586,11 @@ int dlfb_handle_damage(struct dlfb_data
38695 dlfb_urb_completion(urb);
38696
38697 error:
38698- atomic_add(bytes_sent, &dev->bytes_sent);
38699- atomic_add(bytes_identical, &dev->bytes_identical);
38700- atomic_add(width*height*2, &dev->bytes_rendered);
38701+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
38702+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
38703+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
38704 end_cycles = get_cycles();
38705- atomic_add(((unsigned int) ((end_cycles - start_cycles)
38706+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
38707 >> 10)), /* Kcycles */
38708 &dev->cpu_kcycles_used);
38709
38710@@ -711,11 +711,11 @@ static void dlfb_dpy_deferred_io(struct
38711 dlfb_urb_completion(urb);
38712
38713 error:
38714- atomic_add(bytes_sent, &dev->bytes_sent);
38715- atomic_add(bytes_identical, &dev->bytes_identical);
38716- atomic_add(bytes_rendered, &dev->bytes_rendered);
38717+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
38718+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
38719+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
38720 end_cycles = get_cycles();
38721- atomic_add(((unsigned int) ((end_cycles - start_cycles)
38722+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
38723 >> 10)), /* Kcycles */
38724 &dev->cpu_kcycles_used);
38725 }
38726@@ -1307,7 +1307,7 @@ static ssize_t metrics_bytes_rendered_sh
38727 struct fb_info *fb_info = dev_get_drvdata(fbdev);
38728 struct dlfb_data *dev = fb_info->par;
38729 return snprintf(buf, PAGE_SIZE, "%u\n",
38730- atomic_read(&dev->bytes_rendered));
38731+ atomic_read_unchecked(&dev->bytes_rendered));
38732 }
38733
38734 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
38735@@ -1315,7 +1315,7 @@ static ssize_t metrics_bytes_identical_s
38736 struct fb_info *fb_info = dev_get_drvdata(fbdev);
38737 struct dlfb_data *dev = fb_info->par;
38738 return snprintf(buf, PAGE_SIZE, "%u\n",
38739- atomic_read(&dev->bytes_identical));
38740+ atomic_read_unchecked(&dev->bytes_identical));
38741 }
38742
38743 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
38744@@ -1323,7 +1323,7 @@ static ssize_t metrics_bytes_sent_show(s
38745 struct fb_info *fb_info = dev_get_drvdata(fbdev);
38746 struct dlfb_data *dev = fb_info->par;
38747 return snprintf(buf, PAGE_SIZE, "%u\n",
38748- atomic_read(&dev->bytes_sent));
38749+ atomic_read_unchecked(&dev->bytes_sent));
38750 }
38751
38752 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
38753@@ -1331,7 +1331,7 @@ static ssize_t metrics_cpu_kcycles_used_
38754 struct fb_info *fb_info = dev_get_drvdata(fbdev);
38755 struct dlfb_data *dev = fb_info->par;
38756 return snprintf(buf, PAGE_SIZE, "%u\n",
38757- atomic_read(&dev->cpu_kcycles_used));
38758+ atomic_read_unchecked(&dev->cpu_kcycles_used));
38759 }
38760
38761 static ssize_t edid_show(
38762@@ -1388,10 +1388,10 @@ static ssize_t metrics_reset_store(struc
38763 struct fb_info *fb_info = dev_get_drvdata(fbdev);
38764 struct dlfb_data *dev = fb_info->par;
38765
38766- atomic_set(&dev->bytes_rendered, 0);
38767- atomic_set(&dev->bytes_identical, 0);
38768- atomic_set(&dev->bytes_sent, 0);
38769- atomic_set(&dev->cpu_kcycles_used, 0);
38770+ atomic_set_unchecked(&dev->bytes_rendered, 0);
38771+ atomic_set_unchecked(&dev->bytes_identical, 0);
38772+ atomic_set_unchecked(&dev->bytes_sent, 0);
38773+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
38774
38775 return count;
38776 }
38777diff -urNp linux-3.0.7/drivers/video/uvesafb.c linux-3.0.7/drivers/video/uvesafb.c
38778--- linux-3.0.7/drivers/video/uvesafb.c 2011-07-21 22:17:23.000000000 -0400
38779+++ linux-3.0.7/drivers/video/uvesafb.c 2011-08-23 21:47:56.000000000 -0400
38780@@ -19,6 +19,7 @@
38781 #include <linux/io.h>
38782 #include <linux/mutex.h>
38783 #include <linux/slab.h>
38784+#include <linux/moduleloader.h>
38785 #include <video/edid.h>
38786 #include <video/uvesafb.h>
38787 #ifdef CONFIG_X86
38788@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
38789 NULL,
38790 };
38791
38792- return call_usermodehelper(v86d_path, argv, envp, 1);
38793+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
38794 }
38795
38796 /*
38797@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
38798 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
38799 par->pmi_setpal = par->ypan = 0;
38800 } else {
38801+
38802+#ifdef CONFIG_PAX_KERNEXEC
38803+#ifdef CONFIG_MODULES
38804+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
38805+#endif
38806+ if (!par->pmi_code) {
38807+ par->pmi_setpal = par->ypan = 0;
38808+ return 0;
38809+ }
38810+#endif
38811+
38812 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
38813 + task->t.regs.edi);
38814+
38815+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38816+ pax_open_kernel();
38817+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
38818+ pax_close_kernel();
38819+
38820+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
38821+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
38822+#else
38823 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
38824 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
38825+#endif
38826+
38827 printk(KERN_INFO "uvesafb: protected mode interface info at "
38828 "%04x:%04x\n",
38829 (u16)task->t.regs.es, (u16)task->t.regs.edi);
38830@@ -1821,6 +1844,11 @@ out:
38831 if (par->vbe_modes)
38832 kfree(par->vbe_modes);
38833
38834+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38835+ if (par->pmi_code)
38836+ module_free_exec(NULL, par->pmi_code);
38837+#endif
38838+
38839 framebuffer_release(info);
38840 return err;
38841 }
38842@@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
38843 kfree(par->vbe_state_orig);
38844 if (par->vbe_state_saved)
38845 kfree(par->vbe_state_saved);
38846+
38847+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38848+ if (par->pmi_code)
38849+ module_free_exec(NULL, par->pmi_code);
38850+#endif
38851+
38852 }
38853
38854 framebuffer_release(info);
38855diff -urNp linux-3.0.7/drivers/video/vesafb.c linux-3.0.7/drivers/video/vesafb.c
38856--- linux-3.0.7/drivers/video/vesafb.c 2011-07-21 22:17:23.000000000 -0400
38857+++ linux-3.0.7/drivers/video/vesafb.c 2011-08-23 21:47:56.000000000 -0400
38858@@ -9,6 +9,7 @@
38859 */
38860
38861 #include <linux/module.h>
38862+#include <linux/moduleloader.h>
38863 #include <linux/kernel.h>
38864 #include <linux/errno.h>
38865 #include <linux/string.h>
38866@@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
38867 static int vram_total __initdata; /* Set total amount of memory */
38868 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
38869 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
38870-static void (*pmi_start)(void) __read_mostly;
38871-static void (*pmi_pal) (void) __read_mostly;
38872+static void (*pmi_start)(void) __read_only;
38873+static void (*pmi_pal) (void) __read_only;
38874 static int depth __read_mostly;
38875 static int vga_compat __read_mostly;
38876 /* --------------------------------------------------------------------- */
38877@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
38878 unsigned int size_vmode;
38879 unsigned int size_remap;
38880 unsigned int size_total;
38881+ void *pmi_code = NULL;
38882
38883 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
38884 return -ENODEV;
38885@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
38886 size_remap = size_total;
38887 vesafb_fix.smem_len = size_remap;
38888
38889-#ifndef __i386__
38890- screen_info.vesapm_seg = 0;
38891-#endif
38892-
38893 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
38894 printk(KERN_WARNING
38895 "vesafb: cannot reserve video memory at 0x%lx\n",
38896@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct pl
38897 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
38898 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
38899
38900+#ifdef __i386__
38901+
38902+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38903+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
38904+ if (!pmi_code)
38905+#elif !defined(CONFIG_PAX_KERNEXEC)
38906+ if (0)
38907+#endif
38908+
38909+#endif
38910+ screen_info.vesapm_seg = 0;
38911+
38912 if (screen_info.vesapm_seg) {
38913- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
38914- screen_info.vesapm_seg,screen_info.vesapm_off);
38915+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
38916+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
38917 }
38918
38919 if (screen_info.vesapm_seg < 0xc000)
38920@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct pl
38921
38922 if (ypan || pmi_setpal) {
38923 unsigned short *pmi_base;
38924+
38925 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
38926- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
38927- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
38928+
38929+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38930+ pax_open_kernel();
38931+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
38932+#else
38933+ pmi_code = pmi_base;
38934+#endif
38935+
38936+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
38937+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
38938+
38939+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38940+ pmi_start = ktva_ktla(pmi_start);
38941+ pmi_pal = ktva_ktla(pmi_pal);
38942+ pax_close_kernel();
38943+#endif
38944+
38945 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
38946 if (pmi_base[3]) {
38947 printk(KERN_INFO "vesafb: pmi: ports = ");
38948@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct pl
38949 info->node, info->fix.id);
38950 return 0;
38951 err:
38952+
38953+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38954+ module_free_exec(NULL, pmi_code);
38955+#endif
38956+
38957 if (info->screen_base)
38958 iounmap(info->screen_base);
38959 framebuffer_release(info);
38960diff -urNp linux-3.0.7/drivers/video/via/via_clock.h linux-3.0.7/drivers/video/via/via_clock.h
38961--- linux-3.0.7/drivers/video/via/via_clock.h 2011-07-21 22:17:23.000000000 -0400
38962+++ linux-3.0.7/drivers/video/via/via_clock.h 2011-08-23 21:47:56.000000000 -0400
38963@@ -56,7 +56,7 @@ struct via_clock {
38964
38965 void (*set_engine_pll_state)(u8 state);
38966 void (*set_engine_pll)(struct via_pll_config config);
38967-};
38968+} __no_const;
38969
38970
38971 static inline u32 get_pll_internal_frequency(u32 ref_freq,
38972diff -urNp linux-3.0.7/drivers/virtio/virtio_balloon.c linux-3.0.7/drivers/virtio/virtio_balloon.c
38973--- linux-3.0.7/drivers/virtio/virtio_balloon.c 2011-07-21 22:17:23.000000000 -0400
38974+++ linux-3.0.7/drivers/virtio/virtio_balloon.c 2011-08-23 21:48:14.000000000 -0400
38975@@ -174,6 +174,8 @@ static void update_balloon_stats(struct
38976 struct sysinfo i;
38977 int idx = 0;
38978
38979+ pax_track_stack();
38980+
38981 all_vm_events(events);
38982 si_meminfo(&i);
38983
38984diff -urNp linux-3.0.7/fs/9p/vfs_inode.c linux-3.0.7/fs/9p/vfs_inode.c
38985--- linux-3.0.7/fs/9p/vfs_inode.c 2011-10-16 21:54:54.000000000 -0400
38986+++ linux-3.0.7/fs/9p/vfs_inode.c 2011-10-16 21:55:28.000000000 -0400
38987@@ -1264,7 +1264,7 @@ static void *v9fs_vfs_follow_link(struct
38988 void
38989 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
38990 {
38991- char *s = nd_get_link(nd);
38992+ const char *s = nd_get_link(nd);
38993
38994 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
38995 IS_ERR(s) ? "<error>" : s);
38996diff -urNp linux-3.0.7/fs/aio.c linux-3.0.7/fs/aio.c
38997--- linux-3.0.7/fs/aio.c 2011-07-21 22:17:23.000000000 -0400
38998+++ linux-3.0.7/fs/aio.c 2011-08-23 21:48:14.000000000 -0400
38999@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx
39000 size += sizeof(struct io_event) * nr_events;
39001 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
39002
39003- if (nr_pages < 0)
39004+ if (nr_pages <= 0)
39005 return -EINVAL;
39006
39007 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
39008@@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
39009 struct aio_timeout to;
39010 int retry = 0;
39011
39012+ pax_track_stack();
39013+
39014 /* needed to zero any padding within an entry (there shouldn't be
39015 * any, but C is fun!
39016 */
39017@@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
39018 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
39019 {
39020 ssize_t ret;
39021+ struct iovec iovstack;
39022
39023 #ifdef CONFIG_COMPAT
39024 if (compat)
39025 ret = compat_rw_copy_check_uvector(type,
39026 (struct compat_iovec __user *)kiocb->ki_buf,
39027- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39028+ kiocb->ki_nbytes, 1, &iovstack,
39029 &kiocb->ki_iovec);
39030 else
39031 #endif
39032 ret = rw_copy_check_uvector(type,
39033 (struct iovec __user *)kiocb->ki_buf,
39034- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39035+ kiocb->ki_nbytes, 1, &iovstack,
39036 &kiocb->ki_iovec);
39037 if (ret < 0)
39038 goto out;
39039
39040+ if (kiocb->ki_iovec == &iovstack) {
39041+ kiocb->ki_inline_vec = iovstack;
39042+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
39043+ }
39044 kiocb->ki_nr_segs = kiocb->ki_nbytes;
39045 kiocb->ki_cur_seg = 0;
39046 /* ki_nbytes/left now reflect bytes instead of segs */
39047diff -urNp linux-3.0.7/fs/attr.c linux-3.0.7/fs/attr.c
39048--- linux-3.0.7/fs/attr.c 2011-07-21 22:17:23.000000000 -0400
39049+++ linux-3.0.7/fs/attr.c 2011-08-23 21:48:14.000000000 -0400
39050@@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode
39051 unsigned long limit;
39052
39053 limit = rlimit(RLIMIT_FSIZE);
39054+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
39055 if (limit != RLIM_INFINITY && offset > limit)
39056 goto out_sig;
39057 if (offset > inode->i_sb->s_maxbytes)
39058diff -urNp linux-3.0.7/fs/autofs4/waitq.c linux-3.0.7/fs/autofs4/waitq.c
39059--- linux-3.0.7/fs/autofs4/waitq.c 2011-07-21 22:17:23.000000000 -0400
39060+++ linux-3.0.7/fs/autofs4/waitq.c 2011-10-06 04:17:55.000000000 -0400
39061@@ -60,7 +60,7 @@ static int autofs4_write(struct file *fi
39062 {
39063 unsigned long sigpipe, flags;
39064 mm_segment_t fs;
39065- const char *data = (const char *)addr;
39066+ const char __user *data = (const char __force_user *)addr;
39067 ssize_t wr = 0;
39068
39069 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
39070diff -urNp linux-3.0.7/fs/befs/linuxvfs.c linux-3.0.7/fs/befs/linuxvfs.c
39071--- linux-3.0.7/fs/befs/linuxvfs.c 2011-09-02 18:11:26.000000000 -0400
39072+++ linux-3.0.7/fs/befs/linuxvfs.c 2011-08-29 23:26:27.000000000 -0400
39073@@ -503,7 +503,7 @@ static void befs_put_link(struct dentry
39074 {
39075 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
39076 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
39077- char *link = nd_get_link(nd);
39078+ const char *link = nd_get_link(nd);
39079 if (!IS_ERR(link))
39080 kfree(link);
39081 }
39082diff -urNp linux-3.0.7/fs/binfmt_aout.c linux-3.0.7/fs/binfmt_aout.c
39083--- linux-3.0.7/fs/binfmt_aout.c 2011-07-21 22:17:23.000000000 -0400
39084+++ linux-3.0.7/fs/binfmt_aout.c 2011-08-23 21:48:14.000000000 -0400
39085@@ -16,6 +16,7 @@
39086 #include <linux/string.h>
39087 #include <linux/fs.h>
39088 #include <linux/file.h>
39089+#include <linux/security.h>
39090 #include <linux/stat.h>
39091 #include <linux/fcntl.h>
39092 #include <linux/ptrace.h>
39093@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
39094 #endif
39095 # define START_STACK(u) ((void __user *)u.start_stack)
39096
39097+ memset(&dump, 0, sizeof(dump));
39098+
39099 fs = get_fs();
39100 set_fs(KERNEL_DS);
39101 has_dumped = 1;
39102@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
39103
39104 /* If the size of the dump file exceeds the rlimit, then see what would happen
39105 if we wrote the stack, but not the data area. */
39106+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
39107 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
39108 dump.u_dsize = 0;
39109
39110 /* Make sure we have enough room to write the stack and data areas. */
39111+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
39112 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
39113 dump.u_ssize = 0;
39114
39115@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
39116 rlim = rlimit(RLIMIT_DATA);
39117 if (rlim >= RLIM_INFINITY)
39118 rlim = ~0;
39119+
39120+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
39121 if (ex.a_data + ex.a_bss > rlim)
39122 return -ENOMEM;
39123
39124@@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
39125 install_exec_creds(bprm);
39126 current->flags &= ~PF_FORKNOEXEC;
39127
39128+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39129+ current->mm->pax_flags = 0UL;
39130+#endif
39131+
39132+#ifdef CONFIG_PAX_PAGEEXEC
39133+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
39134+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
39135+
39136+#ifdef CONFIG_PAX_EMUTRAMP
39137+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
39138+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
39139+#endif
39140+
39141+#ifdef CONFIG_PAX_MPROTECT
39142+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
39143+ current->mm->pax_flags |= MF_PAX_MPROTECT;
39144+#endif
39145+
39146+ }
39147+#endif
39148+
39149 if (N_MAGIC(ex) == OMAGIC) {
39150 unsigned long text_addr, map_size;
39151 loff_t pos;
39152@@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
39153
39154 down_write(&current->mm->mmap_sem);
39155 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
39156- PROT_READ | PROT_WRITE | PROT_EXEC,
39157+ PROT_READ | PROT_WRITE,
39158 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
39159 fd_offset + ex.a_text);
39160 up_write(&current->mm->mmap_sem);
39161diff -urNp linux-3.0.7/fs/binfmt_elf.c linux-3.0.7/fs/binfmt_elf.c
39162--- linux-3.0.7/fs/binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
39163+++ linux-3.0.7/fs/binfmt_elf.c 2011-08-23 21:48:14.000000000 -0400
39164@@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
39165 #define elf_core_dump NULL
39166 #endif
39167
39168+#ifdef CONFIG_PAX_MPROTECT
39169+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
39170+#endif
39171+
39172 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
39173 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
39174 #else
39175@@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
39176 .load_binary = load_elf_binary,
39177 .load_shlib = load_elf_library,
39178 .core_dump = elf_core_dump,
39179+
39180+#ifdef CONFIG_PAX_MPROTECT
39181+ .handle_mprotect= elf_handle_mprotect,
39182+#endif
39183+
39184 .min_coredump = ELF_EXEC_PAGESIZE,
39185 };
39186
39187@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
39188
39189 static int set_brk(unsigned long start, unsigned long end)
39190 {
39191+ unsigned long e = end;
39192+
39193 start = ELF_PAGEALIGN(start);
39194 end = ELF_PAGEALIGN(end);
39195 if (end > start) {
39196@@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
39197 if (BAD_ADDR(addr))
39198 return addr;
39199 }
39200- current->mm->start_brk = current->mm->brk = end;
39201+ current->mm->start_brk = current->mm->brk = e;
39202 return 0;
39203 }
39204
39205@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
39206 elf_addr_t __user *u_rand_bytes;
39207 const char *k_platform = ELF_PLATFORM;
39208 const char *k_base_platform = ELF_BASE_PLATFORM;
39209- unsigned char k_rand_bytes[16];
39210+ u32 k_rand_bytes[4];
39211 int items;
39212 elf_addr_t *elf_info;
39213 int ei_index = 0;
39214 const struct cred *cred = current_cred();
39215 struct vm_area_struct *vma;
39216+ unsigned long saved_auxv[AT_VECTOR_SIZE];
39217+
39218+ pax_track_stack();
39219
39220 /*
39221 * In some cases (e.g. Hyper-Threading), we want to avoid L1
39222@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
39223 * Generate 16 random bytes for userspace PRNG seeding.
39224 */
39225 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
39226- u_rand_bytes = (elf_addr_t __user *)
39227- STACK_ALLOC(p, sizeof(k_rand_bytes));
39228+ srandom32(k_rand_bytes[0] ^ random32());
39229+ srandom32(k_rand_bytes[1] ^ random32());
39230+ srandom32(k_rand_bytes[2] ^ random32());
39231+ srandom32(k_rand_bytes[3] ^ random32());
39232+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
39233+ u_rand_bytes = (elf_addr_t __user *) p;
39234 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
39235 return -EFAULT;
39236
39237@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
39238 return -EFAULT;
39239 current->mm->env_end = p;
39240
39241+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
39242+
39243 /* Put the elf_info on the stack in the right place. */
39244 sp = (elf_addr_t __user *)envp + 1;
39245- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
39246+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
39247 return -EFAULT;
39248 return 0;
39249 }
39250@@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
39251 {
39252 struct elf_phdr *elf_phdata;
39253 struct elf_phdr *eppnt;
39254- unsigned long load_addr = 0;
39255+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
39256 int load_addr_set = 0;
39257 unsigned long last_bss = 0, elf_bss = 0;
39258- unsigned long error = ~0UL;
39259+ unsigned long error = -EINVAL;
39260 unsigned long total_size;
39261 int retval, i, size;
39262
39263@@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
39264 goto out_close;
39265 }
39266
39267+#ifdef CONFIG_PAX_SEGMEXEC
39268+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
39269+ pax_task_size = SEGMEXEC_TASK_SIZE;
39270+#endif
39271+
39272 eppnt = elf_phdata;
39273 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
39274 if (eppnt->p_type == PT_LOAD) {
39275@@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
39276 k = load_addr + eppnt->p_vaddr;
39277 if (BAD_ADDR(k) ||
39278 eppnt->p_filesz > eppnt->p_memsz ||
39279- eppnt->p_memsz > TASK_SIZE ||
39280- TASK_SIZE - eppnt->p_memsz < k) {
39281+ eppnt->p_memsz > pax_task_size ||
39282+ pax_task_size - eppnt->p_memsz < k) {
39283 error = -ENOMEM;
39284 goto out_close;
39285 }
39286@@ -528,6 +553,193 @@ out:
39287 return error;
39288 }
39289
39290+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
39291+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
39292+{
39293+ unsigned long pax_flags = 0UL;
39294+
39295+#ifdef CONFIG_PAX_PAGEEXEC
39296+ if (elf_phdata->p_flags & PF_PAGEEXEC)
39297+ pax_flags |= MF_PAX_PAGEEXEC;
39298+#endif
39299+
39300+#ifdef CONFIG_PAX_SEGMEXEC
39301+ if (elf_phdata->p_flags & PF_SEGMEXEC)
39302+ pax_flags |= MF_PAX_SEGMEXEC;
39303+#endif
39304+
39305+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39306+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39307+ if ((__supported_pte_mask & _PAGE_NX))
39308+ pax_flags &= ~MF_PAX_SEGMEXEC;
39309+ else
39310+ pax_flags &= ~MF_PAX_PAGEEXEC;
39311+ }
39312+#endif
39313+
39314+#ifdef CONFIG_PAX_EMUTRAMP
39315+ if (elf_phdata->p_flags & PF_EMUTRAMP)
39316+ pax_flags |= MF_PAX_EMUTRAMP;
39317+#endif
39318+
39319+#ifdef CONFIG_PAX_MPROTECT
39320+ if (elf_phdata->p_flags & PF_MPROTECT)
39321+ pax_flags |= MF_PAX_MPROTECT;
39322+#endif
39323+
39324+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39325+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
39326+ pax_flags |= MF_PAX_RANDMMAP;
39327+#endif
39328+
39329+ return pax_flags;
39330+}
39331+#endif
39332+
39333+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39334+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
39335+{
39336+ unsigned long pax_flags = 0UL;
39337+
39338+#ifdef CONFIG_PAX_PAGEEXEC
39339+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
39340+ pax_flags |= MF_PAX_PAGEEXEC;
39341+#endif
39342+
39343+#ifdef CONFIG_PAX_SEGMEXEC
39344+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
39345+ pax_flags |= MF_PAX_SEGMEXEC;
39346+#endif
39347+
39348+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39349+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39350+ if ((__supported_pte_mask & _PAGE_NX))
39351+ pax_flags &= ~MF_PAX_SEGMEXEC;
39352+ else
39353+ pax_flags &= ~MF_PAX_PAGEEXEC;
39354+ }
39355+#endif
39356+
39357+#ifdef CONFIG_PAX_EMUTRAMP
39358+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
39359+ pax_flags |= MF_PAX_EMUTRAMP;
39360+#endif
39361+
39362+#ifdef CONFIG_PAX_MPROTECT
39363+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
39364+ pax_flags |= MF_PAX_MPROTECT;
39365+#endif
39366+
39367+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39368+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
39369+ pax_flags |= MF_PAX_RANDMMAP;
39370+#endif
39371+
39372+ return pax_flags;
39373+}
39374+#endif
39375+
39376+#ifdef CONFIG_PAX_EI_PAX
39377+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
39378+{
39379+ unsigned long pax_flags = 0UL;
39380+
39381+#ifdef CONFIG_PAX_PAGEEXEC
39382+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
39383+ pax_flags |= MF_PAX_PAGEEXEC;
39384+#endif
39385+
39386+#ifdef CONFIG_PAX_SEGMEXEC
39387+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
39388+ pax_flags |= MF_PAX_SEGMEXEC;
39389+#endif
39390+
39391+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39392+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39393+ if ((__supported_pte_mask & _PAGE_NX))
39394+ pax_flags &= ~MF_PAX_SEGMEXEC;
39395+ else
39396+ pax_flags &= ~MF_PAX_PAGEEXEC;
39397+ }
39398+#endif
39399+
39400+#ifdef CONFIG_PAX_EMUTRAMP
39401+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
39402+ pax_flags |= MF_PAX_EMUTRAMP;
39403+#endif
39404+
39405+#ifdef CONFIG_PAX_MPROTECT
39406+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
39407+ pax_flags |= MF_PAX_MPROTECT;
39408+#endif
39409+
39410+#ifdef CONFIG_PAX_ASLR
39411+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
39412+ pax_flags |= MF_PAX_RANDMMAP;
39413+#endif
39414+
39415+ return pax_flags;
39416+}
39417+#endif
39418+
39419+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39420+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
39421+{
39422+ unsigned long pax_flags = 0UL;
39423+
39424+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39425+ unsigned long i;
39426+ int found_flags = 0;
39427+#endif
39428+
39429+#ifdef CONFIG_PAX_EI_PAX
39430+ pax_flags = pax_parse_ei_pax(elf_ex);
39431+#endif
39432+
39433+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39434+ for (i = 0UL; i < elf_ex->e_phnum; i++)
39435+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
39436+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
39437+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
39438+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
39439+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
39440+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
39441+ return -EINVAL;
39442+
39443+#ifdef CONFIG_PAX_SOFTMODE
39444+ if (pax_softmode)
39445+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
39446+ else
39447+#endif
39448+
39449+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
39450+ found_flags = 1;
39451+ break;
39452+ }
39453+#endif
39454+
39455+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
39456+ if (found_flags == 0) {
39457+ struct elf_phdr phdr;
39458+ memset(&phdr, 0, sizeof(phdr));
39459+ phdr.p_flags = PF_NOEMUTRAMP;
39460+#ifdef CONFIG_PAX_SOFTMODE
39461+ if (pax_softmode)
39462+ pax_flags = pax_parse_softmode(&phdr);
39463+ else
39464+#endif
39465+ pax_flags = pax_parse_hardmode(&phdr);
39466+ }
39467+#endif
39468+
39469+ if (0 > pax_check_flags(&pax_flags))
39470+ return -EINVAL;
39471+
39472+ current->mm->pax_flags = pax_flags;
39473+ return 0;
39474+}
39475+#endif
39476+
39477 /*
39478 * These are the functions used to load ELF style executables and shared
39479 * libraries. There is no binary dependent code anywhere else.
39480@@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
39481 {
39482 unsigned int random_variable = 0;
39483
39484+#ifdef CONFIG_PAX_RANDUSTACK
39485+ if (randomize_va_space)
39486+ return stack_top - current->mm->delta_stack;
39487+#endif
39488+
39489 if ((current->flags & PF_RANDOMIZE) &&
39490 !(current->personality & ADDR_NO_RANDOMIZE)) {
39491 random_variable = get_random_int() & STACK_RND_MASK;
39492@@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
39493 unsigned long load_addr = 0, load_bias = 0;
39494 int load_addr_set = 0;
39495 char * elf_interpreter = NULL;
39496- unsigned long error;
39497+ unsigned long error = 0;
39498 struct elf_phdr *elf_ppnt, *elf_phdata;
39499 unsigned long elf_bss, elf_brk;
39500 int retval, i;
39501@@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
39502 unsigned long start_code, end_code, start_data, end_data;
39503 unsigned long reloc_func_desc __maybe_unused = 0;
39504 int executable_stack = EXSTACK_DEFAULT;
39505- unsigned long def_flags = 0;
39506 struct {
39507 struct elfhdr elf_ex;
39508 struct elfhdr interp_elf_ex;
39509 } *loc;
39510+ unsigned long pax_task_size = TASK_SIZE;
39511
39512 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
39513 if (!loc) {
39514@@ -714,11 +931,81 @@ static int load_elf_binary(struct linux_
39515
39516 /* OK, This is the point of no return */
39517 current->flags &= ~PF_FORKNOEXEC;
39518- current->mm->def_flags = def_flags;
39519+
39520+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39521+ current->mm->pax_flags = 0UL;
39522+#endif
39523+
39524+#ifdef CONFIG_PAX_DLRESOLVE
39525+ current->mm->call_dl_resolve = 0UL;
39526+#endif
39527+
39528+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
39529+ current->mm->call_syscall = 0UL;
39530+#endif
39531+
39532+#ifdef CONFIG_PAX_ASLR
39533+ current->mm->delta_mmap = 0UL;
39534+ current->mm->delta_stack = 0UL;
39535+#endif
39536+
39537+ current->mm->def_flags = 0;
39538+
39539+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39540+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
39541+ send_sig(SIGKILL, current, 0);
39542+ goto out_free_dentry;
39543+ }
39544+#endif
39545+
39546+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
39547+ pax_set_initial_flags(bprm);
39548+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
39549+ if (pax_set_initial_flags_func)
39550+ (pax_set_initial_flags_func)(bprm);
39551+#endif
39552+
39553+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
39554+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
39555+ current->mm->context.user_cs_limit = PAGE_SIZE;
39556+ current->mm->def_flags |= VM_PAGEEXEC;
39557+ }
39558+#endif
39559+
39560+#ifdef CONFIG_PAX_SEGMEXEC
39561+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
39562+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
39563+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
39564+ pax_task_size = SEGMEXEC_TASK_SIZE;
39565+ current->mm->def_flags |= VM_NOHUGEPAGE;
39566+ }
39567+#endif
39568+
39569+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
39570+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39571+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
39572+ put_cpu();
39573+ }
39574+#endif
39575
39576 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
39577 may depend on the personality. */
39578 SET_PERSONALITY(loc->elf_ex);
39579+
39580+#ifdef CONFIG_PAX_ASLR
39581+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
39582+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
39583+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
39584+ }
39585+#endif
39586+
39587+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
39588+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39589+ executable_stack = EXSTACK_DISABLE_X;
39590+ current->personality &= ~READ_IMPLIES_EXEC;
39591+ } else
39592+#endif
39593+
39594 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
39595 current->personality |= READ_IMPLIES_EXEC;
39596
39597@@ -800,6 +1087,20 @@ static int load_elf_binary(struct linux_
39598 #else
39599 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
39600 #endif
39601+
39602+#ifdef CONFIG_PAX_RANDMMAP
39603+ /* PaX: randomize base address at the default exe base if requested */
39604+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
39605+#ifdef CONFIG_SPARC64
39606+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
39607+#else
39608+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
39609+#endif
39610+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
39611+ elf_flags |= MAP_FIXED;
39612+ }
39613+#endif
39614+
39615 }
39616
39617 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
39618@@ -832,9 +1133,9 @@ static int load_elf_binary(struct linux_
39619 * allowed task size. Note that p_filesz must always be
39620 * <= p_memsz so it is only necessary to check p_memsz.
39621 */
39622- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
39623- elf_ppnt->p_memsz > TASK_SIZE ||
39624- TASK_SIZE - elf_ppnt->p_memsz < k) {
39625+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
39626+ elf_ppnt->p_memsz > pax_task_size ||
39627+ pax_task_size - elf_ppnt->p_memsz < k) {
39628 /* set_brk can never work. Avoid overflows. */
39629 send_sig(SIGKILL, current, 0);
39630 retval = -EINVAL;
39631@@ -862,6 +1163,11 @@ static int load_elf_binary(struct linux_
39632 start_data += load_bias;
39633 end_data += load_bias;
39634
39635+#ifdef CONFIG_PAX_RANDMMAP
39636+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
39637+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
39638+#endif
39639+
39640 /* Calling set_brk effectively mmaps the pages that we need
39641 * for the bss and break sections. We must do this before
39642 * mapping in the interpreter, to make sure it doesn't wind
39643@@ -873,9 +1179,11 @@ static int load_elf_binary(struct linux_
39644 goto out_free_dentry;
39645 }
39646 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
39647- send_sig(SIGSEGV, current, 0);
39648- retval = -EFAULT; /* Nobody gets to see this, but.. */
39649- goto out_free_dentry;
39650+ /*
39651+ * This bss-zeroing can fail if the ELF
39652+ * file specifies odd protections. So
39653+ * we don't check the return value
39654+ */
39655 }
39656
39657 if (elf_interpreter) {
39658@@ -1090,7 +1398,7 @@ out:
39659 * Decide what to dump of a segment, part, all or none.
39660 */
39661 static unsigned long vma_dump_size(struct vm_area_struct *vma,
39662- unsigned long mm_flags)
39663+ unsigned long mm_flags, long signr)
39664 {
39665 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
39666
39667@@ -1124,7 +1432,7 @@ static unsigned long vma_dump_size(struc
39668 if (vma->vm_file == NULL)
39669 return 0;
39670
39671- if (FILTER(MAPPED_PRIVATE))
39672+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
39673 goto whole;
39674
39675 /*
39676@@ -1346,9 +1654,9 @@ static void fill_auxv_note(struct memelf
39677 {
39678 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
39679 int i = 0;
39680- do
39681+ do {
39682 i += 2;
39683- while (auxv[i - 2] != AT_NULL);
39684+ } while (auxv[i - 2] != AT_NULL);
39685 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
39686 }
39687
39688@@ -1854,14 +2162,14 @@ static void fill_extnum_info(struct elfh
39689 }
39690
39691 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
39692- unsigned long mm_flags)
39693+ struct coredump_params *cprm)
39694 {
39695 struct vm_area_struct *vma;
39696 size_t size = 0;
39697
39698 for (vma = first_vma(current, gate_vma); vma != NULL;
39699 vma = next_vma(vma, gate_vma))
39700- size += vma_dump_size(vma, mm_flags);
39701+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
39702 return size;
39703 }
39704
39705@@ -1955,7 +2263,7 @@ static int elf_core_dump(struct coredump
39706
39707 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
39708
39709- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
39710+ offset += elf_core_vma_data_size(gate_vma, cprm);
39711 offset += elf_core_extra_data_size();
39712 e_shoff = offset;
39713
39714@@ -1969,10 +2277,12 @@ static int elf_core_dump(struct coredump
39715 offset = dataoff;
39716
39717 size += sizeof(*elf);
39718+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
39719 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
39720 goto end_coredump;
39721
39722 size += sizeof(*phdr4note);
39723+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
39724 if (size > cprm->limit
39725 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
39726 goto end_coredump;
39727@@ -1986,7 +2296,7 @@ static int elf_core_dump(struct coredump
39728 phdr.p_offset = offset;
39729 phdr.p_vaddr = vma->vm_start;
39730 phdr.p_paddr = 0;
39731- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
39732+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
39733 phdr.p_memsz = vma->vm_end - vma->vm_start;
39734 offset += phdr.p_filesz;
39735 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
39736@@ -1997,6 +2307,7 @@ static int elf_core_dump(struct coredump
39737 phdr.p_align = ELF_EXEC_PAGESIZE;
39738
39739 size += sizeof(phdr);
39740+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
39741 if (size > cprm->limit
39742 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
39743 goto end_coredump;
39744@@ -2021,7 +2332,7 @@ static int elf_core_dump(struct coredump
39745 unsigned long addr;
39746 unsigned long end;
39747
39748- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
39749+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
39750
39751 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
39752 struct page *page;
39753@@ -2030,6 +2341,7 @@ static int elf_core_dump(struct coredump
39754 page = get_dump_page(addr);
39755 if (page) {
39756 void *kaddr = kmap(page);
39757+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
39758 stop = ((size += PAGE_SIZE) > cprm->limit) ||
39759 !dump_write(cprm->file, kaddr,
39760 PAGE_SIZE);
39761@@ -2047,6 +2359,7 @@ static int elf_core_dump(struct coredump
39762
39763 if (e_phnum == PN_XNUM) {
39764 size += sizeof(*shdr4extnum);
39765+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
39766 if (size > cprm->limit
39767 || !dump_write(cprm->file, shdr4extnum,
39768 sizeof(*shdr4extnum)))
39769@@ -2067,6 +2380,97 @@ out:
39770
39771 #endif /* CONFIG_ELF_CORE */
39772
39773+#ifdef CONFIG_PAX_MPROTECT
39774+/* PaX: non-PIC ELF libraries need relocations on their executable segments
39775+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
39776+ * we'll remove VM_MAYWRITE for good on RELRO segments.
39777+ *
39778+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
39779+ * basis because we want to allow the common case and not the special ones.
39780+ */
39781+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
39782+{
39783+ struct elfhdr elf_h;
39784+ struct elf_phdr elf_p;
39785+ unsigned long i;
39786+ unsigned long oldflags;
39787+ bool is_textrel_rw, is_textrel_rx, is_relro;
39788+
39789+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
39790+ return;
39791+
39792+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
39793+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
39794+
39795+#ifdef CONFIG_PAX_ELFRELOCS
39796+ /* possible TEXTREL */
39797+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
39798+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
39799+#else
39800+ is_textrel_rw = false;
39801+ is_textrel_rx = false;
39802+#endif
39803+
39804+ /* possible RELRO */
39805+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
39806+
39807+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
39808+ return;
39809+
39810+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
39811+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
39812+
39813+#ifdef CONFIG_PAX_ETEXECRELOCS
39814+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
39815+#else
39816+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
39817+#endif
39818+
39819+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
39820+ !elf_check_arch(&elf_h) ||
39821+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
39822+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
39823+ return;
39824+
39825+ for (i = 0UL; i < elf_h.e_phnum; i++) {
39826+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
39827+ return;
39828+ switch (elf_p.p_type) {
39829+ case PT_DYNAMIC:
39830+ if (!is_textrel_rw && !is_textrel_rx)
39831+ continue;
39832+ i = 0UL;
39833+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
39834+ elf_dyn dyn;
39835+
39836+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
39837+ return;
39838+ if (dyn.d_tag == DT_NULL)
39839+ return;
39840+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
39841+ gr_log_textrel(vma);
39842+ if (is_textrel_rw)
39843+ vma->vm_flags |= VM_MAYWRITE;
39844+ else
39845+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
39846+ vma->vm_flags &= ~VM_MAYWRITE;
39847+ return;
39848+ }
39849+ i++;
39850+ }
39851+ return;
39852+
39853+ case PT_GNU_RELRO:
39854+ if (!is_relro)
39855+ continue;
39856+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
39857+ vma->vm_flags &= ~VM_MAYWRITE;
39858+ return;
39859+ }
39860+ }
39861+}
39862+#endif
39863+
39864 static int __init init_elf_binfmt(void)
39865 {
39866 return register_binfmt(&elf_format);
39867diff -urNp linux-3.0.7/fs/binfmt_flat.c linux-3.0.7/fs/binfmt_flat.c
39868--- linux-3.0.7/fs/binfmt_flat.c 2011-07-21 22:17:23.000000000 -0400
39869+++ linux-3.0.7/fs/binfmt_flat.c 2011-08-23 21:47:56.000000000 -0400
39870@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
39871 realdatastart = (unsigned long) -ENOMEM;
39872 printk("Unable to allocate RAM for process data, errno %d\n",
39873 (int)-realdatastart);
39874+ down_write(&current->mm->mmap_sem);
39875 do_munmap(current->mm, textpos, text_len);
39876+ up_write(&current->mm->mmap_sem);
39877 ret = realdatastart;
39878 goto err;
39879 }
39880@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
39881 }
39882 if (IS_ERR_VALUE(result)) {
39883 printk("Unable to read data+bss, errno %d\n", (int)-result);
39884+ down_write(&current->mm->mmap_sem);
39885 do_munmap(current->mm, textpos, text_len);
39886 do_munmap(current->mm, realdatastart, len);
39887+ up_write(&current->mm->mmap_sem);
39888 ret = result;
39889 goto err;
39890 }
39891@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
39892 }
39893 if (IS_ERR_VALUE(result)) {
39894 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
39895+ down_write(&current->mm->mmap_sem);
39896 do_munmap(current->mm, textpos, text_len + data_len + extra +
39897 MAX_SHARED_LIBS * sizeof(unsigned long));
39898+ up_write(&current->mm->mmap_sem);
39899 ret = result;
39900 goto err;
39901 }
39902diff -urNp linux-3.0.7/fs/bio.c linux-3.0.7/fs/bio.c
39903--- linux-3.0.7/fs/bio.c 2011-07-21 22:17:23.000000000 -0400
39904+++ linux-3.0.7/fs/bio.c 2011-10-06 04:17:55.000000000 -0400
39905@@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
39906 const int read = bio_data_dir(bio) == READ;
39907 struct bio_map_data *bmd = bio->bi_private;
39908 int i;
39909- char *p = bmd->sgvecs[0].iov_base;
39910+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
39911
39912 __bio_for_each_segment(bvec, bio, i, 0) {
39913 char *addr = page_address(bvec->bv_page);
39914diff -urNp linux-3.0.7/fs/block_dev.c linux-3.0.7/fs/block_dev.c
39915--- linux-3.0.7/fs/block_dev.c 2011-10-16 21:54:54.000000000 -0400
39916+++ linux-3.0.7/fs/block_dev.c 2011-10-16 21:55:28.000000000 -0400
39917@@ -671,7 +671,7 @@ static bool bd_may_claim(struct block_de
39918 else if (bdev->bd_contains == bdev)
39919 return true; /* is a whole device which isn't held */
39920
39921- else if (whole->bd_holder == bd_may_claim)
39922+ else if (whole->bd_holder == (void *)bd_may_claim)
39923 return true; /* is a partition of a device that is being partitioned */
39924 else if (whole->bd_holder != NULL)
39925 return false; /* is a partition of a held device */
39926diff -urNp linux-3.0.7/fs/btrfs/ctree.c linux-3.0.7/fs/btrfs/ctree.c
39927--- linux-3.0.7/fs/btrfs/ctree.c 2011-07-21 22:17:23.000000000 -0400
39928+++ linux-3.0.7/fs/btrfs/ctree.c 2011-08-23 21:47:56.000000000 -0400
39929@@ -454,9 +454,12 @@ static noinline int __btrfs_cow_block(st
39930 free_extent_buffer(buf);
39931 add_root_to_dirty_list(root);
39932 } else {
39933- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
39934- parent_start = parent->start;
39935- else
39936+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
39937+ if (parent)
39938+ parent_start = parent->start;
39939+ else
39940+ parent_start = 0;
39941+ } else
39942 parent_start = 0;
39943
39944 WARN_ON(trans->transid != btrfs_header_generation(parent));
39945diff -urNp linux-3.0.7/fs/btrfs/inode.c linux-3.0.7/fs/btrfs/inode.c
39946--- linux-3.0.7/fs/btrfs/inode.c 2011-10-16 21:54:54.000000000 -0400
39947+++ linux-3.0.7/fs/btrfs/inode.c 2011-10-16 21:55:28.000000000 -0400
39948@@ -6896,7 +6896,7 @@ fail:
39949 return -ENOMEM;
39950 }
39951
39952-static int btrfs_getattr(struct vfsmount *mnt,
39953+int btrfs_getattr(struct vfsmount *mnt,
39954 struct dentry *dentry, struct kstat *stat)
39955 {
39956 struct inode *inode = dentry->d_inode;
39957@@ -6908,6 +6908,14 @@ static int btrfs_getattr(struct vfsmount
39958 return 0;
39959 }
39960
39961+EXPORT_SYMBOL(btrfs_getattr);
39962+
39963+dev_t get_btrfs_dev_from_inode(struct inode *inode)
39964+{
39965+ return BTRFS_I(inode)->root->anon_super.s_dev;
39966+}
39967+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
39968+
39969 /*
39970 * If a file is moved, it will inherit the cow and compression flags of the new
39971 * directory.
39972diff -urNp linux-3.0.7/fs/btrfs/ioctl.c linux-3.0.7/fs/btrfs/ioctl.c
39973--- linux-3.0.7/fs/btrfs/ioctl.c 2011-07-21 22:17:23.000000000 -0400
39974+++ linux-3.0.7/fs/btrfs/ioctl.c 2011-10-06 04:17:55.000000000 -0400
39975@@ -2676,9 +2676,12 @@ long btrfs_ioctl_space_info(struct btrfs
39976 for (i = 0; i < num_types; i++) {
39977 struct btrfs_space_info *tmp;
39978
39979+ /* Don't copy in more than we allocated */
39980 if (!slot_count)
39981 break;
39982
39983+ slot_count--;
39984+
39985 info = NULL;
39986 rcu_read_lock();
39987 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
39988@@ -2700,15 +2703,12 @@ long btrfs_ioctl_space_info(struct btrfs
39989 memcpy(dest, &space, sizeof(space));
39990 dest++;
39991 space_args.total_spaces++;
39992- slot_count--;
39993 }
39994- if (!slot_count)
39995- break;
39996 }
39997 up_read(&info->groups_sem);
39998 }
39999
40000- user_dest = (struct btrfs_ioctl_space_info *)
40001+ user_dest = (struct btrfs_ioctl_space_info __user *)
40002 (arg + sizeof(struct btrfs_ioctl_space_args));
40003
40004 if (copy_to_user(user_dest, dest_orig, alloc_size))
40005diff -urNp linux-3.0.7/fs/btrfs/relocation.c linux-3.0.7/fs/btrfs/relocation.c
40006--- linux-3.0.7/fs/btrfs/relocation.c 2011-07-21 22:17:23.000000000 -0400
40007+++ linux-3.0.7/fs/btrfs/relocation.c 2011-08-23 21:47:56.000000000 -0400
40008@@ -1242,7 +1242,7 @@ static int __update_reloc_root(struct bt
40009 }
40010 spin_unlock(&rc->reloc_root_tree.lock);
40011
40012- BUG_ON((struct btrfs_root *)node->data != root);
40013+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
40014
40015 if (!del) {
40016 spin_lock(&rc->reloc_root_tree.lock);
40017diff -urNp linux-3.0.7/fs/cachefiles/bind.c linux-3.0.7/fs/cachefiles/bind.c
40018--- linux-3.0.7/fs/cachefiles/bind.c 2011-07-21 22:17:23.000000000 -0400
40019+++ linux-3.0.7/fs/cachefiles/bind.c 2011-08-23 21:47:56.000000000 -0400
40020@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
40021 args);
40022
40023 /* start by checking things over */
40024- ASSERT(cache->fstop_percent >= 0 &&
40025- cache->fstop_percent < cache->fcull_percent &&
40026+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
40027 cache->fcull_percent < cache->frun_percent &&
40028 cache->frun_percent < 100);
40029
40030- ASSERT(cache->bstop_percent >= 0 &&
40031- cache->bstop_percent < cache->bcull_percent &&
40032+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
40033 cache->bcull_percent < cache->brun_percent &&
40034 cache->brun_percent < 100);
40035
40036diff -urNp linux-3.0.7/fs/cachefiles/daemon.c linux-3.0.7/fs/cachefiles/daemon.c
40037--- linux-3.0.7/fs/cachefiles/daemon.c 2011-07-21 22:17:23.000000000 -0400
40038+++ linux-3.0.7/fs/cachefiles/daemon.c 2011-08-23 21:47:56.000000000 -0400
40039@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
40040 if (n > buflen)
40041 return -EMSGSIZE;
40042
40043- if (copy_to_user(_buffer, buffer, n) != 0)
40044+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
40045 return -EFAULT;
40046
40047 return n;
40048@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
40049 if (test_bit(CACHEFILES_DEAD, &cache->flags))
40050 return -EIO;
40051
40052- if (datalen < 0 || datalen > PAGE_SIZE - 1)
40053+ if (datalen > PAGE_SIZE - 1)
40054 return -EOPNOTSUPP;
40055
40056 /* drag the command string into the kernel so we can parse it */
40057@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
40058 if (args[0] != '%' || args[1] != '\0')
40059 return -EINVAL;
40060
40061- if (fstop < 0 || fstop >= cache->fcull_percent)
40062+ if (fstop >= cache->fcull_percent)
40063 return cachefiles_daemon_range_error(cache, args);
40064
40065 cache->fstop_percent = fstop;
40066@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
40067 if (args[0] != '%' || args[1] != '\0')
40068 return -EINVAL;
40069
40070- if (bstop < 0 || bstop >= cache->bcull_percent)
40071+ if (bstop >= cache->bcull_percent)
40072 return cachefiles_daemon_range_error(cache, args);
40073
40074 cache->bstop_percent = bstop;
40075diff -urNp linux-3.0.7/fs/cachefiles/internal.h linux-3.0.7/fs/cachefiles/internal.h
40076--- linux-3.0.7/fs/cachefiles/internal.h 2011-07-21 22:17:23.000000000 -0400
40077+++ linux-3.0.7/fs/cachefiles/internal.h 2011-08-23 21:47:56.000000000 -0400
40078@@ -57,7 +57,7 @@ struct cachefiles_cache {
40079 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
40080 struct rb_root active_nodes; /* active nodes (can't be culled) */
40081 rwlock_t active_lock; /* lock for active_nodes */
40082- atomic_t gravecounter; /* graveyard uniquifier */
40083+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
40084 unsigned frun_percent; /* when to stop culling (% files) */
40085 unsigned fcull_percent; /* when to start culling (% files) */
40086 unsigned fstop_percent; /* when to stop allocating (% files) */
40087@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
40088 * proc.c
40089 */
40090 #ifdef CONFIG_CACHEFILES_HISTOGRAM
40091-extern atomic_t cachefiles_lookup_histogram[HZ];
40092-extern atomic_t cachefiles_mkdir_histogram[HZ];
40093-extern atomic_t cachefiles_create_histogram[HZ];
40094+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40095+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40096+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
40097
40098 extern int __init cachefiles_proc_init(void);
40099 extern void cachefiles_proc_cleanup(void);
40100 static inline
40101-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
40102+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
40103 {
40104 unsigned long jif = jiffies - start_jif;
40105 if (jif >= HZ)
40106 jif = HZ - 1;
40107- atomic_inc(&histogram[jif]);
40108+ atomic_inc_unchecked(&histogram[jif]);
40109 }
40110
40111 #else
40112diff -urNp linux-3.0.7/fs/cachefiles/namei.c linux-3.0.7/fs/cachefiles/namei.c
40113--- linux-3.0.7/fs/cachefiles/namei.c 2011-07-21 22:17:23.000000000 -0400
40114+++ linux-3.0.7/fs/cachefiles/namei.c 2011-08-23 21:47:56.000000000 -0400
40115@@ -318,7 +318,7 @@ try_again:
40116 /* first step is to make up a grave dentry in the graveyard */
40117 sprintf(nbuffer, "%08x%08x",
40118 (uint32_t) get_seconds(),
40119- (uint32_t) atomic_inc_return(&cache->gravecounter));
40120+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
40121
40122 /* do the multiway lock magic */
40123 trap = lock_rename(cache->graveyard, dir);
40124diff -urNp linux-3.0.7/fs/cachefiles/proc.c linux-3.0.7/fs/cachefiles/proc.c
40125--- linux-3.0.7/fs/cachefiles/proc.c 2011-07-21 22:17:23.000000000 -0400
40126+++ linux-3.0.7/fs/cachefiles/proc.c 2011-08-23 21:47:56.000000000 -0400
40127@@ -14,9 +14,9 @@
40128 #include <linux/seq_file.h>
40129 #include "internal.h"
40130
40131-atomic_t cachefiles_lookup_histogram[HZ];
40132-atomic_t cachefiles_mkdir_histogram[HZ];
40133-atomic_t cachefiles_create_histogram[HZ];
40134+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40135+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40136+atomic_unchecked_t cachefiles_create_histogram[HZ];
40137
40138 /*
40139 * display the latency histogram
40140@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
40141 return 0;
40142 default:
40143 index = (unsigned long) v - 3;
40144- x = atomic_read(&cachefiles_lookup_histogram[index]);
40145- y = atomic_read(&cachefiles_mkdir_histogram[index]);
40146- z = atomic_read(&cachefiles_create_histogram[index]);
40147+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
40148+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
40149+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
40150 if (x == 0 && y == 0 && z == 0)
40151 return 0;
40152
40153diff -urNp linux-3.0.7/fs/cachefiles/rdwr.c linux-3.0.7/fs/cachefiles/rdwr.c
40154--- linux-3.0.7/fs/cachefiles/rdwr.c 2011-07-21 22:17:23.000000000 -0400
40155+++ linux-3.0.7/fs/cachefiles/rdwr.c 2011-10-06 04:17:55.000000000 -0400
40156@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
40157 old_fs = get_fs();
40158 set_fs(KERNEL_DS);
40159 ret = file->f_op->write(
40160- file, (const void __user *) data, len, &pos);
40161+ file, (const void __force_user *) data, len, &pos);
40162 set_fs(old_fs);
40163 kunmap(page);
40164 if (ret != len)
40165diff -urNp linux-3.0.7/fs/ceph/dir.c linux-3.0.7/fs/ceph/dir.c
40166--- linux-3.0.7/fs/ceph/dir.c 2011-07-21 22:17:23.000000000 -0400
40167+++ linux-3.0.7/fs/ceph/dir.c 2011-08-23 21:47:56.000000000 -0400
40168@@ -226,7 +226,7 @@ static int ceph_readdir(struct file *fil
40169 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
40170 struct ceph_mds_client *mdsc = fsc->mdsc;
40171 unsigned frag = fpos_frag(filp->f_pos);
40172- int off = fpos_off(filp->f_pos);
40173+ unsigned int off = fpos_off(filp->f_pos);
40174 int err;
40175 u32 ftype;
40176 struct ceph_mds_reply_info_parsed *rinfo;
40177diff -urNp linux-3.0.7/fs/cifs/cifs_debug.c linux-3.0.7/fs/cifs/cifs_debug.c
40178--- linux-3.0.7/fs/cifs/cifs_debug.c 2011-07-21 22:17:23.000000000 -0400
40179+++ linux-3.0.7/fs/cifs/cifs_debug.c 2011-08-25 17:18:05.000000000 -0400
40180@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(str
40181
40182 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
40183 #ifdef CONFIG_CIFS_STATS2
40184- atomic_set(&totBufAllocCount, 0);
40185- atomic_set(&totSmBufAllocCount, 0);
40186+ atomic_set_unchecked(&totBufAllocCount, 0);
40187+ atomic_set_unchecked(&totSmBufAllocCount, 0);
40188 #endif /* CONFIG_CIFS_STATS2 */
40189 spin_lock(&cifs_tcp_ses_lock);
40190 list_for_each(tmp1, &cifs_tcp_ses_list) {
40191@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
40192 tcon = list_entry(tmp3,
40193 struct cifs_tcon,
40194 tcon_list);
40195- atomic_set(&tcon->num_smbs_sent, 0);
40196- atomic_set(&tcon->num_writes, 0);
40197- atomic_set(&tcon->num_reads, 0);
40198- atomic_set(&tcon->num_oplock_brks, 0);
40199- atomic_set(&tcon->num_opens, 0);
40200- atomic_set(&tcon->num_posixopens, 0);
40201- atomic_set(&tcon->num_posixmkdirs, 0);
40202- atomic_set(&tcon->num_closes, 0);
40203- atomic_set(&tcon->num_deletes, 0);
40204- atomic_set(&tcon->num_mkdirs, 0);
40205- atomic_set(&tcon->num_rmdirs, 0);
40206- atomic_set(&tcon->num_renames, 0);
40207- atomic_set(&tcon->num_t2renames, 0);
40208- atomic_set(&tcon->num_ffirst, 0);
40209- atomic_set(&tcon->num_fnext, 0);
40210- atomic_set(&tcon->num_fclose, 0);
40211- atomic_set(&tcon->num_hardlinks, 0);
40212- atomic_set(&tcon->num_symlinks, 0);
40213- atomic_set(&tcon->num_locks, 0);
40214+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
40215+ atomic_set_unchecked(&tcon->num_writes, 0);
40216+ atomic_set_unchecked(&tcon->num_reads, 0);
40217+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
40218+ atomic_set_unchecked(&tcon->num_opens, 0);
40219+ atomic_set_unchecked(&tcon->num_posixopens, 0);
40220+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
40221+ atomic_set_unchecked(&tcon->num_closes, 0);
40222+ atomic_set_unchecked(&tcon->num_deletes, 0);
40223+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
40224+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
40225+ atomic_set_unchecked(&tcon->num_renames, 0);
40226+ atomic_set_unchecked(&tcon->num_t2renames, 0);
40227+ atomic_set_unchecked(&tcon->num_ffirst, 0);
40228+ atomic_set_unchecked(&tcon->num_fnext, 0);
40229+ atomic_set_unchecked(&tcon->num_fclose, 0);
40230+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
40231+ atomic_set_unchecked(&tcon->num_symlinks, 0);
40232+ atomic_set_unchecked(&tcon->num_locks, 0);
40233 }
40234 }
40235 }
40236@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct s
40237 smBufAllocCount.counter, cifs_min_small);
40238 #ifdef CONFIG_CIFS_STATS2
40239 seq_printf(m, "Total Large %d Small %d Allocations\n",
40240- atomic_read(&totBufAllocCount),
40241- atomic_read(&totSmBufAllocCount));
40242+ atomic_read_unchecked(&totBufAllocCount),
40243+ atomic_read_unchecked(&totSmBufAllocCount));
40244 #endif /* CONFIG_CIFS_STATS2 */
40245
40246 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
40247@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
40248 if (tcon->need_reconnect)
40249 seq_puts(m, "\tDISCONNECTED ");
40250 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
40251- atomic_read(&tcon->num_smbs_sent),
40252- atomic_read(&tcon->num_oplock_brks));
40253+ atomic_read_unchecked(&tcon->num_smbs_sent),
40254+ atomic_read_unchecked(&tcon->num_oplock_brks));
40255 seq_printf(m, "\nReads: %d Bytes: %lld",
40256- atomic_read(&tcon->num_reads),
40257+ atomic_read_unchecked(&tcon->num_reads),
40258 (long long)(tcon->bytes_read));
40259 seq_printf(m, "\nWrites: %d Bytes: %lld",
40260- atomic_read(&tcon->num_writes),
40261+ atomic_read_unchecked(&tcon->num_writes),
40262 (long long)(tcon->bytes_written));
40263 seq_printf(m, "\nFlushes: %d",
40264- atomic_read(&tcon->num_flushes));
40265+ atomic_read_unchecked(&tcon->num_flushes));
40266 seq_printf(m, "\nLocks: %d HardLinks: %d "
40267 "Symlinks: %d",
40268- atomic_read(&tcon->num_locks),
40269- atomic_read(&tcon->num_hardlinks),
40270- atomic_read(&tcon->num_symlinks));
40271+ atomic_read_unchecked(&tcon->num_locks),
40272+ atomic_read_unchecked(&tcon->num_hardlinks),
40273+ atomic_read_unchecked(&tcon->num_symlinks));
40274 seq_printf(m, "\nOpens: %d Closes: %d "
40275 "Deletes: %d",
40276- atomic_read(&tcon->num_opens),
40277- atomic_read(&tcon->num_closes),
40278- atomic_read(&tcon->num_deletes));
40279+ atomic_read_unchecked(&tcon->num_opens),
40280+ atomic_read_unchecked(&tcon->num_closes),
40281+ atomic_read_unchecked(&tcon->num_deletes));
40282 seq_printf(m, "\nPosix Opens: %d "
40283 "Posix Mkdirs: %d",
40284- atomic_read(&tcon->num_posixopens),
40285- atomic_read(&tcon->num_posixmkdirs));
40286+ atomic_read_unchecked(&tcon->num_posixopens),
40287+ atomic_read_unchecked(&tcon->num_posixmkdirs));
40288 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
40289- atomic_read(&tcon->num_mkdirs),
40290- atomic_read(&tcon->num_rmdirs));
40291+ atomic_read_unchecked(&tcon->num_mkdirs),
40292+ atomic_read_unchecked(&tcon->num_rmdirs));
40293 seq_printf(m, "\nRenames: %d T2 Renames %d",
40294- atomic_read(&tcon->num_renames),
40295- atomic_read(&tcon->num_t2renames));
40296+ atomic_read_unchecked(&tcon->num_renames),
40297+ atomic_read_unchecked(&tcon->num_t2renames));
40298 seq_printf(m, "\nFindFirst: %d FNext %d "
40299 "FClose %d",
40300- atomic_read(&tcon->num_ffirst),
40301- atomic_read(&tcon->num_fnext),
40302- atomic_read(&tcon->num_fclose));
40303+ atomic_read_unchecked(&tcon->num_ffirst),
40304+ atomic_read_unchecked(&tcon->num_fnext),
40305+ atomic_read_unchecked(&tcon->num_fclose));
40306 }
40307 }
40308 }
40309diff -urNp linux-3.0.7/fs/cifs/cifsfs.c linux-3.0.7/fs/cifs/cifsfs.c
40310--- linux-3.0.7/fs/cifs/cifsfs.c 2011-09-02 18:11:21.000000000 -0400
40311+++ linux-3.0.7/fs/cifs/cifsfs.c 2011-08-25 17:18:05.000000000 -0400
40312@@ -994,7 +994,7 @@ cifs_init_request_bufs(void)
40313 cifs_req_cachep = kmem_cache_create("cifs_request",
40314 CIFSMaxBufSize +
40315 MAX_CIFS_HDR_SIZE, 0,
40316- SLAB_HWCACHE_ALIGN, NULL);
40317+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
40318 if (cifs_req_cachep == NULL)
40319 return -ENOMEM;
40320
40321@@ -1021,7 +1021,7 @@ cifs_init_request_bufs(void)
40322 efficient to alloc 1 per page off the slab compared to 17K (5page)
40323 alloc of large cifs buffers even when page debugging is on */
40324 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
40325- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
40326+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
40327 NULL);
40328 if (cifs_sm_req_cachep == NULL) {
40329 mempool_destroy(cifs_req_poolp);
40330@@ -1106,8 +1106,8 @@ init_cifs(void)
40331 atomic_set(&bufAllocCount, 0);
40332 atomic_set(&smBufAllocCount, 0);
40333 #ifdef CONFIG_CIFS_STATS2
40334- atomic_set(&totBufAllocCount, 0);
40335- atomic_set(&totSmBufAllocCount, 0);
40336+ atomic_set_unchecked(&totBufAllocCount, 0);
40337+ atomic_set_unchecked(&totSmBufAllocCount, 0);
40338 #endif /* CONFIG_CIFS_STATS2 */
40339
40340 atomic_set(&midCount, 0);
40341diff -urNp linux-3.0.7/fs/cifs/cifsglob.h linux-3.0.7/fs/cifs/cifsglob.h
40342--- linux-3.0.7/fs/cifs/cifsglob.h 2011-07-21 22:17:23.000000000 -0400
40343+++ linux-3.0.7/fs/cifs/cifsglob.h 2011-08-25 17:18:05.000000000 -0400
40344@@ -381,28 +381,28 @@ struct cifs_tcon {
40345 __u16 Flags; /* optional support bits */
40346 enum statusEnum tidStatus;
40347 #ifdef CONFIG_CIFS_STATS
40348- atomic_t num_smbs_sent;
40349- atomic_t num_writes;
40350- atomic_t num_reads;
40351- atomic_t num_flushes;
40352- atomic_t num_oplock_brks;
40353- atomic_t num_opens;
40354- atomic_t num_closes;
40355- atomic_t num_deletes;
40356- atomic_t num_mkdirs;
40357- atomic_t num_posixopens;
40358- atomic_t num_posixmkdirs;
40359- atomic_t num_rmdirs;
40360- atomic_t num_renames;
40361- atomic_t num_t2renames;
40362- atomic_t num_ffirst;
40363- atomic_t num_fnext;
40364- atomic_t num_fclose;
40365- atomic_t num_hardlinks;
40366- atomic_t num_symlinks;
40367- atomic_t num_locks;
40368- atomic_t num_acl_get;
40369- atomic_t num_acl_set;
40370+ atomic_unchecked_t num_smbs_sent;
40371+ atomic_unchecked_t num_writes;
40372+ atomic_unchecked_t num_reads;
40373+ atomic_unchecked_t num_flushes;
40374+ atomic_unchecked_t num_oplock_brks;
40375+ atomic_unchecked_t num_opens;
40376+ atomic_unchecked_t num_closes;
40377+ atomic_unchecked_t num_deletes;
40378+ atomic_unchecked_t num_mkdirs;
40379+ atomic_unchecked_t num_posixopens;
40380+ atomic_unchecked_t num_posixmkdirs;
40381+ atomic_unchecked_t num_rmdirs;
40382+ atomic_unchecked_t num_renames;
40383+ atomic_unchecked_t num_t2renames;
40384+ atomic_unchecked_t num_ffirst;
40385+ atomic_unchecked_t num_fnext;
40386+ atomic_unchecked_t num_fclose;
40387+ atomic_unchecked_t num_hardlinks;
40388+ atomic_unchecked_t num_symlinks;
40389+ atomic_unchecked_t num_locks;
40390+ atomic_unchecked_t num_acl_get;
40391+ atomic_unchecked_t num_acl_set;
40392 #ifdef CONFIG_CIFS_STATS2
40393 unsigned long long time_writes;
40394 unsigned long long time_reads;
40395@@ -613,7 +613,7 @@ convert_delimiter(char *path, char delim
40396 }
40397
40398 #ifdef CONFIG_CIFS_STATS
40399-#define cifs_stats_inc atomic_inc
40400+#define cifs_stats_inc atomic_inc_unchecked
40401
40402 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
40403 unsigned int bytes)
40404@@ -911,8 +911,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnect
40405 /* Various Debug counters */
40406 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
40407 #ifdef CONFIG_CIFS_STATS2
40408-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
40409-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
40410+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
40411+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
40412 #endif
40413 GLOBAL_EXTERN atomic_t smBufAllocCount;
40414 GLOBAL_EXTERN atomic_t midCount;
40415diff -urNp linux-3.0.7/fs/cifs/link.c linux-3.0.7/fs/cifs/link.c
40416--- linux-3.0.7/fs/cifs/link.c 2011-07-21 22:17:23.000000000 -0400
40417+++ linux-3.0.7/fs/cifs/link.c 2011-08-23 21:47:56.000000000 -0400
40418@@ -587,7 +587,7 @@ symlink_exit:
40419
40420 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
40421 {
40422- char *p = nd_get_link(nd);
40423+ const char *p = nd_get_link(nd);
40424 if (!IS_ERR(p))
40425 kfree(p);
40426 }
40427diff -urNp linux-3.0.7/fs/cifs/misc.c linux-3.0.7/fs/cifs/misc.c
40428--- linux-3.0.7/fs/cifs/misc.c 2011-07-21 22:17:23.000000000 -0400
40429+++ linux-3.0.7/fs/cifs/misc.c 2011-08-25 17:18:05.000000000 -0400
40430@@ -156,7 +156,7 @@ cifs_buf_get(void)
40431 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
40432 atomic_inc(&bufAllocCount);
40433 #ifdef CONFIG_CIFS_STATS2
40434- atomic_inc(&totBufAllocCount);
40435+ atomic_inc_unchecked(&totBufAllocCount);
40436 #endif /* CONFIG_CIFS_STATS2 */
40437 }
40438
40439@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
40440 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
40441 atomic_inc(&smBufAllocCount);
40442 #ifdef CONFIG_CIFS_STATS2
40443- atomic_inc(&totSmBufAllocCount);
40444+ atomic_inc_unchecked(&totSmBufAllocCount);
40445 #endif /* CONFIG_CIFS_STATS2 */
40446
40447 }
40448diff -urNp linux-3.0.7/fs/coda/cache.c linux-3.0.7/fs/coda/cache.c
40449--- linux-3.0.7/fs/coda/cache.c 2011-07-21 22:17:23.000000000 -0400
40450+++ linux-3.0.7/fs/coda/cache.c 2011-08-23 21:47:56.000000000 -0400
40451@@ -24,7 +24,7 @@
40452 #include "coda_linux.h"
40453 #include "coda_cache.h"
40454
40455-static atomic_t permission_epoch = ATOMIC_INIT(0);
40456+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
40457
40458 /* replace or extend an acl cache hit */
40459 void coda_cache_enter(struct inode *inode, int mask)
40460@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
40461 struct coda_inode_info *cii = ITOC(inode);
40462
40463 spin_lock(&cii->c_lock);
40464- cii->c_cached_epoch = atomic_read(&permission_epoch);
40465+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
40466 if (cii->c_uid != current_fsuid()) {
40467 cii->c_uid = current_fsuid();
40468 cii->c_cached_perm = mask;
40469@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
40470 {
40471 struct coda_inode_info *cii = ITOC(inode);
40472 spin_lock(&cii->c_lock);
40473- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
40474+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
40475 spin_unlock(&cii->c_lock);
40476 }
40477
40478 /* remove all acl caches */
40479 void coda_cache_clear_all(struct super_block *sb)
40480 {
40481- atomic_inc(&permission_epoch);
40482+ atomic_inc_unchecked(&permission_epoch);
40483 }
40484
40485
40486@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
40487 spin_lock(&cii->c_lock);
40488 hit = (mask & cii->c_cached_perm) == mask &&
40489 cii->c_uid == current_fsuid() &&
40490- cii->c_cached_epoch == atomic_read(&permission_epoch);
40491+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
40492 spin_unlock(&cii->c_lock);
40493
40494 return hit;
40495diff -urNp linux-3.0.7/fs/compat_binfmt_elf.c linux-3.0.7/fs/compat_binfmt_elf.c
40496--- linux-3.0.7/fs/compat_binfmt_elf.c 2011-07-21 22:17:23.000000000 -0400
40497+++ linux-3.0.7/fs/compat_binfmt_elf.c 2011-08-23 21:47:56.000000000 -0400
40498@@ -30,11 +30,13 @@
40499 #undef elf_phdr
40500 #undef elf_shdr
40501 #undef elf_note
40502+#undef elf_dyn
40503 #undef elf_addr_t
40504 #define elfhdr elf32_hdr
40505 #define elf_phdr elf32_phdr
40506 #define elf_shdr elf32_shdr
40507 #define elf_note elf32_note
40508+#define elf_dyn Elf32_Dyn
40509 #define elf_addr_t Elf32_Addr
40510
40511 /*
40512diff -urNp linux-3.0.7/fs/compat.c linux-3.0.7/fs/compat.c
40513--- linux-3.0.7/fs/compat.c 2011-07-21 22:17:23.000000000 -0400
40514+++ linux-3.0.7/fs/compat.c 2011-10-06 04:17:55.000000000 -0400
40515@@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(const
40516 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
40517 {
40518 compat_ino_t ino = stat->ino;
40519- typeof(ubuf->st_uid) uid = 0;
40520- typeof(ubuf->st_gid) gid = 0;
40521+ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
40522+ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
40523 int err;
40524
40525 SET_UID(uid, stat->uid);
40526@@ -508,7 +508,7 @@ compat_sys_io_setup(unsigned nr_reqs, u3
40527
40528 set_fs(KERNEL_DS);
40529 /* The __user pointer cast is valid because of the set_fs() */
40530- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
40531+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
40532 set_fs(oldfs);
40533 /* truncating is ok because it's a user address */
40534 if (!ret)
40535@@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
40536 goto out;
40537
40538 ret = -EINVAL;
40539- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
40540+ if (nr_segs > UIO_MAXIOV)
40541 goto out;
40542 if (nr_segs > fast_segs) {
40543 ret = -ENOMEM;
40544@@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
40545
40546 struct compat_readdir_callback {
40547 struct compat_old_linux_dirent __user *dirent;
40548+ struct file * file;
40549 int result;
40550 };
40551
40552@@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
40553 buf->result = -EOVERFLOW;
40554 return -EOVERFLOW;
40555 }
40556+
40557+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40558+ return 0;
40559+
40560 buf->result++;
40561 dirent = buf->dirent;
40562 if (!access_ok(VERIFY_WRITE, dirent,
40563@@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
40564
40565 buf.result = 0;
40566 buf.dirent = dirent;
40567+ buf.file = file;
40568
40569 error = vfs_readdir(file, compat_fillonedir, &buf);
40570 if (buf.result)
40571@@ -917,6 +923,7 @@ struct compat_linux_dirent {
40572 struct compat_getdents_callback {
40573 struct compat_linux_dirent __user *current_dir;
40574 struct compat_linux_dirent __user *previous;
40575+ struct file * file;
40576 int count;
40577 int error;
40578 };
40579@@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
40580 buf->error = -EOVERFLOW;
40581 return -EOVERFLOW;
40582 }
40583+
40584+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40585+ return 0;
40586+
40587 dirent = buf->previous;
40588 if (dirent) {
40589 if (__put_user(offset, &dirent->d_off))
40590@@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
40591 buf.previous = NULL;
40592 buf.count = count;
40593 buf.error = 0;
40594+ buf.file = file;
40595
40596 error = vfs_readdir(file, compat_filldir, &buf);
40597 if (error >= 0)
40598@@ -1006,6 +1018,7 @@ out:
40599 struct compat_getdents_callback64 {
40600 struct linux_dirent64 __user *current_dir;
40601 struct linux_dirent64 __user *previous;
40602+ struct file * file;
40603 int count;
40604 int error;
40605 };
40606@@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
40607 buf->error = -EINVAL; /* only used if we fail.. */
40608 if (reclen > buf->count)
40609 return -EINVAL;
40610+
40611+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40612+ return 0;
40613+
40614 dirent = buf->previous;
40615
40616 if (dirent) {
40617@@ -1073,13 +1090,14 @@ asmlinkage long compat_sys_getdents64(un
40618 buf.previous = NULL;
40619 buf.count = count;
40620 buf.error = 0;
40621+ buf.file = file;
40622
40623 error = vfs_readdir(file, compat_filldir64, &buf);
40624 if (error >= 0)
40625 error = buf.error;
40626 lastdirent = buf.previous;
40627 if (lastdirent) {
40628- typeof(lastdirent->d_off) d_off = file->f_pos;
40629+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
40630 if (__put_user_unaligned(d_off, &lastdirent->d_off))
40631 error = -EFAULT;
40632 else
40633@@ -1446,6 +1464,8 @@ int compat_core_sys_select(int n, compat
40634 struct fdtable *fdt;
40635 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
40636
40637+ pax_track_stack();
40638+
40639 if (n < 0)
40640 goto out_nofds;
40641
40642@@ -1904,7 +1924,7 @@ asmlinkage long compat_sys_nfsservctl(in
40643 oldfs = get_fs();
40644 set_fs(KERNEL_DS);
40645 /* The __user pointer casts are valid because of the set_fs() */
40646- err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
40647+ err = sys_nfsservctl(cmd, (void __force_user *) karg, (void __force_user *) kres);
40648 set_fs(oldfs);
40649
40650 if (err)
40651diff -urNp linux-3.0.7/fs/compat_ioctl.c linux-3.0.7/fs/compat_ioctl.c
40652--- linux-3.0.7/fs/compat_ioctl.c 2011-07-21 22:17:23.000000000 -0400
40653+++ linux-3.0.7/fs/compat_ioctl.c 2011-10-06 04:17:55.000000000 -0400
40654@@ -208,6 +208,8 @@ static int do_video_set_spu_palette(unsi
40655
40656 err = get_user(palp, &up->palette);
40657 err |= get_user(length, &up->length);
40658+ if (err)
40659+ return -EFAULT;
40660
40661 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
40662 err = put_user(compat_ptr(palp), &up_native->palette);
40663@@ -619,7 +621,7 @@ static int serial_struct_ioctl(unsigned
40664 return -EFAULT;
40665 if (__get_user(udata, &ss32->iomem_base))
40666 return -EFAULT;
40667- ss.iomem_base = compat_ptr(udata);
40668+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
40669 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
40670 __get_user(ss.port_high, &ss32->port_high))
40671 return -EFAULT;
40672@@ -794,7 +796,7 @@ static int compat_ioctl_preallocate(stru
40673 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
40674 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
40675 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
40676- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
40677+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
40678 return -EFAULT;
40679
40680 return ioctl_preallocate(file, p);
40681@@ -1638,8 +1640,8 @@ asmlinkage long compat_sys_ioctl(unsigne
40682 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
40683 {
40684 unsigned int a, b;
40685- a = *(unsigned int *)p;
40686- b = *(unsigned int *)q;
40687+ a = *(const unsigned int *)p;
40688+ b = *(const unsigned int *)q;
40689 if (a > b)
40690 return 1;
40691 if (a < b)
40692diff -urNp linux-3.0.7/fs/configfs/dir.c linux-3.0.7/fs/configfs/dir.c
40693--- linux-3.0.7/fs/configfs/dir.c 2011-07-21 22:17:23.000000000 -0400
40694+++ linux-3.0.7/fs/configfs/dir.c 2011-08-23 21:47:56.000000000 -0400
40695@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file
40696 }
40697 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
40698 struct configfs_dirent *next;
40699- const char * name;
40700+ const unsigned char * name;
40701+ char d_name[sizeof(next->s_dentry->d_iname)];
40702 int len;
40703 struct inode *inode = NULL;
40704
40705@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file
40706 continue;
40707
40708 name = configfs_get_name(next);
40709- len = strlen(name);
40710+ if (next->s_dentry && name == next->s_dentry->d_iname) {
40711+ len = next->s_dentry->d_name.len;
40712+ memcpy(d_name, name, len);
40713+ name = d_name;
40714+ } else
40715+ len = strlen(name);
40716
40717 /*
40718 * We'll have a dentry and an inode for
40719diff -urNp linux-3.0.7/fs/dcache.c linux-3.0.7/fs/dcache.c
40720--- linux-3.0.7/fs/dcache.c 2011-07-21 22:17:23.000000000 -0400
40721+++ linux-3.0.7/fs/dcache.c 2011-08-23 21:47:56.000000000 -0400
40722@@ -3089,7 +3089,7 @@ void __init vfs_caches_init(unsigned lon
40723 mempages -= reserve;
40724
40725 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
40726- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
40727+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
40728
40729 dcache_init();
40730 inode_init();
40731diff -urNp linux-3.0.7/fs/ecryptfs/inode.c linux-3.0.7/fs/ecryptfs/inode.c
40732--- linux-3.0.7/fs/ecryptfs/inode.c 2011-09-02 18:11:21.000000000 -0400
40733+++ linux-3.0.7/fs/ecryptfs/inode.c 2011-10-06 04:17:55.000000000 -0400
40734@@ -704,7 +704,7 @@ static int ecryptfs_readlink_lower(struc
40735 old_fs = get_fs();
40736 set_fs(get_ds());
40737 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
40738- (char __user *)lower_buf,
40739+ (char __force_user *)lower_buf,
40740 lower_bufsiz);
40741 set_fs(old_fs);
40742 if (rc < 0)
40743@@ -750,7 +750,7 @@ static void *ecryptfs_follow_link(struct
40744 }
40745 old_fs = get_fs();
40746 set_fs(get_ds());
40747- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
40748+ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
40749 set_fs(old_fs);
40750 if (rc < 0) {
40751 kfree(buf);
40752@@ -765,7 +765,7 @@ out:
40753 static void
40754 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
40755 {
40756- char *buf = nd_get_link(nd);
40757+ const char *buf = nd_get_link(nd);
40758 if (!IS_ERR(buf)) {
40759 /* Free the char* */
40760 kfree(buf);
40761diff -urNp linux-3.0.7/fs/ecryptfs/miscdev.c linux-3.0.7/fs/ecryptfs/miscdev.c
40762--- linux-3.0.7/fs/ecryptfs/miscdev.c 2011-07-21 22:17:23.000000000 -0400
40763+++ linux-3.0.7/fs/ecryptfs/miscdev.c 2011-08-23 21:47:56.000000000 -0400
40764@@ -328,7 +328,7 @@ check_list:
40765 goto out_unlock_msg_ctx;
40766 i = 5;
40767 if (msg_ctx->msg) {
40768- if (copy_to_user(&buf[i], packet_length, packet_length_size))
40769+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
40770 goto out_unlock_msg_ctx;
40771 i += packet_length_size;
40772 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
40773diff -urNp linux-3.0.7/fs/ecryptfs/read_write.c linux-3.0.7/fs/ecryptfs/read_write.c
40774--- linux-3.0.7/fs/ecryptfs/read_write.c 2011-09-02 18:11:21.000000000 -0400
40775+++ linux-3.0.7/fs/ecryptfs/read_write.c 2011-10-06 04:17:55.000000000 -0400
40776@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *e
40777 return -EIO;
40778 fs_save = get_fs();
40779 set_fs(get_ds());
40780- rc = vfs_write(lower_file, data, size, &offset);
40781+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
40782 set_fs(fs_save);
40783 mark_inode_dirty_sync(ecryptfs_inode);
40784 return rc;
40785@@ -235,7 +235,7 @@ int ecryptfs_read_lower(char *data, loff
40786 return -EIO;
40787 fs_save = get_fs();
40788 set_fs(get_ds());
40789- rc = vfs_read(lower_file, data, size, &offset);
40790+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
40791 set_fs(fs_save);
40792 return rc;
40793 }
40794diff -urNp linux-3.0.7/fs/exec.c linux-3.0.7/fs/exec.c
40795--- linux-3.0.7/fs/exec.c 2011-10-17 23:17:09.000000000 -0400
40796+++ linux-3.0.7/fs/exec.c 2011-10-17 23:17:19.000000000 -0400
40797@@ -55,12 +55,24 @@
40798 #include <linux/pipe_fs_i.h>
40799 #include <linux/oom.h>
40800 #include <linux/compat.h>
40801+#include <linux/random.h>
40802+#include <linux/seq_file.h>
40803+
40804+#ifdef CONFIG_PAX_REFCOUNT
40805+#include <linux/kallsyms.h>
40806+#include <linux/kdebug.h>
40807+#endif
40808
40809 #include <asm/uaccess.h>
40810 #include <asm/mmu_context.h>
40811 #include <asm/tlb.h>
40812 #include "internal.h"
40813
40814+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
40815+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
40816+EXPORT_SYMBOL(pax_set_initial_flags_func);
40817+#endif
40818+
40819 int core_uses_pid;
40820 char core_pattern[CORENAME_MAX_SIZE] = "core";
40821 unsigned int core_pipe_limit;
40822@@ -70,7 +82,7 @@ struct core_name {
40823 char *corename;
40824 int used, size;
40825 };
40826-static atomic_t call_count = ATOMIC_INIT(1);
40827+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
40828
40829 /* The maximal length of core_pattern is also specified in sysctl.c */
40830
40831@@ -116,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __use
40832 char *tmp = getname(library);
40833 int error = PTR_ERR(tmp);
40834 static const struct open_flags uselib_flags = {
40835- .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
40836+ .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
40837 .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
40838 .intent = LOOKUP_OPEN
40839 };
40840@@ -195,18 +207,10 @@ static struct page *get_arg_page(struct
40841 int write)
40842 {
40843 struct page *page;
40844- int ret;
40845
40846-#ifdef CONFIG_STACK_GROWSUP
40847- if (write) {
40848- ret = expand_downwards(bprm->vma, pos);
40849- if (ret < 0)
40850- return NULL;
40851- }
40852-#endif
40853- ret = get_user_pages(current, bprm->mm, pos,
40854- 1, write, 1, &page, NULL);
40855- if (ret <= 0)
40856+ if (0 > expand_downwards(bprm->vma, pos))
40857+ return NULL;
40858+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
40859 return NULL;
40860
40861 if (write) {
40862@@ -281,6 +285,11 @@ static int __bprm_mm_init(struct linux_b
40863 vma->vm_end = STACK_TOP_MAX;
40864 vma->vm_start = vma->vm_end - PAGE_SIZE;
40865 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
40866+
40867+#ifdef CONFIG_PAX_SEGMEXEC
40868+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
40869+#endif
40870+
40871 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
40872 INIT_LIST_HEAD(&vma->anon_vma_chain);
40873
40874@@ -295,6 +304,12 @@ static int __bprm_mm_init(struct linux_b
40875 mm->stack_vm = mm->total_vm = 1;
40876 up_write(&mm->mmap_sem);
40877 bprm->p = vma->vm_end - sizeof(void *);
40878+
40879+#ifdef CONFIG_PAX_RANDUSTACK
40880+ if (randomize_va_space)
40881+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
40882+#endif
40883+
40884 return 0;
40885 err:
40886 up_write(&mm->mmap_sem);
40887@@ -403,19 +418,7 @@ err:
40888 return err;
40889 }
40890
40891-struct user_arg_ptr {
40892-#ifdef CONFIG_COMPAT
40893- bool is_compat;
40894-#endif
40895- union {
40896- const char __user *const __user *native;
40897-#ifdef CONFIG_COMPAT
40898- compat_uptr_t __user *compat;
40899-#endif
40900- } ptr;
40901-};
40902-
40903-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
40904+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
40905 {
40906 const char __user *native;
40907
40908@@ -424,14 +427,14 @@ static const char __user *get_user_arg_p
40909 compat_uptr_t compat;
40910
40911 if (get_user(compat, argv.ptr.compat + nr))
40912- return ERR_PTR(-EFAULT);
40913+ return (const char __force_user *)ERR_PTR(-EFAULT);
40914
40915 return compat_ptr(compat);
40916 }
40917 #endif
40918
40919 if (get_user(native, argv.ptr.native + nr))
40920- return ERR_PTR(-EFAULT);
40921+ return (const char __force_user *)ERR_PTR(-EFAULT);
40922
40923 return native;
40924 }
40925@@ -450,7 +453,7 @@ static int count(struct user_arg_ptr arg
40926 if (!p)
40927 break;
40928
40929- if (IS_ERR(p))
40930+ if (IS_ERR((const char __force_kernel *)p))
40931 return -EFAULT;
40932
40933 if (i++ >= max)
40934@@ -484,7 +487,7 @@ static int copy_strings(int argc, struct
40935
40936 ret = -EFAULT;
40937 str = get_user_arg_ptr(argv, argc);
40938- if (IS_ERR(str))
40939+ if (IS_ERR((const char __force_kernel *)str))
40940 goto out;
40941
40942 len = strnlen_user(str, MAX_ARG_STRLEN);
40943@@ -566,7 +569,7 @@ int copy_strings_kernel(int argc, const
40944 int r;
40945 mm_segment_t oldfs = get_fs();
40946 struct user_arg_ptr argv = {
40947- .ptr.native = (const char __user *const __user *)__argv,
40948+ .ptr.native = (const char __force_user *const __force_user *)__argv,
40949 };
40950
40951 set_fs(KERNEL_DS);
40952@@ -601,7 +604,8 @@ static int shift_arg_pages(struct vm_are
40953 unsigned long new_end = old_end - shift;
40954 struct mmu_gather tlb;
40955
40956- BUG_ON(new_start > new_end);
40957+ if (new_start >= new_end || new_start < mmap_min_addr)
40958+ return -ENOMEM;
40959
40960 /*
40961 * ensure there are no vmas between where we want to go
40962@@ -610,6 +614,10 @@ static int shift_arg_pages(struct vm_are
40963 if (vma != find_vma(mm, new_start))
40964 return -EFAULT;
40965
40966+#ifdef CONFIG_PAX_SEGMEXEC
40967+ BUG_ON(pax_find_mirror_vma(vma));
40968+#endif
40969+
40970 /*
40971 * cover the whole range: [new_start, old_end)
40972 */
40973@@ -690,10 +698,6 @@ int setup_arg_pages(struct linux_binprm
40974 stack_top = arch_align_stack(stack_top);
40975 stack_top = PAGE_ALIGN(stack_top);
40976
40977- if (unlikely(stack_top < mmap_min_addr) ||
40978- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
40979- return -ENOMEM;
40980-
40981 stack_shift = vma->vm_end - stack_top;
40982
40983 bprm->p -= stack_shift;
40984@@ -705,8 +709,28 @@ int setup_arg_pages(struct linux_binprm
40985 bprm->exec -= stack_shift;
40986
40987 down_write(&mm->mmap_sem);
40988+
40989+ /* Move stack pages down in memory. */
40990+ if (stack_shift) {
40991+ ret = shift_arg_pages(vma, stack_shift);
40992+ if (ret)
40993+ goto out_unlock;
40994+ }
40995+
40996 vm_flags = VM_STACK_FLAGS;
40997
40998+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40999+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41000+ vm_flags &= ~VM_EXEC;
41001+
41002+#ifdef CONFIG_PAX_MPROTECT
41003+ if (mm->pax_flags & MF_PAX_MPROTECT)
41004+ vm_flags &= ~VM_MAYEXEC;
41005+#endif
41006+
41007+ }
41008+#endif
41009+
41010 /*
41011 * Adjust stack execute permissions; explicitly enable for
41012 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
41013@@ -725,13 +749,6 @@ int setup_arg_pages(struct linux_binprm
41014 goto out_unlock;
41015 BUG_ON(prev != vma);
41016
41017- /* Move stack pages down in memory. */
41018- if (stack_shift) {
41019- ret = shift_arg_pages(vma, stack_shift);
41020- if (ret)
41021- goto out_unlock;
41022- }
41023-
41024 /* mprotect_fixup is overkill to remove the temporary stack flags */
41025 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
41026
41027@@ -771,7 +788,7 @@ struct file *open_exec(const char *name)
41028 struct file *file;
41029 int err;
41030 static const struct open_flags open_exec_flags = {
41031- .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
41032+ .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
41033 .acc_mode = MAY_EXEC | MAY_OPEN,
41034 .intent = LOOKUP_OPEN
41035 };
41036@@ -812,7 +829,7 @@ int kernel_read(struct file *file, loff_
41037 old_fs = get_fs();
41038 set_fs(get_ds());
41039 /* The cast to a user pointer is valid due to the set_fs() */
41040- result = vfs_read(file, (void __user *)addr, count, &pos);
41041+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
41042 set_fs(old_fs);
41043 return result;
41044 }
41045@@ -1236,7 +1253,7 @@ int check_unsafe_exec(struct linux_binpr
41046 }
41047 rcu_read_unlock();
41048
41049- if (p->fs->users > n_fs) {
41050+ if (atomic_read(&p->fs->users) > n_fs) {
41051 bprm->unsafe |= LSM_UNSAFE_SHARE;
41052 } else {
41053 res = -EAGAIN;
41054@@ -1430,11 +1447,35 @@ static int do_execve_common(const char *
41055 struct user_arg_ptr envp,
41056 struct pt_regs *regs)
41057 {
41058+#ifdef CONFIG_GRKERNSEC
41059+ struct file *old_exec_file;
41060+ struct acl_subject_label *old_acl;
41061+ struct rlimit old_rlim[RLIM_NLIMITS];
41062+#endif
41063 struct linux_binprm *bprm;
41064 struct file *file;
41065 struct files_struct *displaced;
41066 bool clear_in_exec;
41067 int retval;
41068+ const struct cred *cred = current_cred();
41069+
41070+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
41071+
41072+ /*
41073+ * We move the actual failure in case of RLIMIT_NPROC excess from
41074+ * set*uid() to execve() because too many poorly written programs
41075+ * don't check setuid() return code. Here we additionally recheck
41076+ * whether NPROC limit is still exceeded.
41077+ */
41078+ if ((current->flags & PF_NPROC_EXCEEDED) &&
41079+ atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) {
41080+ retval = -EAGAIN;
41081+ goto out_ret;
41082+ }
41083+
41084+ /* We're below the limit (still or again), so we don't want to make
41085+ * further execve() calls fail. */
41086+ current->flags &= ~PF_NPROC_EXCEEDED;
41087
41088 retval = unshare_files(&displaced);
41089 if (retval)
41090@@ -1466,6 +1507,16 @@ static int do_execve_common(const char *
41091 bprm->filename = filename;
41092 bprm->interp = filename;
41093
41094+ if (gr_process_user_ban()) {
41095+ retval = -EPERM;
41096+ goto out_file;
41097+ }
41098+
41099+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
41100+ retval = -EACCES;
41101+ goto out_file;
41102+ }
41103+
41104 retval = bprm_mm_init(bprm);
41105 if (retval)
41106 goto out_file;
41107@@ -1495,9 +1546,40 @@ static int do_execve_common(const char *
41108 if (retval < 0)
41109 goto out;
41110
41111+ if (!gr_tpe_allow(file)) {
41112+ retval = -EACCES;
41113+ goto out;
41114+ }
41115+
41116+ if (gr_check_crash_exec(file)) {
41117+ retval = -EACCES;
41118+ goto out;
41119+ }
41120+
41121+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
41122+
41123+ gr_handle_exec_args(bprm, argv);
41124+
41125+#ifdef CONFIG_GRKERNSEC
41126+ old_acl = current->acl;
41127+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
41128+ old_exec_file = current->exec_file;
41129+ get_file(file);
41130+ current->exec_file = file;
41131+#endif
41132+
41133+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
41134+ bprm->unsafe & LSM_UNSAFE_SHARE);
41135+ if (retval < 0)
41136+ goto out_fail;
41137+
41138 retval = search_binary_handler(bprm,regs);
41139 if (retval < 0)
41140- goto out;
41141+ goto out_fail;
41142+#ifdef CONFIG_GRKERNSEC
41143+ if (old_exec_file)
41144+ fput(old_exec_file);
41145+#endif
41146
41147 /* execve succeeded */
41148 current->fs->in_exec = 0;
41149@@ -1508,6 +1590,14 @@ static int do_execve_common(const char *
41150 put_files_struct(displaced);
41151 return retval;
41152
41153+out_fail:
41154+#ifdef CONFIG_GRKERNSEC
41155+ current->acl = old_acl;
41156+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
41157+ fput(current->exec_file);
41158+ current->exec_file = old_exec_file;
41159+#endif
41160+
41161 out:
41162 if (bprm->mm) {
41163 acct_arg_size(bprm, 0);
41164@@ -1581,7 +1671,7 @@ static int expand_corename(struct core_n
41165 {
41166 char *old_corename = cn->corename;
41167
41168- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
41169+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
41170 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
41171
41172 if (!cn->corename) {
41173@@ -1669,7 +1759,7 @@ static int format_corename(struct core_n
41174 int pid_in_pattern = 0;
41175 int err = 0;
41176
41177- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
41178+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
41179 cn->corename = kmalloc(cn->size, GFP_KERNEL);
41180 cn->used = 0;
41181
41182@@ -1760,6 +1850,219 @@ out:
41183 return ispipe;
41184 }
41185
41186+int pax_check_flags(unsigned long *flags)
41187+{
41188+ int retval = 0;
41189+
41190+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
41191+ if (*flags & MF_PAX_SEGMEXEC)
41192+ {
41193+ *flags &= ~MF_PAX_SEGMEXEC;
41194+ retval = -EINVAL;
41195+ }
41196+#endif
41197+
41198+ if ((*flags & MF_PAX_PAGEEXEC)
41199+
41200+#ifdef CONFIG_PAX_PAGEEXEC
41201+ && (*flags & MF_PAX_SEGMEXEC)
41202+#endif
41203+
41204+ )
41205+ {
41206+ *flags &= ~MF_PAX_PAGEEXEC;
41207+ retval = -EINVAL;
41208+ }
41209+
41210+ if ((*flags & MF_PAX_MPROTECT)
41211+
41212+#ifdef CONFIG_PAX_MPROTECT
41213+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41214+#endif
41215+
41216+ )
41217+ {
41218+ *flags &= ~MF_PAX_MPROTECT;
41219+ retval = -EINVAL;
41220+ }
41221+
41222+ if ((*flags & MF_PAX_EMUTRAMP)
41223+
41224+#ifdef CONFIG_PAX_EMUTRAMP
41225+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41226+#endif
41227+
41228+ )
41229+ {
41230+ *flags &= ~MF_PAX_EMUTRAMP;
41231+ retval = -EINVAL;
41232+ }
41233+
41234+ return retval;
41235+}
41236+
41237+EXPORT_SYMBOL(pax_check_flags);
41238+
41239+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41240+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
41241+{
41242+ struct task_struct *tsk = current;
41243+ struct mm_struct *mm = current->mm;
41244+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
41245+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
41246+ char *path_exec = NULL;
41247+ char *path_fault = NULL;
41248+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
41249+
41250+ if (buffer_exec && buffer_fault) {
41251+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
41252+
41253+ down_read(&mm->mmap_sem);
41254+ vma = mm->mmap;
41255+ while (vma && (!vma_exec || !vma_fault)) {
41256+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
41257+ vma_exec = vma;
41258+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
41259+ vma_fault = vma;
41260+ vma = vma->vm_next;
41261+ }
41262+ if (vma_exec) {
41263+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
41264+ if (IS_ERR(path_exec))
41265+ path_exec = "<path too long>";
41266+ else {
41267+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
41268+ if (path_exec) {
41269+ *path_exec = 0;
41270+ path_exec = buffer_exec;
41271+ } else
41272+ path_exec = "<path too long>";
41273+ }
41274+ }
41275+ if (vma_fault) {
41276+ start = vma_fault->vm_start;
41277+ end = vma_fault->vm_end;
41278+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
41279+ if (vma_fault->vm_file) {
41280+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
41281+ if (IS_ERR(path_fault))
41282+ path_fault = "<path too long>";
41283+ else {
41284+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
41285+ if (path_fault) {
41286+ *path_fault = 0;
41287+ path_fault = buffer_fault;
41288+ } else
41289+ path_fault = "<path too long>";
41290+ }
41291+ } else
41292+ path_fault = "<anonymous mapping>";
41293+ }
41294+ up_read(&mm->mmap_sem);
41295+ }
41296+ if (tsk->signal->curr_ip)
41297+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
41298+ else
41299+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
41300+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
41301+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
41302+ task_uid(tsk), task_euid(tsk), pc, sp);
41303+ free_page((unsigned long)buffer_exec);
41304+ free_page((unsigned long)buffer_fault);
41305+ pax_report_insns(pc, sp);
41306+ do_coredump(SIGKILL, SIGKILL, regs);
41307+}
41308+#endif
41309+
41310+#ifdef CONFIG_PAX_REFCOUNT
41311+void pax_report_refcount_overflow(struct pt_regs *regs)
41312+{
41313+ if (current->signal->curr_ip)
41314+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41315+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
41316+ else
41317+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41318+ current->comm, task_pid_nr(current), current_uid(), current_euid());
41319+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
41320+ show_regs(regs);
41321+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
41322+}
41323+#endif
41324+
41325+#ifdef CONFIG_PAX_USERCOPY
41326+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
41327+int object_is_on_stack(const void *obj, unsigned long len)
41328+{
41329+ const void * const stack = task_stack_page(current);
41330+ const void * const stackend = stack + THREAD_SIZE;
41331+
41332+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41333+ const void *frame = NULL;
41334+ const void *oldframe;
41335+#endif
41336+
41337+ if (obj + len < obj)
41338+ return -1;
41339+
41340+ if (obj + len <= stack || stackend <= obj)
41341+ return 0;
41342+
41343+ if (obj < stack || stackend < obj + len)
41344+ return -1;
41345+
41346+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41347+ oldframe = __builtin_frame_address(1);
41348+ if (oldframe)
41349+ frame = __builtin_frame_address(2);
41350+ /*
41351+ low ----------------------------------------------> high
41352+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
41353+ ^----------------^
41354+ allow copies only within here
41355+ */
41356+ while (stack <= frame && frame < stackend) {
41357+ /* if obj + len extends past the last frame, this
41358+ check won't pass and the next frame will be 0,
41359+ causing us to bail out and correctly report
41360+ the copy as invalid
41361+ */
41362+ if (obj + len <= frame)
41363+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
41364+ oldframe = frame;
41365+ frame = *(const void * const *)frame;
41366+ }
41367+ return -1;
41368+#else
41369+ return 1;
41370+#endif
41371+}
41372+
41373+
41374+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
41375+{
41376+ if (current->signal->curr_ip)
41377+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41378+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41379+ else
41380+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41381+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41382+ dump_stack();
41383+ gr_handle_kernel_exploit();
41384+ do_group_exit(SIGKILL);
41385+}
41386+#endif
41387+
41388+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
41389+void pax_track_stack(void)
41390+{
41391+ unsigned long sp = (unsigned long)&sp;
41392+ if (sp < current_thread_info()->lowest_stack &&
41393+ sp > (unsigned long)task_stack_page(current))
41394+ current_thread_info()->lowest_stack = sp;
41395+}
41396+EXPORT_SYMBOL(pax_track_stack);
41397+#endif
41398+
41399 static int zap_process(struct task_struct *start, int exit_code)
41400 {
41401 struct task_struct *t;
41402@@ -1971,17 +2274,17 @@ static void wait_for_dump_helpers(struct
41403 pipe = file->f_path.dentry->d_inode->i_pipe;
41404
41405 pipe_lock(pipe);
41406- pipe->readers++;
41407- pipe->writers--;
41408+ atomic_inc(&pipe->readers);
41409+ atomic_dec(&pipe->writers);
41410
41411- while ((pipe->readers > 1) && (!signal_pending(current))) {
41412+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
41413 wake_up_interruptible_sync(&pipe->wait);
41414 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
41415 pipe_wait(pipe);
41416 }
41417
41418- pipe->readers--;
41419- pipe->writers++;
41420+ atomic_dec(&pipe->readers);
41421+ atomic_inc(&pipe->writers);
41422 pipe_unlock(pipe);
41423
41424 }
41425@@ -2042,7 +2345,7 @@ void do_coredump(long signr, int exit_co
41426 int retval = 0;
41427 int flag = 0;
41428 int ispipe;
41429- static atomic_t core_dump_count = ATOMIC_INIT(0);
41430+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
41431 struct coredump_params cprm = {
41432 .signr = signr,
41433 .regs = regs,
41434@@ -2057,6 +2360,9 @@ void do_coredump(long signr, int exit_co
41435
41436 audit_core_dumps(signr);
41437
41438+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
41439+ gr_handle_brute_attach(current, cprm.mm_flags);
41440+
41441 binfmt = mm->binfmt;
41442 if (!binfmt || !binfmt->core_dump)
41443 goto fail;
41444@@ -2097,6 +2403,8 @@ void do_coredump(long signr, int exit_co
41445 goto fail_corename;
41446 }
41447
41448+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
41449+
41450 if (ispipe) {
41451 int dump_count;
41452 char **helper_argv;
41453@@ -2124,7 +2432,7 @@ void do_coredump(long signr, int exit_co
41454 }
41455 cprm.limit = RLIM_INFINITY;
41456
41457- dump_count = atomic_inc_return(&core_dump_count);
41458+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
41459 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
41460 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
41461 task_tgid_vnr(current), current->comm);
41462@@ -2194,7 +2502,7 @@ close_fail:
41463 filp_close(cprm.file, NULL);
41464 fail_dropcount:
41465 if (ispipe)
41466- atomic_dec(&core_dump_count);
41467+ atomic_dec_unchecked(&core_dump_count);
41468 fail_unlock:
41469 kfree(cn.corename);
41470 fail_corename:
41471@@ -2213,7 +2521,7 @@ fail:
41472 */
41473 int dump_write(struct file *file, const void *addr, int nr)
41474 {
41475- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
41476+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
41477 }
41478 EXPORT_SYMBOL(dump_write);
41479
41480diff -urNp linux-3.0.7/fs/ext2/balloc.c linux-3.0.7/fs/ext2/balloc.c
41481--- linux-3.0.7/fs/ext2/balloc.c 2011-07-21 22:17:23.000000000 -0400
41482+++ linux-3.0.7/fs/ext2/balloc.c 2011-08-23 21:48:14.000000000 -0400
41483@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
41484
41485 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41486 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41487- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41488+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41489 sbi->s_resuid != current_fsuid() &&
41490 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41491 return 0;
41492diff -urNp linux-3.0.7/fs/ext3/balloc.c linux-3.0.7/fs/ext3/balloc.c
41493--- linux-3.0.7/fs/ext3/balloc.c 2011-07-21 22:17:23.000000000 -0400
41494+++ linux-3.0.7/fs/ext3/balloc.c 2011-08-23 21:48:14.000000000 -0400
41495@@ -1441,7 +1441,7 @@ static int ext3_has_free_blocks(struct e
41496
41497 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41498 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41499- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41500+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41501 sbi->s_resuid != current_fsuid() &&
41502 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41503 return 0;
41504diff -urNp linux-3.0.7/fs/ext3/ioctl.c linux-3.0.7/fs/ext3/ioctl.c
41505--- linux-3.0.7/fs/ext3/ioctl.c 2011-07-21 22:17:23.000000000 -0400
41506+++ linux-3.0.7/fs/ext3/ioctl.c 2011-10-06 04:17:55.000000000 -0400
41507@@ -285,7 +285,7 @@ group_add_out:
41508 if (!capable(CAP_SYS_ADMIN))
41509 return -EPERM;
41510
41511- if (copy_from_user(&range, (struct fstrim_range *)arg,
41512+ if (copy_from_user(&range, (struct fstrim_range __user *)arg,
41513 sizeof(range)))
41514 return -EFAULT;
41515
41516@@ -293,7 +293,7 @@ group_add_out:
41517 if (ret < 0)
41518 return ret;
41519
41520- if (copy_to_user((struct fstrim_range *)arg, &range,
41521+ if (copy_to_user((struct fstrim_range __user *)arg, &range,
41522 sizeof(range)))
41523 return -EFAULT;
41524
41525diff -urNp linux-3.0.7/fs/ext4/balloc.c linux-3.0.7/fs/ext4/balloc.c
41526--- linux-3.0.7/fs/ext4/balloc.c 2011-07-21 22:17:23.000000000 -0400
41527+++ linux-3.0.7/fs/ext4/balloc.c 2011-08-23 21:48:14.000000000 -0400
41528@@ -394,8 +394,8 @@ static int ext4_has_free_blocks(struct e
41529 /* Hm, nope. Are (enough) root reserved blocks available? */
41530 if (sbi->s_resuid == current_fsuid() ||
41531 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
41532- capable(CAP_SYS_RESOURCE) ||
41533- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
41534+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
41535+ capable_nolog(CAP_SYS_RESOURCE)) {
41536
41537 if (free_blocks >= (nblocks + dirty_blocks))
41538 return 1;
41539diff -urNp linux-3.0.7/fs/ext4/ext4.h linux-3.0.7/fs/ext4/ext4.h
41540--- linux-3.0.7/fs/ext4/ext4.h 2011-09-02 18:11:21.000000000 -0400
41541+++ linux-3.0.7/fs/ext4/ext4.h 2011-08-23 21:47:56.000000000 -0400
41542@@ -1177,19 +1177,19 @@ struct ext4_sb_info {
41543 unsigned long s_mb_last_start;
41544
41545 /* stats for buddy allocator */
41546- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
41547- atomic_t s_bal_success; /* we found long enough chunks */
41548- atomic_t s_bal_allocated; /* in blocks */
41549- atomic_t s_bal_ex_scanned; /* total extents scanned */
41550- atomic_t s_bal_goals; /* goal hits */
41551- atomic_t s_bal_breaks; /* too long searches */
41552- atomic_t s_bal_2orders; /* 2^order hits */
41553+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
41554+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
41555+ atomic_unchecked_t s_bal_allocated; /* in blocks */
41556+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
41557+ atomic_unchecked_t s_bal_goals; /* goal hits */
41558+ atomic_unchecked_t s_bal_breaks; /* too long searches */
41559+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
41560 spinlock_t s_bal_lock;
41561 unsigned long s_mb_buddies_generated;
41562 unsigned long long s_mb_generation_time;
41563- atomic_t s_mb_lost_chunks;
41564- atomic_t s_mb_preallocated;
41565- atomic_t s_mb_discarded;
41566+ atomic_unchecked_t s_mb_lost_chunks;
41567+ atomic_unchecked_t s_mb_preallocated;
41568+ atomic_unchecked_t s_mb_discarded;
41569 atomic_t s_lock_busy;
41570
41571 /* locality groups */
41572diff -urNp linux-3.0.7/fs/ext4/file.c linux-3.0.7/fs/ext4/file.c
41573--- linux-3.0.7/fs/ext4/file.c 2011-07-21 22:17:23.000000000 -0400
41574+++ linux-3.0.7/fs/ext4/file.c 2011-10-17 02:30:30.000000000 -0400
41575@@ -181,8 +181,8 @@ static int ext4_file_open(struct inode *
41576 path.dentry = mnt->mnt_root;
41577 cp = d_path(&path, buf, sizeof(buf));
41578 if (!IS_ERR(cp)) {
41579- memcpy(sbi->s_es->s_last_mounted, cp,
41580- sizeof(sbi->s_es->s_last_mounted));
41581+ strlcpy(sbi->s_es->s_last_mounted, cp,
41582+ sizeof(sbi->s_es->s_last_mounted));
41583 ext4_mark_super_dirty(sb);
41584 }
41585 }
41586diff -urNp linux-3.0.7/fs/ext4/ioctl.c linux-3.0.7/fs/ext4/ioctl.c
41587--- linux-3.0.7/fs/ext4/ioctl.c 2011-07-21 22:17:23.000000000 -0400
41588+++ linux-3.0.7/fs/ext4/ioctl.c 2011-10-06 04:17:55.000000000 -0400
41589@@ -344,7 +344,7 @@ mext_out:
41590 if (!blk_queue_discard(q))
41591 return -EOPNOTSUPP;
41592
41593- if (copy_from_user(&range, (struct fstrim_range *)arg,
41594+ if (copy_from_user(&range, (struct fstrim_range __user *)arg,
41595 sizeof(range)))
41596 return -EFAULT;
41597
41598@@ -354,7 +354,7 @@ mext_out:
41599 if (ret < 0)
41600 return ret;
41601
41602- if (copy_to_user((struct fstrim_range *)arg, &range,
41603+ if (copy_to_user((struct fstrim_range __user *)arg, &range,
41604 sizeof(range)))
41605 return -EFAULT;
41606
41607diff -urNp linux-3.0.7/fs/ext4/mballoc.c linux-3.0.7/fs/ext4/mballoc.c
41608--- linux-3.0.7/fs/ext4/mballoc.c 2011-09-02 18:11:21.000000000 -0400
41609+++ linux-3.0.7/fs/ext4/mballoc.c 2011-08-23 21:48:14.000000000 -0400
41610@@ -1793,7 +1793,7 @@ void ext4_mb_simple_scan_group(struct ex
41611 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
41612
41613 if (EXT4_SB(sb)->s_mb_stats)
41614- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
41615+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
41616
41617 break;
41618 }
41619@@ -2087,7 +2087,7 @@ repeat:
41620 ac->ac_status = AC_STATUS_CONTINUE;
41621 ac->ac_flags |= EXT4_MB_HINT_FIRST;
41622 cr = 3;
41623- atomic_inc(&sbi->s_mb_lost_chunks);
41624+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
41625 goto repeat;
41626 }
41627 }
41628@@ -2130,6 +2130,8 @@ static int ext4_mb_seq_groups_show(struc
41629 ext4_grpblk_t counters[16];
41630 } sg;
41631
41632+ pax_track_stack();
41633+
41634 group--;
41635 if (group == 0)
41636 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
41637@@ -2553,25 +2555,25 @@ int ext4_mb_release(struct super_block *
41638 if (sbi->s_mb_stats) {
41639 printk(KERN_INFO
41640 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
41641- atomic_read(&sbi->s_bal_allocated),
41642- atomic_read(&sbi->s_bal_reqs),
41643- atomic_read(&sbi->s_bal_success));
41644+ atomic_read_unchecked(&sbi->s_bal_allocated),
41645+ atomic_read_unchecked(&sbi->s_bal_reqs),
41646+ atomic_read_unchecked(&sbi->s_bal_success));
41647 printk(KERN_INFO
41648 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
41649 "%u 2^N hits, %u breaks, %u lost\n",
41650- atomic_read(&sbi->s_bal_ex_scanned),
41651- atomic_read(&sbi->s_bal_goals),
41652- atomic_read(&sbi->s_bal_2orders),
41653- atomic_read(&sbi->s_bal_breaks),
41654- atomic_read(&sbi->s_mb_lost_chunks));
41655+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
41656+ atomic_read_unchecked(&sbi->s_bal_goals),
41657+ atomic_read_unchecked(&sbi->s_bal_2orders),
41658+ atomic_read_unchecked(&sbi->s_bal_breaks),
41659+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
41660 printk(KERN_INFO
41661 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
41662 sbi->s_mb_buddies_generated++,
41663 sbi->s_mb_generation_time);
41664 printk(KERN_INFO
41665 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
41666- atomic_read(&sbi->s_mb_preallocated),
41667- atomic_read(&sbi->s_mb_discarded));
41668+ atomic_read_unchecked(&sbi->s_mb_preallocated),
41669+ atomic_read_unchecked(&sbi->s_mb_discarded));
41670 }
41671
41672 free_percpu(sbi->s_locality_groups);
41673@@ -3041,16 +3043,16 @@ static void ext4_mb_collect_stats(struct
41674 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
41675
41676 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
41677- atomic_inc(&sbi->s_bal_reqs);
41678- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
41679+ atomic_inc_unchecked(&sbi->s_bal_reqs);
41680+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
41681 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
41682- atomic_inc(&sbi->s_bal_success);
41683- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
41684+ atomic_inc_unchecked(&sbi->s_bal_success);
41685+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
41686 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
41687 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
41688- atomic_inc(&sbi->s_bal_goals);
41689+ atomic_inc_unchecked(&sbi->s_bal_goals);
41690 if (ac->ac_found > sbi->s_mb_max_to_scan)
41691- atomic_inc(&sbi->s_bal_breaks);
41692+ atomic_inc_unchecked(&sbi->s_bal_breaks);
41693 }
41694
41695 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
41696@@ -3448,7 +3450,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
41697 trace_ext4_mb_new_inode_pa(ac, pa);
41698
41699 ext4_mb_use_inode_pa(ac, pa);
41700- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41701+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41702
41703 ei = EXT4_I(ac->ac_inode);
41704 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
41705@@ -3508,7 +3510,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
41706 trace_ext4_mb_new_group_pa(ac, pa);
41707
41708 ext4_mb_use_group_pa(ac, pa);
41709- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41710+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41711
41712 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
41713 lg = ac->ac_lg;
41714@@ -3595,7 +3597,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
41715 * from the bitmap and continue.
41716 */
41717 }
41718- atomic_add(free, &sbi->s_mb_discarded);
41719+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
41720
41721 return err;
41722 }
41723@@ -3613,7 +3615,7 @@ ext4_mb_release_group_pa(struct ext4_bud
41724 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
41725 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
41726 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
41727- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
41728+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
41729 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
41730
41731 return 0;
41732diff -urNp linux-3.0.7/fs/fcntl.c linux-3.0.7/fs/fcntl.c
41733--- linux-3.0.7/fs/fcntl.c 2011-07-21 22:17:23.000000000 -0400
41734+++ linux-3.0.7/fs/fcntl.c 2011-10-06 04:17:55.000000000 -0400
41735@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
41736 if (err)
41737 return err;
41738
41739+ if (gr_handle_chroot_fowner(pid, type))
41740+ return -ENOENT;
41741+ if (gr_check_protected_task_fowner(pid, type))
41742+ return -EACCES;
41743+
41744 f_modown(filp, pid, type, force);
41745 return 0;
41746 }
41747@@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
41748
41749 static int f_setown_ex(struct file *filp, unsigned long arg)
41750 {
41751- struct f_owner_ex * __user owner_p = (void * __user)arg;
41752+ struct f_owner_ex __user *owner_p = (void __user *)arg;
41753 struct f_owner_ex owner;
41754 struct pid *pid;
41755 int type;
41756@@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp
41757
41758 static int f_getown_ex(struct file *filp, unsigned long arg)
41759 {
41760- struct f_owner_ex * __user owner_p = (void * __user)arg;
41761+ struct f_owner_ex __user *owner_p = (void __user *)arg;
41762 struct f_owner_ex owner;
41763 int ret = 0;
41764
41765@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
41766 switch (cmd) {
41767 case F_DUPFD:
41768 case F_DUPFD_CLOEXEC:
41769+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
41770 if (arg >= rlimit(RLIMIT_NOFILE))
41771 break;
41772 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
41773@@ -835,14 +841,14 @@ static int __init fcntl_init(void)
41774 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
41775 * is defined as O_NONBLOCK on some platforms and not on others.
41776 */
41777- BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
41778+ BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
41779 O_RDONLY | O_WRONLY | O_RDWR |
41780 O_CREAT | O_EXCL | O_NOCTTY |
41781 O_TRUNC | O_APPEND | /* O_NONBLOCK | */
41782 __O_SYNC | O_DSYNC | FASYNC |
41783 O_DIRECT | O_LARGEFILE | O_DIRECTORY |
41784 O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
41785- __FMODE_EXEC | O_PATH
41786+ __FMODE_EXEC | O_PATH | FMODE_GREXEC
41787 ));
41788
41789 fasync_cache = kmem_cache_create("fasync_cache",
41790diff -urNp linux-3.0.7/fs/fifo.c linux-3.0.7/fs/fifo.c
41791--- linux-3.0.7/fs/fifo.c 2011-07-21 22:17:23.000000000 -0400
41792+++ linux-3.0.7/fs/fifo.c 2011-08-23 21:47:56.000000000 -0400
41793@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
41794 */
41795 filp->f_op = &read_pipefifo_fops;
41796 pipe->r_counter++;
41797- if (pipe->readers++ == 0)
41798+ if (atomic_inc_return(&pipe->readers) == 1)
41799 wake_up_partner(inode);
41800
41801- if (!pipe->writers) {
41802+ if (!atomic_read(&pipe->writers)) {
41803 if ((filp->f_flags & O_NONBLOCK)) {
41804 /* suppress POLLHUP until we have
41805 * seen a writer */
41806@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
41807 * errno=ENXIO when there is no process reading the FIFO.
41808 */
41809 ret = -ENXIO;
41810- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
41811+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
41812 goto err;
41813
41814 filp->f_op = &write_pipefifo_fops;
41815 pipe->w_counter++;
41816- if (!pipe->writers++)
41817+ if (atomic_inc_return(&pipe->writers) == 1)
41818 wake_up_partner(inode);
41819
41820- if (!pipe->readers) {
41821+ if (!atomic_read(&pipe->readers)) {
41822 wait_for_partner(inode, &pipe->r_counter);
41823 if (signal_pending(current))
41824 goto err_wr;
41825@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
41826 */
41827 filp->f_op = &rdwr_pipefifo_fops;
41828
41829- pipe->readers++;
41830- pipe->writers++;
41831+ atomic_inc(&pipe->readers);
41832+ atomic_inc(&pipe->writers);
41833 pipe->r_counter++;
41834 pipe->w_counter++;
41835- if (pipe->readers == 1 || pipe->writers == 1)
41836+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
41837 wake_up_partner(inode);
41838 break;
41839
41840@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
41841 return 0;
41842
41843 err_rd:
41844- if (!--pipe->readers)
41845+ if (atomic_dec_and_test(&pipe->readers))
41846 wake_up_interruptible(&pipe->wait);
41847 ret = -ERESTARTSYS;
41848 goto err;
41849
41850 err_wr:
41851- if (!--pipe->writers)
41852+ if (atomic_dec_and_test(&pipe->writers))
41853 wake_up_interruptible(&pipe->wait);
41854 ret = -ERESTARTSYS;
41855 goto err;
41856
41857 err:
41858- if (!pipe->readers && !pipe->writers)
41859+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
41860 free_pipe_info(inode);
41861
41862 err_nocleanup:
41863diff -urNp linux-3.0.7/fs/file.c linux-3.0.7/fs/file.c
41864--- linux-3.0.7/fs/file.c 2011-07-21 22:17:23.000000000 -0400
41865+++ linux-3.0.7/fs/file.c 2011-08-23 21:48:14.000000000 -0400
41866@@ -15,6 +15,7 @@
41867 #include <linux/slab.h>
41868 #include <linux/vmalloc.h>
41869 #include <linux/file.h>
41870+#include <linux/security.h>
41871 #include <linux/fdtable.h>
41872 #include <linux/bitops.h>
41873 #include <linux/interrupt.h>
41874@@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
41875 * N.B. For clone tasks sharing a files structure, this test
41876 * will limit the total number of files that can be opened.
41877 */
41878+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
41879 if (nr >= rlimit(RLIMIT_NOFILE))
41880 return -EMFILE;
41881
41882diff -urNp linux-3.0.7/fs/filesystems.c linux-3.0.7/fs/filesystems.c
41883--- linux-3.0.7/fs/filesystems.c 2011-07-21 22:17:23.000000000 -0400
41884+++ linux-3.0.7/fs/filesystems.c 2011-08-23 21:48:14.000000000 -0400
41885@@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
41886 int len = dot ? dot - name : strlen(name);
41887
41888 fs = __get_fs_type(name, len);
41889+
41890+#ifdef CONFIG_GRKERNSEC_MODHARDEN
41891+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
41892+#else
41893 if (!fs && (request_module("%.*s", len, name) == 0))
41894+#endif
41895 fs = __get_fs_type(name, len);
41896
41897 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
41898diff -urNp linux-3.0.7/fs/fscache/cookie.c linux-3.0.7/fs/fscache/cookie.c
41899--- linux-3.0.7/fs/fscache/cookie.c 2011-07-21 22:17:23.000000000 -0400
41900+++ linux-3.0.7/fs/fscache/cookie.c 2011-08-23 21:47:56.000000000 -0400
41901@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
41902 parent ? (char *) parent->def->name : "<no-parent>",
41903 def->name, netfs_data);
41904
41905- fscache_stat(&fscache_n_acquires);
41906+ fscache_stat_unchecked(&fscache_n_acquires);
41907
41908 /* if there's no parent cookie, then we don't create one here either */
41909 if (!parent) {
41910- fscache_stat(&fscache_n_acquires_null);
41911+ fscache_stat_unchecked(&fscache_n_acquires_null);
41912 _leave(" [no parent]");
41913 return NULL;
41914 }
41915@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
41916 /* allocate and initialise a cookie */
41917 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
41918 if (!cookie) {
41919- fscache_stat(&fscache_n_acquires_oom);
41920+ fscache_stat_unchecked(&fscache_n_acquires_oom);
41921 _leave(" [ENOMEM]");
41922 return NULL;
41923 }
41924@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
41925
41926 switch (cookie->def->type) {
41927 case FSCACHE_COOKIE_TYPE_INDEX:
41928- fscache_stat(&fscache_n_cookie_index);
41929+ fscache_stat_unchecked(&fscache_n_cookie_index);
41930 break;
41931 case FSCACHE_COOKIE_TYPE_DATAFILE:
41932- fscache_stat(&fscache_n_cookie_data);
41933+ fscache_stat_unchecked(&fscache_n_cookie_data);
41934 break;
41935 default:
41936- fscache_stat(&fscache_n_cookie_special);
41937+ fscache_stat_unchecked(&fscache_n_cookie_special);
41938 break;
41939 }
41940
41941@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
41942 if (fscache_acquire_non_index_cookie(cookie) < 0) {
41943 atomic_dec(&parent->n_children);
41944 __fscache_cookie_put(cookie);
41945- fscache_stat(&fscache_n_acquires_nobufs);
41946+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
41947 _leave(" = NULL");
41948 return NULL;
41949 }
41950 }
41951
41952- fscache_stat(&fscache_n_acquires_ok);
41953+ fscache_stat_unchecked(&fscache_n_acquires_ok);
41954 _leave(" = %p", cookie);
41955 return cookie;
41956 }
41957@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
41958 cache = fscache_select_cache_for_object(cookie->parent);
41959 if (!cache) {
41960 up_read(&fscache_addremove_sem);
41961- fscache_stat(&fscache_n_acquires_no_cache);
41962+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
41963 _leave(" = -ENOMEDIUM [no cache]");
41964 return -ENOMEDIUM;
41965 }
41966@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
41967 object = cache->ops->alloc_object(cache, cookie);
41968 fscache_stat_d(&fscache_n_cop_alloc_object);
41969 if (IS_ERR(object)) {
41970- fscache_stat(&fscache_n_object_no_alloc);
41971+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
41972 ret = PTR_ERR(object);
41973 goto error;
41974 }
41975
41976- fscache_stat(&fscache_n_object_alloc);
41977+ fscache_stat_unchecked(&fscache_n_object_alloc);
41978
41979 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
41980
41981@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
41982 struct fscache_object *object;
41983 struct hlist_node *_p;
41984
41985- fscache_stat(&fscache_n_updates);
41986+ fscache_stat_unchecked(&fscache_n_updates);
41987
41988 if (!cookie) {
41989- fscache_stat(&fscache_n_updates_null);
41990+ fscache_stat_unchecked(&fscache_n_updates_null);
41991 _leave(" [no cookie]");
41992 return;
41993 }
41994@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
41995 struct fscache_object *object;
41996 unsigned long event;
41997
41998- fscache_stat(&fscache_n_relinquishes);
41999+ fscache_stat_unchecked(&fscache_n_relinquishes);
42000 if (retire)
42001- fscache_stat(&fscache_n_relinquishes_retire);
42002+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
42003
42004 if (!cookie) {
42005- fscache_stat(&fscache_n_relinquishes_null);
42006+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
42007 _leave(" [no cookie]");
42008 return;
42009 }
42010@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
42011
42012 /* wait for the cookie to finish being instantiated (or to fail) */
42013 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
42014- fscache_stat(&fscache_n_relinquishes_waitcrt);
42015+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
42016 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
42017 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
42018 }
42019diff -urNp linux-3.0.7/fs/fscache/internal.h linux-3.0.7/fs/fscache/internal.h
42020--- linux-3.0.7/fs/fscache/internal.h 2011-07-21 22:17:23.000000000 -0400
42021+++ linux-3.0.7/fs/fscache/internal.h 2011-08-23 21:47:56.000000000 -0400
42022@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
42023 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
42024 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
42025
42026-extern atomic_t fscache_n_op_pend;
42027-extern atomic_t fscache_n_op_run;
42028-extern atomic_t fscache_n_op_enqueue;
42029-extern atomic_t fscache_n_op_deferred_release;
42030-extern atomic_t fscache_n_op_release;
42031-extern atomic_t fscache_n_op_gc;
42032-extern atomic_t fscache_n_op_cancelled;
42033-extern atomic_t fscache_n_op_rejected;
42034-
42035-extern atomic_t fscache_n_attr_changed;
42036-extern atomic_t fscache_n_attr_changed_ok;
42037-extern atomic_t fscache_n_attr_changed_nobufs;
42038-extern atomic_t fscache_n_attr_changed_nomem;
42039-extern atomic_t fscache_n_attr_changed_calls;
42040-
42041-extern atomic_t fscache_n_allocs;
42042-extern atomic_t fscache_n_allocs_ok;
42043-extern atomic_t fscache_n_allocs_wait;
42044-extern atomic_t fscache_n_allocs_nobufs;
42045-extern atomic_t fscache_n_allocs_intr;
42046-extern atomic_t fscache_n_allocs_object_dead;
42047-extern atomic_t fscache_n_alloc_ops;
42048-extern atomic_t fscache_n_alloc_op_waits;
42049-
42050-extern atomic_t fscache_n_retrievals;
42051-extern atomic_t fscache_n_retrievals_ok;
42052-extern atomic_t fscache_n_retrievals_wait;
42053-extern atomic_t fscache_n_retrievals_nodata;
42054-extern atomic_t fscache_n_retrievals_nobufs;
42055-extern atomic_t fscache_n_retrievals_intr;
42056-extern atomic_t fscache_n_retrievals_nomem;
42057-extern atomic_t fscache_n_retrievals_object_dead;
42058-extern atomic_t fscache_n_retrieval_ops;
42059-extern atomic_t fscache_n_retrieval_op_waits;
42060-
42061-extern atomic_t fscache_n_stores;
42062-extern atomic_t fscache_n_stores_ok;
42063-extern atomic_t fscache_n_stores_again;
42064-extern atomic_t fscache_n_stores_nobufs;
42065-extern atomic_t fscache_n_stores_oom;
42066-extern atomic_t fscache_n_store_ops;
42067-extern atomic_t fscache_n_store_calls;
42068-extern atomic_t fscache_n_store_pages;
42069-extern atomic_t fscache_n_store_radix_deletes;
42070-extern atomic_t fscache_n_store_pages_over_limit;
42071-
42072-extern atomic_t fscache_n_store_vmscan_not_storing;
42073-extern atomic_t fscache_n_store_vmscan_gone;
42074-extern atomic_t fscache_n_store_vmscan_busy;
42075-extern atomic_t fscache_n_store_vmscan_cancelled;
42076-
42077-extern atomic_t fscache_n_marks;
42078-extern atomic_t fscache_n_uncaches;
42079-
42080-extern atomic_t fscache_n_acquires;
42081-extern atomic_t fscache_n_acquires_null;
42082-extern atomic_t fscache_n_acquires_no_cache;
42083-extern atomic_t fscache_n_acquires_ok;
42084-extern atomic_t fscache_n_acquires_nobufs;
42085-extern atomic_t fscache_n_acquires_oom;
42086-
42087-extern atomic_t fscache_n_updates;
42088-extern atomic_t fscache_n_updates_null;
42089-extern atomic_t fscache_n_updates_run;
42090-
42091-extern atomic_t fscache_n_relinquishes;
42092-extern atomic_t fscache_n_relinquishes_null;
42093-extern atomic_t fscache_n_relinquishes_waitcrt;
42094-extern atomic_t fscache_n_relinquishes_retire;
42095-
42096-extern atomic_t fscache_n_cookie_index;
42097-extern atomic_t fscache_n_cookie_data;
42098-extern atomic_t fscache_n_cookie_special;
42099-
42100-extern atomic_t fscache_n_object_alloc;
42101-extern atomic_t fscache_n_object_no_alloc;
42102-extern atomic_t fscache_n_object_lookups;
42103-extern atomic_t fscache_n_object_lookups_negative;
42104-extern atomic_t fscache_n_object_lookups_positive;
42105-extern atomic_t fscache_n_object_lookups_timed_out;
42106-extern atomic_t fscache_n_object_created;
42107-extern atomic_t fscache_n_object_avail;
42108-extern atomic_t fscache_n_object_dead;
42109-
42110-extern atomic_t fscache_n_checkaux_none;
42111-extern atomic_t fscache_n_checkaux_okay;
42112-extern atomic_t fscache_n_checkaux_update;
42113-extern atomic_t fscache_n_checkaux_obsolete;
42114+extern atomic_unchecked_t fscache_n_op_pend;
42115+extern atomic_unchecked_t fscache_n_op_run;
42116+extern atomic_unchecked_t fscache_n_op_enqueue;
42117+extern atomic_unchecked_t fscache_n_op_deferred_release;
42118+extern atomic_unchecked_t fscache_n_op_release;
42119+extern atomic_unchecked_t fscache_n_op_gc;
42120+extern atomic_unchecked_t fscache_n_op_cancelled;
42121+extern atomic_unchecked_t fscache_n_op_rejected;
42122+
42123+extern atomic_unchecked_t fscache_n_attr_changed;
42124+extern atomic_unchecked_t fscache_n_attr_changed_ok;
42125+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
42126+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
42127+extern atomic_unchecked_t fscache_n_attr_changed_calls;
42128+
42129+extern atomic_unchecked_t fscache_n_allocs;
42130+extern atomic_unchecked_t fscache_n_allocs_ok;
42131+extern atomic_unchecked_t fscache_n_allocs_wait;
42132+extern atomic_unchecked_t fscache_n_allocs_nobufs;
42133+extern atomic_unchecked_t fscache_n_allocs_intr;
42134+extern atomic_unchecked_t fscache_n_allocs_object_dead;
42135+extern atomic_unchecked_t fscache_n_alloc_ops;
42136+extern atomic_unchecked_t fscache_n_alloc_op_waits;
42137+
42138+extern atomic_unchecked_t fscache_n_retrievals;
42139+extern atomic_unchecked_t fscache_n_retrievals_ok;
42140+extern atomic_unchecked_t fscache_n_retrievals_wait;
42141+extern atomic_unchecked_t fscache_n_retrievals_nodata;
42142+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
42143+extern atomic_unchecked_t fscache_n_retrievals_intr;
42144+extern atomic_unchecked_t fscache_n_retrievals_nomem;
42145+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
42146+extern atomic_unchecked_t fscache_n_retrieval_ops;
42147+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
42148+
42149+extern atomic_unchecked_t fscache_n_stores;
42150+extern atomic_unchecked_t fscache_n_stores_ok;
42151+extern atomic_unchecked_t fscache_n_stores_again;
42152+extern atomic_unchecked_t fscache_n_stores_nobufs;
42153+extern atomic_unchecked_t fscache_n_stores_oom;
42154+extern atomic_unchecked_t fscache_n_store_ops;
42155+extern atomic_unchecked_t fscache_n_store_calls;
42156+extern atomic_unchecked_t fscache_n_store_pages;
42157+extern atomic_unchecked_t fscache_n_store_radix_deletes;
42158+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
42159+
42160+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42161+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
42162+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
42163+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42164+
42165+extern atomic_unchecked_t fscache_n_marks;
42166+extern atomic_unchecked_t fscache_n_uncaches;
42167+
42168+extern atomic_unchecked_t fscache_n_acquires;
42169+extern atomic_unchecked_t fscache_n_acquires_null;
42170+extern atomic_unchecked_t fscache_n_acquires_no_cache;
42171+extern atomic_unchecked_t fscache_n_acquires_ok;
42172+extern atomic_unchecked_t fscache_n_acquires_nobufs;
42173+extern atomic_unchecked_t fscache_n_acquires_oom;
42174+
42175+extern atomic_unchecked_t fscache_n_updates;
42176+extern atomic_unchecked_t fscache_n_updates_null;
42177+extern atomic_unchecked_t fscache_n_updates_run;
42178+
42179+extern atomic_unchecked_t fscache_n_relinquishes;
42180+extern atomic_unchecked_t fscache_n_relinquishes_null;
42181+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
42182+extern atomic_unchecked_t fscache_n_relinquishes_retire;
42183+
42184+extern atomic_unchecked_t fscache_n_cookie_index;
42185+extern atomic_unchecked_t fscache_n_cookie_data;
42186+extern atomic_unchecked_t fscache_n_cookie_special;
42187+
42188+extern atomic_unchecked_t fscache_n_object_alloc;
42189+extern atomic_unchecked_t fscache_n_object_no_alloc;
42190+extern atomic_unchecked_t fscache_n_object_lookups;
42191+extern atomic_unchecked_t fscache_n_object_lookups_negative;
42192+extern atomic_unchecked_t fscache_n_object_lookups_positive;
42193+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
42194+extern atomic_unchecked_t fscache_n_object_created;
42195+extern atomic_unchecked_t fscache_n_object_avail;
42196+extern atomic_unchecked_t fscache_n_object_dead;
42197+
42198+extern atomic_unchecked_t fscache_n_checkaux_none;
42199+extern atomic_unchecked_t fscache_n_checkaux_okay;
42200+extern atomic_unchecked_t fscache_n_checkaux_update;
42201+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
42202
42203 extern atomic_t fscache_n_cop_alloc_object;
42204 extern atomic_t fscache_n_cop_lookup_object;
42205@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
42206 atomic_inc(stat);
42207 }
42208
42209+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
42210+{
42211+ atomic_inc_unchecked(stat);
42212+}
42213+
42214 static inline void fscache_stat_d(atomic_t *stat)
42215 {
42216 atomic_dec(stat);
42217@@ -267,6 +272,7 @@ extern const struct file_operations fsca
42218
42219 #define __fscache_stat(stat) (NULL)
42220 #define fscache_stat(stat) do {} while (0)
42221+#define fscache_stat_unchecked(stat) do {} while (0)
42222 #define fscache_stat_d(stat) do {} while (0)
42223 #endif
42224
42225diff -urNp linux-3.0.7/fs/fscache/object.c linux-3.0.7/fs/fscache/object.c
42226--- linux-3.0.7/fs/fscache/object.c 2011-07-21 22:17:23.000000000 -0400
42227+++ linux-3.0.7/fs/fscache/object.c 2011-08-23 21:47:56.000000000 -0400
42228@@ -128,7 +128,7 @@ static void fscache_object_state_machine
42229 /* update the object metadata on disk */
42230 case FSCACHE_OBJECT_UPDATING:
42231 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
42232- fscache_stat(&fscache_n_updates_run);
42233+ fscache_stat_unchecked(&fscache_n_updates_run);
42234 fscache_stat(&fscache_n_cop_update_object);
42235 object->cache->ops->update_object(object);
42236 fscache_stat_d(&fscache_n_cop_update_object);
42237@@ -217,7 +217,7 @@ static void fscache_object_state_machine
42238 spin_lock(&object->lock);
42239 object->state = FSCACHE_OBJECT_DEAD;
42240 spin_unlock(&object->lock);
42241- fscache_stat(&fscache_n_object_dead);
42242+ fscache_stat_unchecked(&fscache_n_object_dead);
42243 goto terminal_transit;
42244
42245 /* handle the parent cache of this object being withdrawn from
42246@@ -232,7 +232,7 @@ static void fscache_object_state_machine
42247 spin_lock(&object->lock);
42248 object->state = FSCACHE_OBJECT_DEAD;
42249 spin_unlock(&object->lock);
42250- fscache_stat(&fscache_n_object_dead);
42251+ fscache_stat_unchecked(&fscache_n_object_dead);
42252 goto terminal_transit;
42253
42254 /* complain about the object being woken up once it is
42255@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
42256 parent->cookie->def->name, cookie->def->name,
42257 object->cache->tag->name);
42258
42259- fscache_stat(&fscache_n_object_lookups);
42260+ fscache_stat_unchecked(&fscache_n_object_lookups);
42261 fscache_stat(&fscache_n_cop_lookup_object);
42262 ret = object->cache->ops->lookup_object(object);
42263 fscache_stat_d(&fscache_n_cop_lookup_object);
42264@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
42265 if (ret == -ETIMEDOUT) {
42266 /* probably stuck behind another object, so move this one to
42267 * the back of the queue */
42268- fscache_stat(&fscache_n_object_lookups_timed_out);
42269+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
42270 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42271 }
42272
42273@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
42274
42275 spin_lock(&object->lock);
42276 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42277- fscache_stat(&fscache_n_object_lookups_negative);
42278+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
42279
42280 /* transit here to allow write requests to begin stacking up
42281 * and read requests to begin returning ENODATA */
42282@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
42283 * result, in which case there may be data available */
42284 spin_lock(&object->lock);
42285 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42286- fscache_stat(&fscache_n_object_lookups_positive);
42287+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
42288
42289 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
42290
42291@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
42292 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42293 } else {
42294 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
42295- fscache_stat(&fscache_n_object_created);
42296+ fscache_stat_unchecked(&fscache_n_object_created);
42297
42298 object->state = FSCACHE_OBJECT_AVAILABLE;
42299 spin_unlock(&object->lock);
42300@@ -602,7 +602,7 @@ static void fscache_object_available(str
42301 fscache_enqueue_dependents(object);
42302
42303 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
42304- fscache_stat(&fscache_n_object_avail);
42305+ fscache_stat_unchecked(&fscache_n_object_avail);
42306
42307 _leave("");
42308 }
42309@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
42310 enum fscache_checkaux result;
42311
42312 if (!object->cookie->def->check_aux) {
42313- fscache_stat(&fscache_n_checkaux_none);
42314+ fscache_stat_unchecked(&fscache_n_checkaux_none);
42315 return FSCACHE_CHECKAUX_OKAY;
42316 }
42317
42318@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
42319 switch (result) {
42320 /* entry okay as is */
42321 case FSCACHE_CHECKAUX_OKAY:
42322- fscache_stat(&fscache_n_checkaux_okay);
42323+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
42324 break;
42325
42326 /* entry requires update */
42327 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
42328- fscache_stat(&fscache_n_checkaux_update);
42329+ fscache_stat_unchecked(&fscache_n_checkaux_update);
42330 break;
42331
42332 /* entry requires deletion */
42333 case FSCACHE_CHECKAUX_OBSOLETE:
42334- fscache_stat(&fscache_n_checkaux_obsolete);
42335+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
42336 break;
42337
42338 default:
42339diff -urNp linux-3.0.7/fs/fscache/operation.c linux-3.0.7/fs/fscache/operation.c
42340--- linux-3.0.7/fs/fscache/operation.c 2011-07-21 22:17:23.000000000 -0400
42341+++ linux-3.0.7/fs/fscache/operation.c 2011-08-23 21:47:56.000000000 -0400
42342@@ -17,7 +17,7 @@
42343 #include <linux/slab.h>
42344 #include "internal.h"
42345
42346-atomic_t fscache_op_debug_id;
42347+atomic_unchecked_t fscache_op_debug_id;
42348 EXPORT_SYMBOL(fscache_op_debug_id);
42349
42350 /**
42351@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fs
42352 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
42353 ASSERTCMP(atomic_read(&op->usage), >, 0);
42354
42355- fscache_stat(&fscache_n_op_enqueue);
42356+ fscache_stat_unchecked(&fscache_n_op_enqueue);
42357 switch (op->flags & FSCACHE_OP_TYPE) {
42358 case FSCACHE_OP_ASYNC:
42359 _debug("queue async");
42360@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscach
42361 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
42362 if (op->processor)
42363 fscache_enqueue_operation(op);
42364- fscache_stat(&fscache_n_op_run);
42365+ fscache_stat_unchecked(&fscache_n_op_run);
42366 }
42367
42368 /*
42369@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct f
42370 if (object->n_ops > 1) {
42371 atomic_inc(&op->usage);
42372 list_add_tail(&op->pend_link, &object->pending_ops);
42373- fscache_stat(&fscache_n_op_pend);
42374+ fscache_stat_unchecked(&fscache_n_op_pend);
42375 } else if (!list_empty(&object->pending_ops)) {
42376 atomic_inc(&op->usage);
42377 list_add_tail(&op->pend_link, &object->pending_ops);
42378- fscache_stat(&fscache_n_op_pend);
42379+ fscache_stat_unchecked(&fscache_n_op_pend);
42380 fscache_start_operations(object);
42381 } else {
42382 ASSERTCMP(object->n_in_progress, ==, 0);
42383@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct f
42384 object->n_exclusive++; /* reads and writes must wait */
42385 atomic_inc(&op->usage);
42386 list_add_tail(&op->pend_link, &object->pending_ops);
42387- fscache_stat(&fscache_n_op_pend);
42388+ fscache_stat_unchecked(&fscache_n_op_pend);
42389 ret = 0;
42390 } else {
42391 /* not allowed to submit ops in any other state */
42392@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_obj
42393 if (object->n_exclusive > 0) {
42394 atomic_inc(&op->usage);
42395 list_add_tail(&op->pend_link, &object->pending_ops);
42396- fscache_stat(&fscache_n_op_pend);
42397+ fscache_stat_unchecked(&fscache_n_op_pend);
42398 } else if (!list_empty(&object->pending_ops)) {
42399 atomic_inc(&op->usage);
42400 list_add_tail(&op->pend_link, &object->pending_ops);
42401- fscache_stat(&fscache_n_op_pend);
42402+ fscache_stat_unchecked(&fscache_n_op_pend);
42403 fscache_start_operations(object);
42404 } else {
42405 ASSERTCMP(object->n_exclusive, ==, 0);
42406@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_obj
42407 object->n_ops++;
42408 atomic_inc(&op->usage);
42409 list_add_tail(&op->pend_link, &object->pending_ops);
42410- fscache_stat(&fscache_n_op_pend);
42411+ fscache_stat_unchecked(&fscache_n_op_pend);
42412 ret = 0;
42413 } else if (object->state == FSCACHE_OBJECT_DYING ||
42414 object->state == FSCACHE_OBJECT_LC_DYING ||
42415 object->state == FSCACHE_OBJECT_WITHDRAWING) {
42416- fscache_stat(&fscache_n_op_rejected);
42417+ fscache_stat_unchecked(&fscache_n_op_rejected);
42418 ret = -ENOBUFS;
42419 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
42420 fscache_report_unexpected_submission(object, op, ostate);
42421@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_ope
42422
42423 ret = -EBUSY;
42424 if (!list_empty(&op->pend_link)) {
42425- fscache_stat(&fscache_n_op_cancelled);
42426+ fscache_stat_unchecked(&fscache_n_op_cancelled);
42427 list_del_init(&op->pend_link);
42428 object->n_ops--;
42429 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
42430@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscach
42431 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
42432 BUG();
42433
42434- fscache_stat(&fscache_n_op_release);
42435+ fscache_stat_unchecked(&fscache_n_op_release);
42436
42437 if (op->release) {
42438 op->release(op);
42439@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscach
42440 * lock, and defer it otherwise */
42441 if (!spin_trylock(&object->lock)) {
42442 _debug("defer put");
42443- fscache_stat(&fscache_n_op_deferred_release);
42444+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
42445
42446 cache = object->cache;
42447 spin_lock(&cache->op_gc_list_lock);
42448@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_st
42449
42450 _debug("GC DEFERRED REL OBJ%x OP%x",
42451 object->debug_id, op->debug_id);
42452- fscache_stat(&fscache_n_op_gc);
42453+ fscache_stat_unchecked(&fscache_n_op_gc);
42454
42455 ASSERTCMP(atomic_read(&op->usage), ==, 0);
42456
42457diff -urNp linux-3.0.7/fs/fscache/page.c linux-3.0.7/fs/fscache/page.c
42458--- linux-3.0.7/fs/fscache/page.c 2011-07-21 22:17:23.000000000 -0400
42459+++ linux-3.0.7/fs/fscache/page.c 2011-08-23 21:47:56.000000000 -0400
42460@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
42461 val = radix_tree_lookup(&cookie->stores, page->index);
42462 if (!val) {
42463 rcu_read_unlock();
42464- fscache_stat(&fscache_n_store_vmscan_not_storing);
42465+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
42466 __fscache_uncache_page(cookie, page);
42467 return true;
42468 }
42469@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
42470 spin_unlock(&cookie->stores_lock);
42471
42472 if (xpage) {
42473- fscache_stat(&fscache_n_store_vmscan_cancelled);
42474- fscache_stat(&fscache_n_store_radix_deletes);
42475+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
42476+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42477 ASSERTCMP(xpage, ==, page);
42478 } else {
42479- fscache_stat(&fscache_n_store_vmscan_gone);
42480+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
42481 }
42482
42483 wake_up_bit(&cookie->flags, 0);
42484@@ -107,7 +107,7 @@ page_busy:
42485 /* we might want to wait here, but that could deadlock the allocator as
42486 * the work threads writing to the cache may all end up sleeping
42487 * on memory allocation */
42488- fscache_stat(&fscache_n_store_vmscan_busy);
42489+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
42490 return false;
42491 }
42492 EXPORT_SYMBOL(__fscache_maybe_release_page);
42493@@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
42494 FSCACHE_COOKIE_STORING_TAG);
42495 if (!radix_tree_tag_get(&cookie->stores, page->index,
42496 FSCACHE_COOKIE_PENDING_TAG)) {
42497- fscache_stat(&fscache_n_store_radix_deletes);
42498+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42499 xpage = radix_tree_delete(&cookie->stores, page->index);
42500 }
42501 spin_unlock(&cookie->stores_lock);
42502@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
42503
42504 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
42505
42506- fscache_stat(&fscache_n_attr_changed_calls);
42507+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
42508
42509 if (fscache_object_is_active(object)) {
42510 fscache_stat(&fscache_n_cop_attr_changed);
42511@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscach
42512
42513 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42514
42515- fscache_stat(&fscache_n_attr_changed);
42516+ fscache_stat_unchecked(&fscache_n_attr_changed);
42517
42518 op = kzalloc(sizeof(*op), GFP_KERNEL);
42519 if (!op) {
42520- fscache_stat(&fscache_n_attr_changed_nomem);
42521+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
42522 _leave(" = -ENOMEM");
42523 return -ENOMEM;
42524 }
42525@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscach
42526 if (fscache_submit_exclusive_op(object, op) < 0)
42527 goto nobufs;
42528 spin_unlock(&cookie->lock);
42529- fscache_stat(&fscache_n_attr_changed_ok);
42530+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
42531 fscache_put_operation(op);
42532 _leave(" = 0");
42533 return 0;
42534@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscach
42535 nobufs:
42536 spin_unlock(&cookie->lock);
42537 kfree(op);
42538- fscache_stat(&fscache_n_attr_changed_nobufs);
42539+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
42540 _leave(" = %d", -ENOBUFS);
42541 return -ENOBUFS;
42542 }
42543@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache
42544 /* allocate a retrieval operation and attempt to submit it */
42545 op = kzalloc(sizeof(*op), GFP_NOIO);
42546 if (!op) {
42547- fscache_stat(&fscache_n_retrievals_nomem);
42548+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42549 return NULL;
42550 }
42551
42552@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_loo
42553 return 0;
42554 }
42555
42556- fscache_stat(&fscache_n_retrievals_wait);
42557+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
42558
42559 jif = jiffies;
42560 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
42561 fscache_wait_bit_interruptible,
42562 TASK_INTERRUPTIBLE) != 0) {
42563- fscache_stat(&fscache_n_retrievals_intr);
42564+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
42565 _leave(" = -ERESTARTSYS");
42566 return -ERESTARTSYS;
42567 }
42568@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_loo
42569 */
42570 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
42571 struct fscache_retrieval *op,
42572- atomic_t *stat_op_waits,
42573- atomic_t *stat_object_dead)
42574+ atomic_unchecked_t *stat_op_waits,
42575+ atomic_unchecked_t *stat_object_dead)
42576 {
42577 int ret;
42578
42579@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_ac
42580 goto check_if_dead;
42581
42582 _debug(">>> WT");
42583- fscache_stat(stat_op_waits);
42584+ fscache_stat_unchecked(stat_op_waits);
42585 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
42586 fscache_wait_bit_interruptible,
42587 TASK_INTERRUPTIBLE) < 0) {
42588@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_ac
42589
42590 check_if_dead:
42591 if (unlikely(fscache_object_is_dead(object))) {
42592- fscache_stat(stat_object_dead);
42593+ fscache_stat_unchecked(stat_object_dead);
42594 return -ENOBUFS;
42595 }
42596 return 0;
42597@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct
42598
42599 _enter("%p,%p,,,", cookie, page);
42600
42601- fscache_stat(&fscache_n_retrievals);
42602+ fscache_stat_unchecked(&fscache_n_retrievals);
42603
42604 if (hlist_empty(&cookie->backing_objects))
42605 goto nobufs;
42606@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct
42607 goto nobufs_unlock;
42608 spin_unlock(&cookie->lock);
42609
42610- fscache_stat(&fscache_n_retrieval_ops);
42611+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
42612
42613 /* pin the netfs read context in case we need to do the actual netfs
42614 * read because we've encountered a cache read failure */
42615@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct
42616
42617 error:
42618 if (ret == -ENOMEM)
42619- fscache_stat(&fscache_n_retrievals_nomem);
42620+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42621 else if (ret == -ERESTARTSYS)
42622- fscache_stat(&fscache_n_retrievals_intr);
42623+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
42624 else if (ret == -ENODATA)
42625- fscache_stat(&fscache_n_retrievals_nodata);
42626+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42627 else if (ret < 0)
42628- fscache_stat(&fscache_n_retrievals_nobufs);
42629+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42630 else
42631- fscache_stat(&fscache_n_retrievals_ok);
42632+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
42633
42634 fscache_put_retrieval(op);
42635 _leave(" = %d", ret);
42636@@ -429,7 +429,7 @@ nobufs_unlock:
42637 spin_unlock(&cookie->lock);
42638 kfree(op);
42639 nobufs:
42640- fscache_stat(&fscache_n_retrievals_nobufs);
42641+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42642 _leave(" = -ENOBUFS");
42643 return -ENOBUFS;
42644 }
42645@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct
42646
42647 _enter("%p,,%d,,,", cookie, *nr_pages);
42648
42649- fscache_stat(&fscache_n_retrievals);
42650+ fscache_stat_unchecked(&fscache_n_retrievals);
42651
42652 if (hlist_empty(&cookie->backing_objects))
42653 goto nobufs;
42654@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct
42655 goto nobufs_unlock;
42656 spin_unlock(&cookie->lock);
42657
42658- fscache_stat(&fscache_n_retrieval_ops);
42659+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
42660
42661 /* pin the netfs read context in case we need to do the actual netfs
42662 * read because we've encountered a cache read failure */
42663@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct
42664
42665 error:
42666 if (ret == -ENOMEM)
42667- fscache_stat(&fscache_n_retrievals_nomem);
42668+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42669 else if (ret == -ERESTARTSYS)
42670- fscache_stat(&fscache_n_retrievals_intr);
42671+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
42672 else if (ret == -ENODATA)
42673- fscache_stat(&fscache_n_retrievals_nodata);
42674+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42675 else if (ret < 0)
42676- fscache_stat(&fscache_n_retrievals_nobufs);
42677+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42678 else
42679- fscache_stat(&fscache_n_retrievals_ok);
42680+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
42681
42682 fscache_put_retrieval(op);
42683 _leave(" = %d", ret);
42684@@ -545,7 +545,7 @@ nobufs_unlock:
42685 spin_unlock(&cookie->lock);
42686 kfree(op);
42687 nobufs:
42688- fscache_stat(&fscache_n_retrievals_nobufs);
42689+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42690 _leave(" = -ENOBUFS");
42691 return -ENOBUFS;
42692 }
42693@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_
42694
42695 _enter("%p,%p,,,", cookie, page);
42696
42697- fscache_stat(&fscache_n_allocs);
42698+ fscache_stat_unchecked(&fscache_n_allocs);
42699
42700 if (hlist_empty(&cookie->backing_objects))
42701 goto nobufs;
42702@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_
42703 goto nobufs_unlock;
42704 spin_unlock(&cookie->lock);
42705
42706- fscache_stat(&fscache_n_alloc_ops);
42707+ fscache_stat_unchecked(&fscache_n_alloc_ops);
42708
42709 ret = fscache_wait_for_retrieval_activation(
42710 object, op,
42711@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_
42712
42713 error:
42714 if (ret == -ERESTARTSYS)
42715- fscache_stat(&fscache_n_allocs_intr);
42716+ fscache_stat_unchecked(&fscache_n_allocs_intr);
42717 else if (ret < 0)
42718- fscache_stat(&fscache_n_allocs_nobufs);
42719+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
42720 else
42721- fscache_stat(&fscache_n_allocs_ok);
42722+ fscache_stat_unchecked(&fscache_n_allocs_ok);
42723
42724 fscache_put_retrieval(op);
42725 _leave(" = %d", ret);
42726@@ -625,7 +625,7 @@ nobufs_unlock:
42727 spin_unlock(&cookie->lock);
42728 kfree(op);
42729 nobufs:
42730- fscache_stat(&fscache_n_allocs_nobufs);
42731+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
42732 _leave(" = -ENOBUFS");
42733 return -ENOBUFS;
42734 }
42735@@ -666,7 +666,7 @@ static void fscache_write_op(struct fsca
42736
42737 spin_lock(&cookie->stores_lock);
42738
42739- fscache_stat(&fscache_n_store_calls);
42740+ fscache_stat_unchecked(&fscache_n_store_calls);
42741
42742 /* find a page to store */
42743 page = NULL;
42744@@ -677,7 +677,7 @@ static void fscache_write_op(struct fsca
42745 page = results[0];
42746 _debug("gang %d [%lx]", n, page->index);
42747 if (page->index > op->store_limit) {
42748- fscache_stat(&fscache_n_store_pages_over_limit);
42749+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
42750 goto superseded;
42751 }
42752
42753@@ -689,7 +689,7 @@ static void fscache_write_op(struct fsca
42754 spin_unlock(&cookie->stores_lock);
42755 spin_unlock(&object->lock);
42756
42757- fscache_stat(&fscache_n_store_pages);
42758+ fscache_stat_unchecked(&fscache_n_store_pages);
42759 fscache_stat(&fscache_n_cop_write_page);
42760 ret = object->cache->ops->write_page(op, page);
42761 fscache_stat_d(&fscache_n_cop_write_page);
42762@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_
42763 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42764 ASSERT(PageFsCache(page));
42765
42766- fscache_stat(&fscache_n_stores);
42767+ fscache_stat_unchecked(&fscache_n_stores);
42768
42769 op = kzalloc(sizeof(*op), GFP_NOIO);
42770 if (!op)
42771@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_
42772 spin_unlock(&cookie->stores_lock);
42773 spin_unlock(&object->lock);
42774
42775- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
42776+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
42777 op->store_limit = object->store_limit;
42778
42779 if (fscache_submit_op(object, &op->op) < 0)
42780@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_
42781
42782 spin_unlock(&cookie->lock);
42783 radix_tree_preload_end();
42784- fscache_stat(&fscache_n_store_ops);
42785- fscache_stat(&fscache_n_stores_ok);
42786+ fscache_stat_unchecked(&fscache_n_store_ops);
42787+ fscache_stat_unchecked(&fscache_n_stores_ok);
42788
42789 /* the work queue now carries its own ref on the object */
42790 fscache_put_operation(&op->op);
42791@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_
42792 return 0;
42793
42794 already_queued:
42795- fscache_stat(&fscache_n_stores_again);
42796+ fscache_stat_unchecked(&fscache_n_stores_again);
42797 already_pending:
42798 spin_unlock(&cookie->stores_lock);
42799 spin_unlock(&object->lock);
42800 spin_unlock(&cookie->lock);
42801 radix_tree_preload_end();
42802 kfree(op);
42803- fscache_stat(&fscache_n_stores_ok);
42804+ fscache_stat_unchecked(&fscache_n_stores_ok);
42805 _leave(" = 0");
42806 return 0;
42807
42808@@ -851,14 +851,14 @@ nobufs:
42809 spin_unlock(&cookie->lock);
42810 radix_tree_preload_end();
42811 kfree(op);
42812- fscache_stat(&fscache_n_stores_nobufs);
42813+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
42814 _leave(" = -ENOBUFS");
42815 return -ENOBUFS;
42816
42817 nomem_free:
42818 kfree(op);
42819 nomem:
42820- fscache_stat(&fscache_n_stores_oom);
42821+ fscache_stat_unchecked(&fscache_n_stores_oom);
42822 _leave(" = -ENOMEM");
42823 return -ENOMEM;
42824 }
42825@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscac
42826 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42827 ASSERTCMP(page, !=, NULL);
42828
42829- fscache_stat(&fscache_n_uncaches);
42830+ fscache_stat_unchecked(&fscache_n_uncaches);
42831
42832 /* cache withdrawal may beat us to it */
42833 if (!PageFsCache(page))
42834@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fs
42835 unsigned long loop;
42836
42837 #ifdef CONFIG_FSCACHE_STATS
42838- atomic_add(pagevec->nr, &fscache_n_marks);
42839+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
42840 #endif
42841
42842 for (loop = 0; loop < pagevec->nr; loop++) {
42843diff -urNp linux-3.0.7/fs/fscache/stats.c linux-3.0.7/fs/fscache/stats.c
42844--- linux-3.0.7/fs/fscache/stats.c 2011-07-21 22:17:23.000000000 -0400
42845+++ linux-3.0.7/fs/fscache/stats.c 2011-08-23 21:47:56.000000000 -0400
42846@@ -18,95 +18,95 @@
42847 /*
42848 * operation counters
42849 */
42850-atomic_t fscache_n_op_pend;
42851-atomic_t fscache_n_op_run;
42852-atomic_t fscache_n_op_enqueue;
42853-atomic_t fscache_n_op_requeue;
42854-atomic_t fscache_n_op_deferred_release;
42855-atomic_t fscache_n_op_release;
42856-atomic_t fscache_n_op_gc;
42857-atomic_t fscache_n_op_cancelled;
42858-atomic_t fscache_n_op_rejected;
42859-
42860-atomic_t fscache_n_attr_changed;
42861-atomic_t fscache_n_attr_changed_ok;
42862-atomic_t fscache_n_attr_changed_nobufs;
42863-atomic_t fscache_n_attr_changed_nomem;
42864-atomic_t fscache_n_attr_changed_calls;
42865-
42866-atomic_t fscache_n_allocs;
42867-atomic_t fscache_n_allocs_ok;
42868-atomic_t fscache_n_allocs_wait;
42869-atomic_t fscache_n_allocs_nobufs;
42870-atomic_t fscache_n_allocs_intr;
42871-atomic_t fscache_n_allocs_object_dead;
42872-atomic_t fscache_n_alloc_ops;
42873-atomic_t fscache_n_alloc_op_waits;
42874-
42875-atomic_t fscache_n_retrievals;
42876-atomic_t fscache_n_retrievals_ok;
42877-atomic_t fscache_n_retrievals_wait;
42878-atomic_t fscache_n_retrievals_nodata;
42879-atomic_t fscache_n_retrievals_nobufs;
42880-atomic_t fscache_n_retrievals_intr;
42881-atomic_t fscache_n_retrievals_nomem;
42882-atomic_t fscache_n_retrievals_object_dead;
42883-atomic_t fscache_n_retrieval_ops;
42884-atomic_t fscache_n_retrieval_op_waits;
42885-
42886-atomic_t fscache_n_stores;
42887-atomic_t fscache_n_stores_ok;
42888-atomic_t fscache_n_stores_again;
42889-atomic_t fscache_n_stores_nobufs;
42890-atomic_t fscache_n_stores_oom;
42891-atomic_t fscache_n_store_ops;
42892-atomic_t fscache_n_store_calls;
42893-atomic_t fscache_n_store_pages;
42894-atomic_t fscache_n_store_radix_deletes;
42895-atomic_t fscache_n_store_pages_over_limit;
42896-
42897-atomic_t fscache_n_store_vmscan_not_storing;
42898-atomic_t fscache_n_store_vmscan_gone;
42899-atomic_t fscache_n_store_vmscan_busy;
42900-atomic_t fscache_n_store_vmscan_cancelled;
42901-
42902-atomic_t fscache_n_marks;
42903-atomic_t fscache_n_uncaches;
42904-
42905-atomic_t fscache_n_acquires;
42906-atomic_t fscache_n_acquires_null;
42907-atomic_t fscache_n_acquires_no_cache;
42908-atomic_t fscache_n_acquires_ok;
42909-atomic_t fscache_n_acquires_nobufs;
42910-atomic_t fscache_n_acquires_oom;
42911-
42912-atomic_t fscache_n_updates;
42913-atomic_t fscache_n_updates_null;
42914-atomic_t fscache_n_updates_run;
42915-
42916-atomic_t fscache_n_relinquishes;
42917-atomic_t fscache_n_relinquishes_null;
42918-atomic_t fscache_n_relinquishes_waitcrt;
42919-atomic_t fscache_n_relinquishes_retire;
42920-
42921-atomic_t fscache_n_cookie_index;
42922-atomic_t fscache_n_cookie_data;
42923-atomic_t fscache_n_cookie_special;
42924-
42925-atomic_t fscache_n_object_alloc;
42926-atomic_t fscache_n_object_no_alloc;
42927-atomic_t fscache_n_object_lookups;
42928-atomic_t fscache_n_object_lookups_negative;
42929-atomic_t fscache_n_object_lookups_positive;
42930-atomic_t fscache_n_object_lookups_timed_out;
42931-atomic_t fscache_n_object_created;
42932-atomic_t fscache_n_object_avail;
42933-atomic_t fscache_n_object_dead;
42934-
42935-atomic_t fscache_n_checkaux_none;
42936-atomic_t fscache_n_checkaux_okay;
42937-atomic_t fscache_n_checkaux_update;
42938-atomic_t fscache_n_checkaux_obsolete;
42939+atomic_unchecked_t fscache_n_op_pend;
42940+atomic_unchecked_t fscache_n_op_run;
42941+atomic_unchecked_t fscache_n_op_enqueue;
42942+atomic_unchecked_t fscache_n_op_requeue;
42943+atomic_unchecked_t fscache_n_op_deferred_release;
42944+atomic_unchecked_t fscache_n_op_release;
42945+atomic_unchecked_t fscache_n_op_gc;
42946+atomic_unchecked_t fscache_n_op_cancelled;
42947+atomic_unchecked_t fscache_n_op_rejected;
42948+
42949+atomic_unchecked_t fscache_n_attr_changed;
42950+atomic_unchecked_t fscache_n_attr_changed_ok;
42951+atomic_unchecked_t fscache_n_attr_changed_nobufs;
42952+atomic_unchecked_t fscache_n_attr_changed_nomem;
42953+atomic_unchecked_t fscache_n_attr_changed_calls;
42954+
42955+atomic_unchecked_t fscache_n_allocs;
42956+atomic_unchecked_t fscache_n_allocs_ok;
42957+atomic_unchecked_t fscache_n_allocs_wait;
42958+atomic_unchecked_t fscache_n_allocs_nobufs;
42959+atomic_unchecked_t fscache_n_allocs_intr;
42960+atomic_unchecked_t fscache_n_allocs_object_dead;
42961+atomic_unchecked_t fscache_n_alloc_ops;
42962+atomic_unchecked_t fscache_n_alloc_op_waits;
42963+
42964+atomic_unchecked_t fscache_n_retrievals;
42965+atomic_unchecked_t fscache_n_retrievals_ok;
42966+atomic_unchecked_t fscache_n_retrievals_wait;
42967+atomic_unchecked_t fscache_n_retrievals_nodata;
42968+atomic_unchecked_t fscache_n_retrievals_nobufs;
42969+atomic_unchecked_t fscache_n_retrievals_intr;
42970+atomic_unchecked_t fscache_n_retrievals_nomem;
42971+atomic_unchecked_t fscache_n_retrievals_object_dead;
42972+atomic_unchecked_t fscache_n_retrieval_ops;
42973+atomic_unchecked_t fscache_n_retrieval_op_waits;
42974+
42975+atomic_unchecked_t fscache_n_stores;
42976+atomic_unchecked_t fscache_n_stores_ok;
42977+atomic_unchecked_t fscache_n_stores_again;
42978+atomic_unchecked_t fscache_n_stores_nobufs;
42979+atomic_unchecked_t fscache_n_stores_oom;
42980+atomic_unchecked_t fscache_n_store_ops;
42981+atomic_unchecked_t fscache_n_store_calls;
42982+atomic_unchecked_t fscache_n_store_pages;
42983+atomic_unchecked_t fscache_n_store_radix_deletes;
42984+atomic_unchecked_t fscache_n_store_pages_over_limit;
42985+
42986+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42987+atomic_unchecked_t fscache_n_store_vmscan_gone;
42988+atomic_unchecked_t fscache_n_store_vmscan_busy;
42989+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42990+
42991+atomic_unchecked_t fscache_n_marks;
42992+atomic_unchecked_t fscache_n_uncaches;
42993+
42994+atomic_unchecked_t fscache_n_acquires;
42995+atomic_unchecked_t fscache_n_acquires_null;
42996+atomic_unchecked_t fscache_n_acquires_no_cache;
42997+atomic_unchecked_t fscache_n_acquires_ok;
42998+atomic_unchecked_t fscache_n_acquires_nobufs;
42999+atomic_unchecked_t fscache_n_acquires_oom;
43000+
43001+atomic_unchecked_t fscache_n_updates;
43002+atomic_unchecked_t fscache_n_updates_null;
43003+atomic_unchecked_t fscache_n_updates_run;
43004+
43005+atomic_unchecked_t fscache_n_relinquishes;
43006+atomic_unchecked_t fscache_n_relinquishes_null;
43007+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43008+atomic_unchecked_t fscache_n_relinquishes_retire;
43009+
43010+atomic_unchecked_t fscache_n_cookie_index;
43011+atomic_unchecked_t fscache_n_cookie_data;
43012+atomic_unchecked_t fscache_n_cookie_special;
43013+
43014+atomic_unchecked_t fscache_n_object_alloc;
43015+atomic_unchecked_t fscache_n_object_no_alloc;
43016+atomic_unchecked_t fscache_n_object_lookups;
43017+atomic_unchecked_t fscache_n_object_lookups_negative;
43018+atomic_unchecked_t fscache_n_object_lookups_positive;
43019+atomic_unchecked_t fscache_n_object_lookups_timed_out;
43020+atomic_unchecked_t fscache_n_object_created;
43021+atomic_unchecked_t fscache_n_object_avail;
43022+atomic_unchecked_t fscache_n_object_dead;
43023+
43024+atomic_unchecked_t fscache_n_checkaux_none;
43025+atomic_unchecked_t fscache_n_checkaux_okay;
43026+atomic_unchecked_t fscache_n_checkaux_update;
43027+atomic_unchecked_t fscache_n_checkaux_obsolete;
43028
43029 atomic_t fscache_n_cop_alloc_object;
43030 atomic_t fscache_n_cop_lookup_object;
43031@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
43032 seq_puts(m, "FS-Cache statistics\n");
43033
43034 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
43035- atomic_read(&fscache_n_cookie_index),
43036- atomic_read(&fscache_n_cookie_data),
43037- atomic_read(&fscache_n_cookie_special));
43038+ atomic_read_unchecked(&fscache_n_cookie_index),
43039+ atomic_read_unchecked(&fscache_n_cookie_data),
43040+ atomic_read_unchecked(&fscache_n_cookie_special));
43041
43042 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
43043- atomic_read(&fscache_n_object_alloc),
43044- atomic_read(&fscache_n_object_no_alloc),
43045- atomic_read(&fscache_n_object_avail),
43046- atomic_read(&fscache_n_object_dead));
43047+ atomic_read_unchecked(&fscache_n_object_alloc),
43048+ atomic_read_unchecked(&fscache_n_object_no_alloc),
43049+ atomic_read_unchecked(&fscache_n_object_avail),
43050+ atomic_read_unchecked(&fscache_n_object_dead));
43051 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
43052- atomic_read(&fscache_n_checkaux_none),
43053- atomic_read(&fscache_n_checkaux_okay),
43054- atomic_read(&fscache_n_checkaux_update),
43055- atomic_read(&fscache_n_checkaux_obsolete));
43056+ atomic_read_unchecked(&fscache_n_checkaux_none),
43057+ atomic_read_unchecked(&fscache_n_checkaux_okay),
43058+ atomic_read_unchecked(&fscache_n_checkaux_update),
43059+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
43060
43061 seq_printf(m, "Pages : mrk=%u unc=%u\n",
43062- atomic_read(&fscache_n_marks),
43063- atomic_read(&fscache_n_uncaches));
43064+ atomic_read_unchecked(&fscache_n_marks),
43065+ atomic_read_unchecked(&fscache_n_uncaches));
43066
43067 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
43068 " oom=%u\n",
43069- atomic_read(&fscache_n_acquires),
43070- atomic_read(&fscache_n_acquires_null),
43071- atomic_read(&fscache_n_acquires_no_cache),
43072- atomic_read(&fscache_n_acquires_ok),
43073- atomic_read(&fscache_n_acquires_nobufs),
43074- atomic_read(&fscache_n_acquires_oom));
43075+ atomic_read_unchecked(&fscache_n_acquires),
43076+ atomic_read_unchecked(&fscache_n_acquires_null),
43077+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
43078+ atomic_read_unchecked(&fscache_n_acquires_ok),
43079+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
43080+ atomic_read_unchecked(&fscache_n_acquires_oom));
43081
43082 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
43083- atomic_read(&fscache_n_object_lookups),
43084- atomic_read(&fscache_n_object_lookups_negative),
43085- atomic_read(&fscache_n_object_lookups_positive),
43086- atomic_read(&fscache_n_object_created),
43087- atomic_read(&fscache_n_object_lookups_timed_out));
43088+ atomic_read_unchecked(&fscache_n_object_lookups),
43089+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
43090+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
43091+ atomic_read_unchecked(&fscache_n_object_created),
43092+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
43093
43094 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
43095- atomic_read(&fscache_n_updates),
43096- atomic_read(&fscache_n_updates_null),
43097- atomic_read(&fscache_n_updates_run));
43098+ atomic_read_unchecked(&fscache_n_updates),
43099+ atomic_read_unchecked(&fscache_n_updates_null),
43100+ atomic_read_unchecked(&fscache_n_updates_run));
43101
43102 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
43103- atomic_read(&fscache_n_relinquishes),
43104- atomic_read(&fscache_n_relinquishes_null),
43105- atomic_read(&fscache_n_relinquishes_waitcrt),
43106- atomic_read(&fscache_n_relinquishes_retire));
43107+ atomic_read_unchecked(&fscache_n_relinquishes),
43108+ atomic_read_unchecked(&fscache_n_relinquishes_null),
43109+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
43110+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
43111
43112 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
43113- atomic_read(&fscache_n_attr_changed),
43114- atomic_read(&fscache_n_attr_changed_ok),
43115- atomic_read(&fscache_n_attr_changed_nobufs),
43116- atomic_read(&fscache_n_attr_changed_nomem),
43117- atomic_read(&fscache_n_attr_changed_calls));
43118+ atomic_read_unchecked(&fscache_n_attr_changed),
43119+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
43120+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
43121+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
43122+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
43123
43124 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
43125- atomic_read(&fscache_n_allocs),
43126- atomic_read(&fscache_n_allocs_ok),
43127- atomic_read(&fscache_n_allocs_wait),
43128- atomic_read(&fscache_n_allocs_nobufs),
43129- atomic_read(&fscache_n_allocs_intr));
43130+ atomic_read_unchecked(&fscache_n_allocs),
43131+ atomic_read_unchecked(&fscache_n_allocs_ok),
43132+ atomic_read_unchecked(&fscache_n_allocs_wait),
43133+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
43134+ atomic_read_unchecked(&fscache_n_allocs_intr));
43135 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
43136- atomic_read(&fscache_n_alloc_ops),
43137- atomic_read(&fscache_n_alloc_op_waits),
43138- atomic_read(&fscache_n_allocs_object_dead));
43139+ atomic_read_unchecked(&fscache_n_alloc_ops),
43140+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
43141+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
43142
43143 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
43144 " int=%u oom=%u\n",
43145- atomic_read(&fscache_n_retrievals),
43146- atomic_read(&fscache_n_retrievals_ok),
43147- atomic_read(&fscache_n_retrievals_wait),
43148- atomic_read(&fscache_n_retrievals_nodata),
43149- atomic_read(&fscache_n_retrievals_nobufs),
43150- atomic_read(&fscache_n_retrievals_intr),
43151- atomic_read(&fscache_n_retrievals_nomem));
43152+ atomic_read_unchecked(&fscache_n_retrievals),
43153+ atomic_read_unchecked(&fscache_n_retrievals_ok),
43154+ atomic_read_unchecked(&fscache_n_retrievals_wait),
43155+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
43156+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
43157+ atomic_read_unchecked(&fscache_n_retrievals_intr),
43158+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
43159 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
43160- atomic_read(&fscache_n_retrieval_ops),
43161- atomic_read(&fscache_n_retrieval_op_waits),
43162- atomic_read(&fscache_n_retrievals_object_dead));
43163+ atomic_read_unchecked(&fscache_n_retrieval_ops),
43164+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
43165+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
43166
43167 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
43168- atomic_read(&fscache_n_stores),
43169- atomic_read(&fscache_n_stores_ok),
43170- atomic_read(&fscache_n_stores_again),
43171- atomic_read(&fscache_n_stores_nobufs),
43172- atomic_read(&fscache_n_stores_oom));
43173+ atomic_read_unchecked(&fscache_n_stores),
43174+ atomic_read_unchecked(&fscache_n_stores_ok),
43175+ atomic_read_unchecked(&fscache_n_stores_again),
43176+ atomic_read_unchecked(&fscache_n_stores_nobufs),
43177+ atomic_read_unchecked(&fscache_n_stores_oom));
43178 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
43179- atomic_read(&fscache_n_store_ops),
43180- atomic_read(&fscache_n_store_calls),
43181- atomic_read(&fscache_n_store_pages),
43182- atomic_read(&fscache_n_store_radix_deletes),
43183- atomic_read(&fscache_n_store_pages_over_limit));
43184+ atomic_read_unchecked(&fscache_n_store_ops),
43185+ atomic_read_unchecked(&fscache_n_store_calls),
43186+ atomic_read_unchecked(&fscache_n_store_pages),
43187+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
43188+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
43189
43190 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
43191- atomic_read(&fscache_n_store_vmscan_not_storing),
43192- atomic_read(&fscache_n_store_vmscan_gone),
43193- atomic_read(&fscache_n_store_vmscan_busy),
43194- atomic_read(&fscache_n_store_vmscan_cancelled));
43195+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
43196+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
43197+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
43198+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
43199
43200 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
43201- atomic_read(&fscache_n_op_pend),
43202- atomic_read(&fscache_n_op_run),
43203- atomic_read(&fscache_n_op_enqueue),
43204- atomic_read(&fscache_n_op_cancelled),
43205- atomic_read(&fscache_n_op_rejected));
43206+ atomic_read_unchecked(&fscache_n_op_pend),
43207+ atomic_read_unchecked(&fscache_n_op_run),
43208+ atomic_read_unchecked(&fscache_n_op_enqueue),
43209+ atomic_read_unchecked(&fscache_n_op_cancelled),
43210+ atomic_read_unchecked(&fscache_n_op_rejected));
43211 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
43212- atomic_read(&fscache_n_op_deferred_release),
43213- atomic_read(&fscache_n_op_release),
43214- atomic_read(&fscache_n_op_gc));
43215+ atomic_read_unchecked(&fscache_n_op_deferred_release),
43216+ atomic_read_unchecked(&fscache_n_op_release),
43217+ atomic_read_unchecked(&fscache_n_op_gc));
43218
43219 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
43220 atomic_read(&fscache_n_cop_alloc_object),
43221diff -urNp linux-3.0.7/fs/fs_struct.c linux-3.0.7/fs/fs_struct.c
43222--- linux-3.0.7/fs/fs_struct.c 2011-07-21 22:17:23.000000000 -0400
43223+++ linux-3.0.7/fs/fs_struct.c 2011-08-23 21:48:14.000000000 -0400
43224@@ -4,6 +4,7 @@
43225 #include <linux/path.h>
43226 #include <linux/slab.h>
43227 #include <linux/fs_struct.h>
43228+#include <linux/grsecurity.h>
43229 #include "internal.h"
43230
43231 static inline void path_get_longterm(struct path *path)
43232@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
43233 old_root = fs->root;
43234 fs->root = *path;
43235 path_get_longterm(path);
43236+ gr_set_chroot_entries(current, path);
43237 write_seqcount_end(&fs->seq);
43238 spin_unlock(&fs->lock);
43239 if (old_root.dentry)
43240@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
43241 && fs->root.mnt == old_root->mnt) {
43242 path_get_longterm(new_root);
43243 fs->root = *new_root;
43244+ gr_set_chroot_entries(p, new_root);
43245 count++;
43246 }
43247 if (fs->pwd.dentry == old_root->dentry
43248@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
43249 spin_lock(&fs->lock);
43250 write_seqcount_begin(&fs->seq);
43251 tsk->fs = NULL;
43252- kill = !--fs->users;
43253+ gr_clear_chroot_entries(tsk);
43254+ kill = !atomic_dec_return(&fs->users);
43255 write_seqcount_end(&fs->seq);
43256 spin_unlock(&fs->lock);
43257 task_unlock(tsk);
43258@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct
43259 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
43260 /* We don't need to lock fs - think why ;-) */
43261 if (fs) {
43262- fs->users = 1;
43263+ atomic_set(&fs->users, 1);
43264 fs->in_exec = 0;
43265 spin_lock_init(&fs->lock);
43266 seqcount_init(&fs->seq);
43267@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct
43268 spin_lock(&old->lock);
43269 fs->root = old->root;
43270 path_get_longterm(&fs->root);
43271+ /* instead of calling gr_set_chroot_entries here,
43272+ we call it from every caller of this function
43273+ */
43274 fs->pwd = old->pwd;
43275 path_get_longterm(&fs->pwd);
43276 spin_unlock(&old->lock);
43277@@ -150,8 +157,9 @@ int unshare_fs_struct(void)
43278
43279 task_lock(current);
43280 spin_lock(&fs->lock);
43281- kill = !--fs->users;
43282+ kill = !atomic_dec_return(&fs->users);
43283 current->fs = new_fs;
43284+ gr_set_chroot_entries(current, &new_fs->root);
43285 spin_unlock(&fs->lock);
43286 task_unlock(current);
43287
43288@@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
43289
43290 /* to be mentioned only in INIT_TASK */
43291 struct fs_struct init_fs = {
43292- .users = 1,
43293+ .users = ATOMIC_INIT(1),
43294 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
43295 .seq = SEQCNT_ZERO,
43296 .umask = 0022,
43297@@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
43298 task_lock(current);
43299
43300 spin_lock(&init_fs.lock);
43301- init_fs.users++;
43302+ atomic_inc(&init_fs.users);
43303 spin_unlock(&init_fs.lock);
43304
43305 spin_lock(&fs->lock);
43306 current->fs = &init_fs;
43307- kill = !--fs->users;
43308+ gr_set_chroot_entries(current, &current->fs->root);
43309+ kill = !atomic_dec_return(&fs->users);
43310 spin_unlock(&fs->lock);
43311
43312 task_unlock(current);
43313diff -urNp linux-3.0.7/fs/fuse/cuse.c linux-3.0.7/fs/fuse/cuse.c
43314--- linux-3.0.7/fs/fuse/cuse.c 2011-07-21 22:17:23.000000000 -0400
43315+++ linux-3.0.7/fs/fuse/cuse.c 2011-08-23 21:47:56.000000000 -0400
43316@@ -586,10 +586,12 @@ static int __init cuse_init(void)
43317 INIT_LIST_HEAD(&cuse_conntbl[i]);
43318
43319 /* inherit and extend fuse_dev_operations */
43320- cuse_channel_fops = fuse_dev_operations;
43321- cuse_channel_fops.owner = THIS_MODULE;
43322- cuse_channel_fops.open = cuse_channel_open;
43323- cuse_channel_fops.release = cuse_channel_release;
43324+ pax_open_kernel();
43325+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
43326+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
43327+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
43328+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
43329+ pax_close_kernel();
43330
43331 cuse_class = class_create(THIS_MODULE, "cuse");
43332 if (IS_ERR(cuse_class))
43333diff -urNp linux-3.0.7/fs/fuse/dev.c linux-3.0.7/fs/fuse/dev.c
43334--- linux-3.0.7/fs/fuse/dev.c 2011-09-02 18:11:26.000000000 -0400
43335+++ linux-3.0.7/fs/fuse/dev.c 2011-08-29 23:26:27.000000000 -0400
43336@@ -1238,7 +1238,7 @@ static ssize_t fuse_dev_splice_read(stru
43337 ret = 0;
43338 pipe_lock(pipe);
43339
43340- if (!pipe->readers) {
43341+ if (!atomic_read(&pipe->readers)) {
43342 send_sig(SIGPIPE, current, 0);
43343 if (!ret)
43344 ret = -EPIPE;
43345diff -urNp linux-3.0.7/fs/fuse/dir.c linux-3.0.7/fs/fuse/dir.c
43346--- linux-3.0.7/fs/fuse/dir.c 2011-07-21 22:17:23.000000000 -0400
43347+++ linux-3.0.7/fs/fuse/dir.c 2011-08-23 21:47:56.000000000 -0400
43348@@ -1148,7 +1148,7 @@ static char *read_link(struct dentry *de
43349 return link;
43350 }
43351
43352-static void free_link(char *link)
43353+static void free_link(const char *link)
43354 {
43355 if (!IS_ERR(link))
43356 free_page((unsigned long) link);
43357diff -urNp linux-3.0.7/fs/gfs2/inode.c linux-3.0.7/fs/gfs2/inode.c
43358--- linux-3.0.7/fs/gfs2/inode.c 2011-07-21 22:17:23.000000000 -0400
43359+++ linux-3.0.7/fs/gfs2/inode.c 2011-08-23 21:47:56.000000000 -0400
43360@@ -1525,7 +1525,7 @@ out:
43361
43362 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
43363 {
43364- char *s = nd_get_link(nd);
43365+ const char *s = nd_get_link(nd);
43366 if (!IS_ERR(s))
43367 kfree(s);
43368 }
43369diff -urNp linux-3.0.7/fs/hfsplus/catalog.c linux-3.0.7/fs/hfsplus/catalog.c
43370--- linux-3.0.7/fs/hfsplus/catalog.c 2011-07-21 22:17:23.000000000 -0400
43371+++ linux-3.0.7/fs/hfsplus/catalog.c 2011-08-23 21:48:14.000000000 -0400
43372@@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block
43373 int err;
43374 u16 type;
43375
43376+ pax_track_stack();
43377+
43378 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
43379 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
43380 if (err)
43381@@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct
43382 int entry_size;
43383 int err;
43384
43385+ pax_track_stack();
43386+
43387 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
43388 str->name, cnid, inode->i_nlink);
43389 hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
43390@@ -349,6 +353,8 @@ int hfsplus_rename_cat(u32 cnid,
43391 int entry_size, type;
43392 int err = 0;
43393
43394+ pax_track_stack();
43395+
43396 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
43397 cnid, src_dir->i_ino, src_name->name,
43398 dst_dir->i_ino, dst_name->name);
43399diff -urNp linux-3.0.7/fs/hfsplus/dir.c linux-3.0.7/fs/hfsplus/dir.c
43400--- linux-3.0.7/fs/hfsplus/dir.c 2011-07-21 22:17:23.000000000 -0400
43401+++ linux-3.0.7/fs/hfsplus/dir.c 2011-08-23 21:48:14.000000000 -0400
43402@@ -129,6 +129,8 @@ static int hfsplus_readdir(struct file *
43403 struct hfsplus_readdir_data *rd;
43404 u16 type;
43405
43406+ pax_track_stack();
43407+
43408 if (filp->f_pos >= inode->i_size)
43409 return 0;
43410
43411diff -urNp linux-3.0.7/fs/hfsplus/inode.c linux-3.0.7/fs/hfsplus/inode.c
43412--- linux-3.0.7/fs/hfsplus/inode.c 2011-07-21 22:17:23.000000000 -0400
43413+++ linux-3.0.7/fs/hfsplus/inode.c 2011-08-23 21:48:14.000000000 -0400
43414@@ -489,6 +489,8 @@ int hfsplus_cat_read_inode(struct inode
43415 int res = 0;
43416 u16 type;
43417
43418+ pax_track_stack();
43419+
43420 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
43421
43422 HFSPLUS_I(inode)->linkid = 0;
43423@@ -552,6 +554,8 @@ int hfsplus_cat_write_inode(struct inode
43424 struct hfs_find_data fd;
43425 hfsplus_cat_entry entry;
43426
43427+ pax_track_stack();
43428+
43429 if (HFSPLUS_IS_RSRC(inode))
43430 main_inode = HFSPLUS_I(inode)->rsrc_inode;
43431
43432diff -urNp linux-3.0.7/fs/hfsplus/ioctl.c linux-3.0.7/fs/hfsplus/ioctl.c
43433--- linux-3.0.7/fs/hfsplus/ioctl.c 2011-07-21 22:17:23.000000000 -0400
43434+++ linux-3.0.7/fs/hfsplus/ioctl.c 2011-08-23 21:48:14.000000000 -0400
43435@@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
43436 struct hfsplus_cat_file *file;
43437 int res;
43438
43439+ pax_track_stack();
43440+
43441 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43442 return -EOPNOTSUPP;
43443
43444@@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
43445 struct hfsplus_cat_file *file;
43446 ssize_t res = 0;
43447
43448+ pax_track_stack();
43449+
43450 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43451 return -EOPNOTSUPP;
43452
43453diff -urNp linux-3.0.7/fs/hfsplus/super.c linux-3.0.7/fs/hfsplus/super.c
43454--- linux-3.0.7/fs/hfsplus/super.c 2011-07-21 22:17:23.000000000 -0400
43455+++ linux-3.0.7/fs/hfsplus/super.c 2011-08-23 21:48:14.000000000 -0400
43456@@ -340,6 +340,8 @@ static int hfsplus_fill_super(struct sup
43457 struct nls_table *nls = NULL;
43458 int err;
43459
43460+ pax_track_stack();
43461+
43462 err = -EINVAL;
43463 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
43464 if (!sbi)
43465diff -urNp linux-3.0.7/fs/hugetlbfs/inode.c linux-3.0.7/fs/hugetlbfs/inode.c
43466--- linux-3.0.7/fs/hugetlbfs/inode.c 2011-07-21 22:17:23.000000000 -0400
43467+++ linux-3.0.7/fs/hugetlbfs/inode.c 2011-08-23 21:48:14.000000000 -0400
43468@@ -914,7 +914,7 @@ static struct file_system_type hugetlbfs
43469 .kill_sb = kill_litter_super,
43470 };
43471
43472-static struct vfsmount *hugetlbfs_vfsmount;
43473+struct vfsmount *hugetlbfs_vfsmount;
43474
43475 static int can_do_hugetlb_shm(void)
43476 {
43477diff -urNp linux-3.0.7/fs/inode.c linux-3.0.7/fs/inode.c
43478--- linux-3.0.7/fs/inode.c 2011-07-21 22:17:23.000000000 -0400
43479+++ linux-3.0.7/fs/inode.c 2011-08-23 21:47:56.000000000 -0400
43480@@ -829,8 +829,8 @@ unsigned int get_next_ino(void)
43481
43482 #ifdef CONFIG_SMP
43483 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
43484- static atomic_t shared_last_ino;
43485- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
43486+ static atomic_unchecked_t shared_last_ino;
43487+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
43488
43489 res = next - LAST_INO_BATCH;
43490 }
43491diff -urNp linux-3.0.7/fs/jbd/checkpoint.c linux-3.0.7/fs/jbd/checkpoint.c
43492--- linux-3.0.7/fs/jbd/checkpoint.c 2011-07-21 22:17:23.000000000 -0400
43493+++ linux-3.0.7/fs/jbd/checkpoint.c 2011-08-23 21:48:14.000000000 -0400
43494@@ -350,6 +350,8 @@ int log_do_checkpoint(journal_t *journal
43495 tid_t this_tid;
43496 int result;
43497
43498+ pax_track_stack();
43499+
43500 jbd_debug(1, "Start checkpoint\n");
43501
43502 /*
43503diff -urNp linux-3.0.7/fs/jffs2/compr_rtime.c linux-3.0.7/fs/jffs2/compr_rtime.c
43504--- linux-3.0.7/fs/jffs2/compr_rtime.c 2011-07-21 22:17:23.000000000 -0400
43505+++ linux-3.0.7/fs/jffs2/compr_rtime.c 2011-08-23 21:48:14.000000000 -0400
43506@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
43507 int outpos = 0;
43508 int pos=0;
43509
43510+ pax_track_stack();
43511+
43512 memset(positions,0,sizeof(positions));
43513
43514 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
43515@@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
43516 int outpos = 0;
43517 int pos=0;
43518
43519+ pax_track_stack();
43520+
43521 memset(positions,0,sizeof(positions));
43522
43523 while (outpos<destlen) {
43524diff -urNp linux-3.0.7/fs/jffs2/compr_rubin.c linux-3.0.7/fs/jffs2/compr_rubin.c
43525--- linux-3.0.7/fs/jffs2/compr_rubin.c 2011-07-21 22:17:23.000000000 -0400
43526+++ linux-3.0.7/fs/jffs2/compr_rubin.c 2011-08-23 21:48:14.000000000 -0400
43527@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
43528 int ret;
43529 uint32_t mysrclen, mydstlen;
43530
43531+ pax_track_stack();
43532+
43533 mysrclen = *sourcelen;
43534 mydstlen = *dstlen - 8;
43535
43536diff -urNp linux-3.0.7/fs/jffs2/erase.c linux-3.0.7/fs/jffs2/erase.c
43537--- linux-3.0.7/fs/jffs2/erase.c 2011-07-21 22:17:23.000000000 -0400
43538+++ linux-3.0.7/fs/jffs2/erase.c 2011-08-23 21:47:56.000000000 -0400
43539@@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
43540 struct jffs2_unknown_node marker = {
43541 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
43542 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43543- .totlen = cpu_to_je32(c->cleanmarker_size)
43544+ .totlen = cpu_to_je32(c->cleanmarker_size),
43545+ .hdr_crc = cpu_to_je32(0)
43546 };
43547
43548 jffs2_prealloc_raw_node_refs(c, jeb, 1);
43549diff -urNp linux-3.0.7/fs/jffs2/wbuf.c linux-3.0.7/fs/jffs2/wbuf.c
43550--- linux-3.0.7/fs/jffs2/wbuf.c 2011-07-21 22:17:23.000000000 -0400
43551+++ linux-3.0.7/fs/jffs2/wbuf.c 2011-08-23 21:47:56.000000000 -0400
43552@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
43553 {
43554 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
43555 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43556- .totlen = constant_cpu_to_je32(8)
43557+ .totlen = constant_cpu_to_je32(8),
43558+ .hdr_crc = constant_cpu_to_je32(0)
43559 };
43560
43561 /*
43562diff -urNp linux-3.0.7/fs/jffs2/xattr.c linux-3.0.7/fs/jffs2/xattr.c
43563--- linux-3.0.7/fs/jffs2/xattr.c 2011-07-21 22:17:23.000000000 -0400
43564+++ linux-3.0.7/fs/jffs2/xattr.c 2011-08-23 21:48:14.000000000 -0400
43565@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
43566
43567 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
43568
43569+ pax_track_stack();
43570+
43571 /* Phase.1 : Merge same xref */
43572 for (i=0; i < XREF_TMPHASH_SIZE; i++)
43573 xref_tmphash[i] = NULL;
43574diff -urNp linux-3.0.7/fs/jfs/super.c linux-3.0.7/fs/jfs/super.c
43575--- linux-3.0.7/fs/jfs/super.c 2011-07-21 22:17:23.000000000 -0400
43576+++ linux-3.0.7/fs/jfs/super.c 2011-08-23 21:47:56.000000000 -0400
43577@@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
43578
43579 jfs_inode_cachep =
43580 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
43581- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
43582+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
43583 init_once);
43584 if (jfs_inode_cachep == NULL)
43585 return -ENOMEM;
43586diff -urNp linux-3.0.7/fs/Kconfig.binfmt linux-3.0.7/fs/Kconfig.binfmt
43587--- linux-3.0.7/fs/Kconfig.binfmt 2011-07-21 22:17:23.000000000 -0400
43588+++ linux-3.0.7/fs/Kconfig.binfmt 2011-08-23 21:47:56.000000000 -0400
43589@@ -86,7 +86,7 @@ config HAVE_AOUT
43590
43591 config BINFMT_AOUT
43592 tristate "Kernel support for a.out and ECOFF binaries"
43593- depends on HAVE_AOUT
43594+ depends on HAVE_AOUT && BROKEN
43595 ---help---
43596 A.out (Assembler.OUTput) is a set of formats for libraries and
43597 executables used in the earliest versions of UNIX. Linux used
43598diff -urNp linux-3.0.7/fs/libfs.c linux-3.0.7/fs/libfs.c
43599--- linux-3.0.7/fs/libfs.c 2011-07-21 22:17:23.000000000 -0400
43600+++ linux-3.0.7/fs/libfs.c 2011-08-23 21:47:56.000000000 -0400
43601@@ -163,6 +163,9 @@ int dcache_readdir(struct file * filp, v
43602
43603 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
43604 struct dentry *next;
43605+ char d_name[sizeof(next->d_iname)];
43606+ const unsigned char *name;
43607+
43608 next = list_entry(p, struct dentry, d_u.d_child);
43609 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
43610 if (!simple_positive(next)) {
43611@@ -172,7 +175,12 @@ int dcache_readdir(struct file * filp, v
43612
43613 spin_unlock(&next->d_lock);
43614 spin_unlock(&dentry->d_lock);
43615- if (filldir(dirent, next->d_name.name,
43616+ name = next->d_name.name;
43617+ if (name == next->d_iname) {
43618+ memcpy(d_name, name, next->d_name.len);
43619+ name = d_name;
43620+ }
43621+ if (filldir(dirent, name,
43622 next->d_name.len, filp->f_pos,
43623 next->d_inode->i_ino,
43624 dt_type(next->d_inode)) < 0)
43625diff -urNp linux-3.0.7/fs/lockd/clntproc.c linux-3.0.7/fs/lockd/clntproc.c
43626--- linux-3.0.7/fs/lockd/clntproc.c 2011-07-21 22:17:23.000000000 -0400
43627+++ linux-3.0.7/fs/lockd/clntproc.c 2011-08-23 21:48:14.000000000 -0400
43628@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
43629 /*
43630 * Cookie counter for NLM requests
43631 */
43632-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
43633+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
43634
43635 void nlmclnt_next_cookie(struct nlm_cookie *c)
43636 {
43637- u32 cookie = atomic_inc_return(&nlm_cookie);
43638+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
43639
43640 memcpy(c->data, &cookie, 4);
43641 c->len=4;
43642@@ -620,6 +620,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
43643 struct nlm_rqst reqst, *req;
43644 int status;
43645
43646+ pax_track_stack();
43647+
43648 req = &reqst;
43649 memset(req, 0, sizeof(*req));
43650 locks_init_lock(&req->a_args.lock.fl);
43651diff -urNp linux-3.0.7/fs/locks.c linux-3.0.7/fs/locks.c
43652--- linux-3.0.7/fs/locks.c 2011-07-21 22:17:23.000000000 -0400
43653+++ linux-3.0.7/fs/locks.c 2011-08-23 21:47:56.000000000 -0400
43654@@ -2043,16 +2043,16 @@ void locks_remove_flock(struct file *fil
43655 return;
43656
43657 if (filp->f_op && filp->f_op->flock) {
43658- struct file_lock fl = {
43659+ struct file_lock flock = {
43660 .fl_pid = current->tgid,
43661 .fl_file = filp,
43662 .fl_flags = FL_FLOCK,
43663 .fl_type = F_UNLCK,
43664 .fl_end = OFFSET_MAX,
43665 };
43666- filp->f_op->flock(filp, F_SETLKW, &fl);
43667- if (fl.fl_ops && fl.fl_ops->fl_release_private)
43668- fl.fl_ops->fl_release_private(&fl);
43669+ filp->f_op->flock(filp, F_SETLKW, &flock);
43670+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
43671+ flock.fl_ops->fl_release_private(&flock);
43672 }
43673
43674 lock_flocks();
43675diff -urNp linux-3.0.7/fs/logfs/super.c linux-3.0.7/fs/logfs/super.c
43676--- linux-3.0.7/fs/logfs/super.c 2011-07-21 22:17:23.000000000 -0400
43677+++ linux-3.0.7/fs/logfs/super.c 2011-08-23 21:48:14.000000000 -0400
43678@@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
43679 struct logfs_disk_super _ds1, *ds1 = &_ds1;
43680 int err, valid0, valid1;
43681
43682+ pax_track_stack();
43683+
43684 /* read first superblock */
43685 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
43686 if (err)
43687diff -urNp linux-3.0.7/fs/namei.c linux-3.0.7/fs/namei.c
43688--- linux-3.0.7/fs/namei.c 2011-10-16 21:54:54.000000000 -0400
43689+++ linux-3.0.7/fs/namei.c 2011-10-19 06:12:36.000000000 -0400
43690@@ -237,21 +237,23 @@ int generic_permission(struct inode *ino
43691 return ret;
43692
43693 /*
43694- * Read/write DACs are always overridable.
43695- * Executable DACs are overridable for all directories and
43696- * for non-directories that have least one exec bit set.
43697+ * Searching includes executable on directories, else just read.
43698 */
43699- if (!(mask & MAY_EXEC) || execute_ok(inode))
43700- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
43701+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
43702+ if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) {
43703+ if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
43704 return 0;
43705+ }
43706
43707 /*
43708- * Searching includes executable on directories, else just read.
43709+ * Read/write DACs are always overridable.
43710+ * Executable DACs are overridable for all directories and
43711+ * for non-directories that have least one exec bit set.
43712 */
43713- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
43714- if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
43715- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
43716+ if (!(mask & MAY_EXEC) || execute_ok(inode)) {
43717+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
43718 return 0;
43719+ }
43720
43721 return -EACCES;
43722 }
43723@@ -593,9 +595,12 @@ static inline int exec_permission(struct
43724 if (ret == -ECHILD)
43725 return ret;
43726
43727- if (ns_capable(ns, CAP_DAC_OVERRIDE) ||
43728- ns_capable(ns, CAP_DAC_READ_SEARCH))
43729+ if (ns_capable_nolog(ns, CAP_DAC_OVERRIDE))
43730 goto ok;
43731+ else {
43732+ if (ns_capable(ns, CAP_DAC_READ_SEARCH) || ns_capable(ns, CAP_DAC_OVERRIDE))
43733+ goto ok;
43734+ }
43735
43736 return ret;
43737 ok:
43738@@ -703,11 +708,26 @@ follow_link(struct path *link, struct na
43739 return error;
43740 }
43741
43742+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
43743+ dentry->d_inode, dentry, nd->path.mnt)) {
43744+ error = -EACCES;
43745+ *p = ERR_PTR(error); /* no ->put_link(), please */
43746+ path_put(&nd->path);
43747+ return error;
43748+ }
43749+
43750+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
43751+ error = -ENOENT;
43752+ *p = ERR_PTR(error); /* no ->put_link(), please */
43753+ path_put(&nd->path);
43754+ return error;
43755+ }
43756+
43757 nd->last_type = LAST_BIND;
43758 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
43759 error = PTR_ERR(*p);
43760 if (!IS_ERR(*p)) {
43761- char *s = nd_get_link(nd);
43762+ const char *s = nd_get_link(nd);
43763 error = 0;
43764 if (s)
43765 error = __vfs_follow_link(nd, s);
43766@@ -1598,6 +1618,12 @@ static int path_lookupat(int dfd, const
43767 if (!err)
43768 err = complete_walk(nd);
43769
43770+ if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
43771+ if (!err)
43772+ path_put(&nd->path);
43773+ err = -ENOENT;
43774+ }
43775+
43776 if (!err && nd->flags & LOOKUP_DIRECTORY) {
43777 if (!nd->inode->i_op->lookup) {
43778 path_put(&nd->path);
43779@@ -1625,6 +1651,9 @@ static int do_path_lookup(int dfd, const
43780 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
43781
43782 if (likely(!retval)) {
43783+ if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
43784+ return -ENOENT;
43785+
43786 if (unlikely(!audit_dummy_context())) {
43787 if (nd->path.dentry && nd->inode)
43788 audit_inode(name, nd->path.dentry);
43789@@ -1935,6 +1964,30 @@ int vfs_create(struct inode *dir, struct
43790 return error;
43791 }
43792
43793+/*
43794+ * Note that while the flag value (low two bits) for sys_open means:
43795+ * 00 - read-only
43796+ * 01 - write-only
43797+ * 10 - read-write
43798+ * 11 - special
43799+ * it is changed into
43800+ * 00 - no permissions needed
43801+ * 01 - read-permission
43802+ * 10 - write-permission
43803+ * 11 - read-write
43804+ * for the internal routines (ie open_namei()/follow_link() etc)
43805+ * This is more logical, and also allows the 00 "no perm needed"
43806+ * to be used for symlinks (where the permissions are checked
43807+ * later).
43808+ *
43809+*/
43810+static inline int open_to_namei_flags(int flag)
43811+{
43812+ if ((flag+1) & O_ACCMODE)
43813+ flag++;
43814+ return flag;
43815+}
43816+
43817 static int may_open(struct path *path, int acc_mode, int flag)
43818 {
43819 struct dentry *dentry = path->dentry;
43820@@ -1987,7 +2040,27 @@ static int may_open(struct path *path, i
43821 /*
43822 * Ensure there are no outstanding leases on the file.
43823 */
43824- return break_lease(inode, flag);
43825+ error = break_lease(inode, flag);
43826+
43827+ if (error)
43828+ return error;
43829+
43830+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
43831+ error = -EPERM;
43832+ goto exit;
43833+ }
43834+
43835+ if (gr_handle_rawio(inode)) {
43836+ error = -EPERM;
43837+ goto exit;
43838+ }
43839+
43840+ if (!gr_acl_handle_open(dentry, path->mnt, open_to_namei_flags(flag))) {
43841+ error = -EACCES;
43842+ goto exit;
43843+ }
43844+exit:
43845+ return error;
43846 }
43847
43848 static int handle_truncate(struct file *filp)
43849@@ -2013,30 +2086,6 @@ static int handle_truncate(struct file *
43850 }
43851
43852 /*
43853- * Note that while the flag value (low two bits) for sys_open means:
43854- * 00 - read-only
43855- * 01 - write-only
43856- * 10 - read-write
43857- * 11 - special
43858- * it is changed into
43859- * 00 - no permissions needed
43860- * 01 - read-permission
43861- * 10 - write-permission
43862- * 11 - read-write
43863- * for the internal routines (ie open_namei()/follow_link() etc)
43864- * This is more logical, and also allows the 00 "no perm needed"
43865- * to be used for symlinks (where the permissions are checked
43866- * later).
43867- *
43868-*/
43869-static inline int open_to_namei_flags(int flag)
43870-{
43871- if ((flag+1) & O_ACCMODE)
43872- flag++;
43873- return flag;
43874-}
43875-
43876-/*
43877 * Handle the last step of open()
43878 */
43879 static struct file *do_last(struct nameidata *nd, struct path *path,
43880@@ -2045,6 +2094,7 @@ static struct file *do_last(struct namei
43881 struct dentry *dir = nd->path.dentry;
43882 struct dentry *dentry;
43883 int open_flag = op->open_flag;
43884+ int flag = open_to_namei_flags(open_flag);
43885 int will_truncate = open_flag & O_TRUNC;
43886 int want_write = 0;
43887 int acc_mode = op->acc_mode;
43888@@ -2065,6 +2115,10 @@ static struct file *do_last(struct namei
43889 error = complete_walk(nd);
43890 if (error)
43891 return ERR_PTR(error);
43892+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
43893+ error = -ENOENT;
43894+ goto exit;
43895+ }
43896 audit_inode(pathname, nd->path.dentry);
43897 if (open_flag & O_CREAT) {
43898 error = -EISDIR;
43899@@ -2075,6 +2129,10 @@ static struct file *do_last(struct namei
43900 error = complete_walk(nd);
43901 if (error)
43902 return ERR_PTR(error);
43903+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
43904+ error = -ENOENT;
43905+ goto exit;
43906+ }
43907 audit_inode(pathname, dir);
43908 goto ok;
43909 }
43910@@ -2097,6 +2155,11 @@ static struct file *do_last(struct namei
43911 if (error)
43912 return ERR_PTR(-ECHILD);
43913
43914+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
43915+ error = -ENOENT;
43916+ goto exit;
43917+ }
43918+
43919 error = -ENOTDIR;
43920 if (nd->flags & LOOKUP_DIRECTORY) {
43921 if (!nd->inode->i_op->lookup)
43922@@ -2132,6 +2195,12 @@ static struct file *do_last(struct namei
43923 /* Negative dentry, just create the file */
43924 if (!dentry->d_inode) {
43925 int mode = op->mode;
43926+
43927+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, flag, mode)) {
43928+ error = -EACCES;
43929+ goto exit_mutex_unlock;
43930+ }
43931+
43932 if (!IS_POSIXACL(dir->d_inode))
43933 mode &= ~current_umask();
43934 /*
43935@@ -2155,6 +2224,8 @@ static struct file *do_last(struct namei
43936 error = vfs_create(dir->d_inode, dentry, mode, nd);
43937 if (error)
43938 goto exit_mutex_unlock;
43939+ else
43940+ gr_handle_create(path->dentry, path->mnt);
43941 mutex_unlock(&dir->d_inode->i_mutex);
43942 dput(nd->path.dentry);
43943 nd->path.dentry = dentry;
43944@@ -2164,6 +2235,19 @@ static struct file *do_last(struct namei
43945 /*
43946 * It already exists.
43947 */
43948+
43949+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
43950+ error = -ENOENT;
43951+ goto exit_mutex_unlock;
43952+ }
43953+
43954+ /* only check if O_CREAT is specified, all other checks need to go
43955+ into may_open */
43956+ if (gr_handle_fifo(path->dentry, path->mnt, dir, flag, acc_mode)) {
43957+ error = -EACCES;
43958+ goto exit_mutex_unlock;
43959+ }
43960+
43961 mutex_unlock(&dir->d_inode->i_mutex);
43962 audit_inode(pathname, path->dentry);
43963
43964@@ -2373,6 +2457,10 @@ struct dentry *lookup_create(struct name
43965 }
43966 return dentry;
43967 eexist:
43968+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
43969+ dput(dentry);
43970+ return ERR_PTR(-ENOENT);
43971+ }
43972 dput(dentry);
43973 dentry = ERR_PTR(-EEXIST);
43974 fail:
43975@@ -2450,6 +2538,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
43976 error = may_mknod(mode);
43977 if (error)
43978 goto out_dput;
43979+
43980+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
43981+ error = -EPERM;
43982+ goto out_dput;
43983+ }
43984+
43985+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
43986+ error = -EACCES;
43987+ goto out_dput;
43988+ }
43989+
43990 error = mnt_want_write(nd.path.mnt);
43991 if (error)
43992 goto out_dput;
43993@@ -2470,6 +2569,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
43994 }
43995 out_drop_write:
43996 mnt_drop_write(nd.path.mnt);
43997+
43998+ if (!error)
43999+ gr_handle_create(dentry, nd.path.mnt);
44000 out_dput:
44001 dput(dentry);
44002 out_unlock:
44003@@ -2522,6 +2624,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
44004 if (IS_ERR(dentry))
44005 goto out_unlock;
44006
44007+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
44008+ error = -EACCES;
44009+ goto out_dput;
44010+ }
44011+
44012 if (!IS_POSIXACL(nd.path.dentry->d_inode))
44013 mode &= ~current_umask();
44014 error = mnt_want_write(nd.path.mnt);
44015@@ -2533,6 +2640,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
44016 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
44017 out_drop_write:
44018 mnt_drop_write(nd.path.mnt);
44019+
44020+ if (!error)
44021+ gr_handle_create(dentry, nd.path.mnt);
44022+
44023 out_dput:
44024 dput(dentry);
44025 out_unlock:
44026@@ -2615,6 +2726,8 @@ static long do_rmdir(int dfd, const char
44027 char * name;
44028 struct dentry *dentry;
44029 struct nameidata nd;
44030+ ino_t saved_ino = 0;
44031+ dev_t saved_dev = 0;
44032
44033 error = user_path_parent(dfd, pathname, &nd, &name);
44034 if (error)
44035@@ -2643,6 +2756,17 @@ static long do_rmdir(int dfd, const char
44036 error = -ENOENT;
44037 goto exit3;
44038 }
44039+
44040+ if (dentry->d_inode->i_nlink <= 1) {
44041+ saved_ino = dentry->d_inode->i_ino;
44042+ saved_dev = gr_get_dev_from_dentry(dentry);
44043+ }
44044+
44045+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
44046+ error = -EACCES;
44047+ goto exit3;
44048+ }
44049+
44050 error = mnt_want_write(nd.path.mnt);
44051 if (error)
44052 goto exit3;
44053@@ -2650,6 +2774,8 @@ static long do_rmdir(int dfd, const char
44054 if (error)
44055 goto exit4;
44056 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
44057+ if (!error && (saved_dev || saved_ino))
44058+ gr_handle_delete(saved_ino, saved_dev);
44059 exit4:
44060 mnt_drop_write(nd.path.mnt);
44061 exit3:
44062@@ -2712,6 +2838,8 @@ static long do_unlinkat(int dfd, const c
44063 struct dentry *dentry;
44064 struct nameidata nd;
44065 struct inode *inode = NULL;
44066+ ino_t saved_ino = 0;
44067+ dev_t saved_dev = 0;
44068
44069 error = user_path_parent(dfd, pathname, &nd, &name);
44070 if (error)
44071@@ -2734,6 +2862,16 @@ static long do_unlinkat(int dfd, const c
44072 if (!inode)
44073 goto slashes;
44074 ihold(inode);
44075+
44076+ if (inode->i_nlink <= 1) {
44077+ saved_ino = inode->i_ino;
44078+ saved_dev = gr_get_dev_from_dentry(dentry);
44079+ }
44080+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
44081+ error = -EACCES;
44082+ goto exit2;
44083+ }
44084+
44085 error = mnt_want_write(nd.path.mnt);
44086 if (error)
44087 goto exit2;
44088@@ -2741,6 +2879,8 @@ static long do_unlinkat(int dfd, const c
44089 if (error)
44090 goto exit3;
44091 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
44092+ if (!error && (saved_ino || saved_dev))
44093+ gr_handle_delete(saved_ino, saved_dev);
44094 exit3:
44095 mnt_drop_write(nd.path.mnt);
44096 exit2:
44097@@ -2818,6 +2958,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
44098 if (IS_ERR(dentry))
44099 goto out_unlock;
44100
44101+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
44102+ error = -EACCES;
44103+ goto out_dput;
44104+ }
44105+
44106 error = mnt_want_write(nd.path.mnt);
44107 if (error)
44108 goto out_dput;
44109@@ -2825,6 +2970,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
44110 if (error)
44111 goto out_drop_write;
44112 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
44113+ if (!error)
44114+ gr_handle_create(dentry, nd.path.mnt);
44115 out_drop_write:
44116 mnt_drop_write(nd.path.mnt);
44117 out_dput:
44118@@ -2933,6 +3080,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44119 error = PTR_ERR(new_dentry);
44120 if (IS_ERR(new_dentry))
44121 goto out_unlock;
44122+
44123+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
44124+ old_path.dentry->d_inode,
44125+ old_path.dentry->d_inode->i_mode, to)) {
44126+ error = -EACCES;
44127+ goto out_dput;
44128+ }
44129+
44130+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
44131+ old_path.dentry, old_path.mnt, to)) {
44132+ error = -EACCES;
44133+ goto out_dput;
44134+ }
44135+
44136 error = mnt_want_write(nd.path.mnt);
44137 if (error)
44138 goto out_dput;
44139@@ -2940,6 +3101,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44140 if (error)
44141 goto out_drop_write;
44142 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
44143+ if (!error)
44144+ gr_handle_create(new_dentry, nd.path.mnt);
44145 out_drop_write:
44146 mnt_drop_write(nd.path.mnt);
44147 out_dput:
44148@@ -3117,6 +3280,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44149 char *to;
44150 int error;
44151
44152+ pax_track_stack();
44153+
44154 error = user_path_parent(olddfd, oldname, &oldnd, &from);
44155 if (error)
44156 goto exit;
44157@@ -3173,6 +3338,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44158 if (new_dentry == trap)
44159 goto exit5;
44160
44161+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
44162+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
44163+ to);
44164+ if (error)
44165+ goto exit5;
44166+
44167 error = mnt_want_write(oldnd.path.mnt);
44168 if (error)
44169 goto exit5;
44170@@ -3182,6 +3353,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44171 goto exit6;
44172 error = vfs_rename(old_dir->d_inode, old_dentry,
44173 new_dir->d_inode, new_dentry);
44174+ if (!error)
44175+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
44176+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
44177 exit6:
44178 mnt_drop_write(oldnd.path.mnt);
44179 exit5:
44180@@ -3207,6 +3381,8 @@ SYSCALL_DEFINE2(rename, const char __use
44181
44182 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
44183 {
44184+ char tmpbuf[64];
44185+ const char *newlink;
44186 int len;
44187
44188 len = PTR_ERR(link);
44189@@ -3216,7 +3392,14 @@ int vfs_readlink(struct dentry *dentry,
44190 len = strlen(link);
44191 if (len > (unsigned) buflen)
44192 len = buflen;
44193- if (copy_to_user(buffer, link, len))
44194+
44195+ if (len < sizeof(tmpbuf)) {
44196+ memcpy(tmpbuf, link, len);
44197+ newlink = tmpbuf;
44198+ } else
44199+ newlink = link;
44200+
44201+ if (copy_to_user(buffer, newlink, len))
44202 len = -EFAULT;
44203 out:
44204 return len;
44205diff -urNp linux-3.0.7/fs/namespace.c linux-3.0.7/fs/namespace.c
44206--- linux-3.0.7/fs/namespace.c 2011-07-21 22:17:23.000000000 -0400
44207+++ linux-3.0.7/fs/namespace.c 2011-08-23 21:48:14.000000000 -0400
44208@@ -1328,6 +1328,9 @@ static int do_umount(struct vfsmount *mn
44209 if (!(sb->s_flags & MS_RDONLY))
44210 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
44211 up_write(&sb->s_umount);
44212+
44213+ gr_log_remount(mnt->mnt_devname, retval);
44214+
44215 return retval;
44216 }
44217
44218@@ -1347,6 +1350,9 @@ static int do_umount(struct vfsmount *mn
44219 br_write_unlock(vfsmount_lock);
44220 up_write(&namespace_sem);
44221 release_mounts(&umount_list);
44222+
44223+ gr_log_unmount(mnt->mnt_devname, retval);
44224+
44225 return retval;
44226 }
44227
44228@@ -2338,6 +2344,16 @@ long do_mount(char *dev_name, char *dir_
44229 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
44230 MS_STRICTATIME);
44231
44232+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
44233+ retval = -EPERM;
44234+ goto dput_out;
44235+ }
44236+
44237+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
44238+ retval = -EPERM;
44239+ goto dput_out;
44240+ }
44241+
44242 if (flags & MS_REMOUNT)
44243 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
44244 data_page);
44245@@ -2352,6 +2368,9 @@ long do_mount(char *dev_name, char *dir_
44246 dev_name, data_page);
44247 dput_out:
44248 path_put(&path);
44249+
44250+ gr_log_mount(dev_name, dir_name, retval);
44251+
44252 return retval;
44253 }
44254
44255@@ -2575,6 +2594,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
44256 if (error)
44257 goto out2;
44258
44259+ if (gr_handle_chroot_pivot()) {
44260+ error = -EPERM;
44261+ goto out2;
44262+ }
44263+
44264 get_fs_root(current->fs, &root);
44265 error = lock_mount(&old);
44266 if (error)
44267diff -urNp linux-3.0.7/fs/ncpfs/dir.c linux-3.0.7/fs/ncpfs/dir.c
44268--- linux-3.0.7/fs/ncpfs/dir.c 2011-07-21 22:17:23.000000000 -0400
44269+++ linux-3.0.7/fs/ncpfs/dir.c 2011-08-23 21:48:14.000000000 -0400
44270@@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
44271 int res, val = 0, len;
44272 __u8 __name[NCP_MAXPATHLEN + 1];
44273
44274+ pax_track_stack();
44275+
44276 if (dentry == dentry->d_sb->s_root)
44277 return 1;
44278
44279@@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct
44280 int error, res, len;
44281 __u8 __name[NCP_MAXPATHLEN + 1];
44282
44283+ pax_track_stack();
44284+
44285 error = -EIO;
44286 if (!ncp_conn_valid(server))
44287 goto finished;
44288@@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
44289 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
44290 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
44291
44292+ pax_track_stack();
44293+
44294 ncp_age_dentry(server, dentry);
44295 len = sizeof(__name);
44296 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
44297@@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir,
44298 int error, len;
44299 __u8 __name[NCP_MAXPATHLEN + 1];
44300
44301+ pax_track_stack();
44302+
44303 DPRINTK("ncp_mkdir: making %s/%s\n",
44304 dentry->d_parent->d_name.name, dentry->d_name.name);
44305
44306@@ -1140,6 +1148,8 @@ static int ncp_rename(struct inode *old_
44307 int old_len, new_len;
44308 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
44309
44310+ pax_track_stack();
44311+
44312 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
44313 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
44314 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
44315diff -urNp linux-3.0.7/fs/ncpfs/inode.c linux-3.0.7/fs/ncpfs/inode.c
44316--- linux-3.0.7/fs/ncpfs/inode.c 2011-07-21 22:17:23.000000000 -0400
44317+++ linux-3.0.7/fs/ncpfs/inode.c 2011-08-23 21:48:14.000000000 -0400
44318@@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
44319 #endif
44320 struct ncp_entry_info finfo;
44321
44322+ pax_track_stack();
44323+
44324 memset(&data, 0, sizeof(data));
44325 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
44326 if (!server)
44327diff -urNp linux-3.0.7/fs/nfs/inode.c linux-3.0.7/fs/nfs/inode.c
44328--- linux-3.0.7/fs/nfs/inode.c 2011-07-21 22:17:23.000000000 -0400
44329+++ linux-3.0.7/fs/nfs/inode.c 2011-08-23 21:47:56.000000000 -0400
44330@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
44331 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
44332 nfsi->attrtimeo_timestamp = jiffies;
44333
44334- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
44335+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
44336 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
44337 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
44338 else
44339@@ -1000,16 +1000,16 @@ static int nfs_size_need_update(const st
44340 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
44341 }
44342
44343-static atomic_long_t nfs_attr_generation_counter;
44344+static atomic_long_unchecked_t nfs_attr_generation_counter;
44345
44346 static unsigned long nfs_read_attr_generation_counter(void)
44347 {
44348- return atomic_long_read(&nfs_attr_generation_counter);
44349+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
44350 }
44351
44352 unsigned long nfs_inc_attr_generation_counter(void)
44353 {
44354- return atomic_long_inc_return(&nfs_attr_generation_counter);
44355+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
44356 }
44357
44358 void nfs_fattr_init(struct nfs_fattr *fattr)
44359diff -urNp linux-3.0.7/fs/nfsd/nfs4state.c linux-3.0.7/fs/nfsd/nfs4state.c
44360--- linux-3.0.7/fs/nfsd/nfs4state.c 2011-09-02 18:11:21.000000000 -0400
44361+++ linux-3.0.7/fs/nfsd/nfs4state.c 2011-08-23 21:48:14.000000000 -0400
44362@@ -3794,6 +3794,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
44363 unsigned int strhashval;
44364 int err;
44365
44366+ pax_track_stack();
44367+
44368 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
44369 (long long) lock->lk_offset,
44370 (long long) lock->lk_length);
44371diff -urNp linux-3.0.7/fs/nfsd/nfs4xdr.c linux-3.0.7/fs/nfsd/nfs4xdr.c
44372--- linux-3.0.7/fs/nfsd/nfs4xdr.c 2011-07-21 22:17:23.000000000 -0400
44373+++ linux-3.0.7/fs/nfsd/nfs4xdr.c 2011-08-23 21:48:14.000000000 -0400
44374@@ -1788,6 +1788,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
44375 .dentry = dentry,
44376 };
44377
44378+ pax_track_stack();
44379+
44380 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
44381 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
44382 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
44383diff -urNp linux-3.0.7/fs/nfsd/vfs.c linux-3.0.7/fs/nfsd/vfs.c
44384--- linux-3.0.7/fs/nfsd/vfs.c 2011-07-21 22:17:23.000000000 -0400
44385+++ linux-3.0.7/fs/nfsd/vfs.c 2011-10-06 04:17:55.000000000 -0400
44386@@ -896,7 +896,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
44387 } else {
44388 oldfs = get_fs();
44389 set_fs(KERNEL_DS);
44390- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
44391+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
44392 set_fs(oldfs);
44393 }
44394
44395@@ -1000,7 +1000,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
44396
44397 /* Write the data. */
44398 oldfs = get_fs(); set_fs(KERNEL_DS);
44399- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
44400+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
44401 set_fs(oldfs);
44402 if (host_err < 0)
44403 goto out_nfserr;
44404@@ -1535,7 +1535,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
44405 */
44406
44407 oldfs = get_fs(); set_fs(KERNEL_DS);
44408- host_err = inode->i_op->readlink(dentry, buf, *lenp);
44409+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
44410 set_fs(oldfs);
44411
44412 if (host_err < 0)
44413diff -urNp linux-3.0.7/fs/notify/fanotify/fanotify_user.c linux-3.0.7/fs/notify/fanotify/fanotify_user.c
44414--- linux-3.0.7/fs/notify/fanotify/fanotify_user.c 2011-07-21 22:17:23.000000000 -0400
44415+++ linux-3.0.7/fs/notify/fanotify/fanotify_user.c 2011-08-23 21:48:14.000000000 -0400
44416@@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct
44417 goto out_close_fd;
44418
44419 ret = -EFAULT;
44420- if (copy_to_user(buf, &fanotify_event_metadata,
44421+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
44422+ copy_to_user(buf, &fanotify_event_metadata,
44423 fanotify_event_metadata.event_len))
44424 goto out_kill_access_response;
44425
44426diff -urNp linux-3.0.7/fs/notify/notification.c linux-3.0.7/fs/notify/notification.c
44427--- linux-3.0.7/fs/notify/notification.c 2011-07-21 22:17:23.000000000 -0400
44428+++ linux-3.0.7/fs/notify/notification.c 2011-08-23 21:47:56.000000000 -0400
44429@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
44430 * get set to 0 so it will never get 'freed'
44431 */
44432 static struct fsnotify_event *q_overflow_event;
44433-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44434+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44435
44436 /**
44437 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
44438@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
44439 */
44440 u32 fsnotify_get_cookie(void)
44441 {
44442- return atomic_inc_return(&fsnotify_sync_cookie);
44443+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
44444 }
44445 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
44446
44447diff -urNp linux-3.0.7/fs/ntfs/dir.c linux-3.0.7/fs/ntfs/dir.c
44448--- linux-3.0.7/fs/ntfs/dir.c 2011-07-21 22:17:23.000000000 -0400
44449+++ linux-3.0.7/fs/ntfs/dir.c 2011-08-23 21:47:56.000000000 -0400
44450@@ -1329,7 +1329,7 @@ find_next_index_buffer:
44451 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
44452 ~(s64)(ndir->itype.index.block_size - 1)));
44453 /* Bounds checks. */
44454- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44455+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44456 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
44457 "inode 0x%lx or driver bug.", vdir->i_ino);
44458 goto err_out;
44459diff -urNp linux-3.0.7/fs/ntfs/file.c linux-3.0.7/fs/ntfs/file.c
44460--- linux-3.0.7/fs/ntfs/file.c 2011-07-21 22:17:23.000000000 -0400
44461+++ linux-3.0.7/fs/ntfs/file.c 2011-08-23 21:47:56.000000000 -0400
44462@@ -2222,6 +2222,6 @@ const struct inode_operations ntfs_file_
44463 #endif /* NTFS_RW */
44464 };
44465
44466-const struct file_operations ntfs_empty_file_ops = {};
44467+const struct file_operations ntfs_empty_file_ops __read_only;
44468
44469-const struct inode_operations ntfs_empty_inode_ops = {};
44470+const struct inode_operations ntfs_empty_inode_ops __read_only;
44471diff -urNp linux-3.0.7/fs/ocfs2/localalloc.c linux-3.0.7/fs/ocfs2/localalloc.c
44472--- linux-3.0.7/fs/ocfs2/localalloc.c 2011-07-21 22:17:23.000000000 -0400
44473+++ linux-3.0.7/fs/ocfs2/localalloc.c 2011-08-23 21:47:56.000000000 -0400
44474@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
44475 goto bail;
44476 }
44477
44478- atomic_inc(&osb->alloc_stats.moves);
44479+ atomic_inc_unchecked(&osb->alloc_stats.moves);
44480
44481 bail:
44482 if (handle)
44483diff -urNp linux-3.0.7/fs/ocfs2/namei.c linux-3.0.7/fs/ocfs2/namei.c
44484--- linux-3.0.7/fs/ocfs2/namei.c 2011-07-21 22:17:23.000000000 -0400
44485+++ linux-3.0.7/fs/ocfs2/namei.c 2011-08-23 21:48:14.000000000 -0400
44486@@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
44487 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
44488 struct ocfs2_dir_lookup_result target_insert = { NULL, };
44489
44490+ pax_track_stack();
44491+
44492 /* At some point it might be nice to break this function up a
44493 * bit. */
44494
44495diff -urNp linux-3.0.7/fs/ocfs2/ocfs2.h linux-3.0.7/fs/ocfs2/ocfs2.h
44496--- linux-3.0.7/fs/ocfs2/ocfs2.h 2011-07-21 22:17:23.000000000 -0400
44497+++ linux-3.0.7/fs/ocfs2/ocfs2.h 2011-08-23 21:47:56.000000000 -0400
44498@@ -235,11 +235,11 @@ enum ocfs2_vol_state
44499
44500 struct ocfs2_alloc_stats
44501 {
44502- atomic_t moves;
44503- atomic_t local_data;
44504- atomic_t bitmap_data;
44505- atomic_t bg_allocs;
44506- atomic_t bg_extends;
44507+ atomic_unchecked_t moves;
44508+ atomic_unchecked_t local_data;
44509+ atomic_unchecked_t bitmap_data;
44510+ atomic_unchecked_t bg_allocs;
44511+ atomic_unchecked_t bg_extends;
44512 };
44513
44514 enum ocfs2_local_alloc_state
44515diff -urNp linux-3.0.7/fs/ocfs2/suballoc.c linux-3.0.7/fs/ocfs2/suballoc.c
44516--- linux-3.0.7/fs/ocfs2/suballoc.c 2011-07-21 22:17:23.000000000 -0400
44517+++ linux-3.0.7/fs/ocfs2/suballoc.c 2011-08-23 21:47:56.000000000 -0400
44518@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
44519 mlog_errno(status);
44520 goto bail;
44521 }
44522- atomic_inc(&osb->alloc_stats.bg_extends);
44523+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
44524
44525 /* You should never ask for this much metadata */
44526 BUG_ON(bits_wanted >
44527@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
44528 mlog_errno(status);
44529 goto bail;
44530 }
44531- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44532+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44533
44534 *suballoc_loc = res.sr_bg_blkno;
44535 *suballoc_bit_start = res.sr_bit_offset;
44536@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
44537 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
44538 res->sr_bits);
44539
44540- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44541+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44542
44543 BUG_ON(res->sr_bits != 1);
44544
44545@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
44546 mlog_errno(status);
44547 goto bail;
44548 }
44549- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44550+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44551
44552 BUG_ON(res.sr_bits != 1);
44553
44554@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
44555 cluster_start,
44556 num_clusters);
44557 if (!status)
44558- atomic_inc(&osb->alloc_stats.local_data);
44559+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
44560 } else {
44561 if (min_clusters > (osb->bitmap_cpg - 1)) {
44562 /* The only paths asking for contiguousness
44563@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
44564 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
44565 res.sr_bg_blkno,
44566 res.sr_bit_offset);
44567- atomic_inc(&osb->alloc_stats.bitmap_data);
44568+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
44569 *num_clusters = res.sr_bits;
44570 }
44571 }
44572diff -urNp linux-3.0.7/fs/ocfs2/super.c linux-3.0.7/fs/ocfs2/super.c
44573--- linux-3.0.7/fs/ocfs2/super.c 2011-07-21 22:17:23.000000000 -0400
44574+++ linux-3.0.7/fs/ocfs2/super.c 2011-08-23 21:47:56.000000000 -0400
44575@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
44576 "%10s => GlobalAllocs: %d LocalAllocs: %d "
44577 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
44578 "Stats",
44579- atomic_read(&osb->alloc_stats.bitmap_data),
44580- atomic_read(&osb->alloc_stats.local_data),
44581- atomic_read(&osb->alloc_stats.bg_allocs),
44582- atomic_read(&osb->alloc_stats.moves),
44583- atomic_read(&osb->alloc_stats.bg_extends));
44584+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
44585+ atomic_read_unchecked(&osb->alloc_stats.local_data),
44586+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
44587+ atomic_read_unchecked(&osb->alloc_stats.moves),
44588+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
44589
44590 out += snprintf(buf + out, len - out,
44591 "%10s => State: %u Descriptor: %llu Size: %u bits "
44592@@ -2112,11 +2112,11 @@ static int ocfs2_initialize_super(struct
44593 spin_lock_init(&osb->osb_xattr_lock);
44594 ocfs2_init_steal_slots(osb);
44595
44596- atomic_set(&osb->alloc_stats.moves, 0);
44597- atomic_set(&osb->alloc_stats.local_data, 0);
44598- atomic_set(&osb->alloc_stats.bitmap_data, 0);
44599- atomic_set(&osb->alloc_stats.bg_allocs, 0);
44600- atomic_set(&osb->alloc_stats.bg_extends, 0);
44601+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
44602+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
44603+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
44604+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
44605+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
44606
44607 /* Copy the blockcheck stats from the superblock probe */
44608 osb->osb_ecc_stats = *stats;
44609diff -urNp linux-3.0.7/fs/ocfs2/symlink.c linux-3.0.7/fs/ocfs2/symlink.c
44610--- linux-3.0.7/fs/ocfs2/symlink.c 2011-07-21 22:17:23.000000000 -0400
44611+++ linux-3.0.7/fs/ocfs2/symlink.c 2011-08-23 21:47:56.000000000 -0400
44612@@ -142,7 +142,7 @@ bail:
44613
44614 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
44615 {
44616- char *link = nd_get_link(nd);
44617+ const char *link = nd_get_link(nd);
44618 if (!IS_ERR(link))
44619 kfree(link);
44620 }
44621diff -urNp linux-3.0.7/fs/open.c linux-3.0.7/fs/open.c
44622--- linux-3.0.7/fs/open.c 2011-07-21 22:17:23.000000000 -0400
44623+++ linux-3.0.7/fs/open.c 2011-09-14 09:16:46.000000000 -0400
44624@@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
44625 error = locks_verify_truncate(inode, NULL, length);
44626 if (!error)
44627 error = security_path_truncate(&path);
44628+
44629+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
44630+ error = -EACCES;
44631+
44632 if (!error)
44633 error = do_truncate(path.dentry, length, 0, NULL);
44634
44635@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
44636 if (__mnt_is_readonly(path.mnt))
44637 res = -EROFS;
44638
44639+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
44640+ res = -EACCES;
44641+
44642 out_path_release:
44643 path_put(&path);
44644 out:
44645@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
44646 if (error)
44647 goto dput_and_out;
44648
44649+ gr_log_chdir(path.dentry, path.mnt);
44650+
44651 set_fs_pwd(current->fs, &path);
44652
44653 dput_and_out:
44654@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
44655 goto out_putf;
44656
44657 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
44658+
44659+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
44660+ error = -EPERM;
44661+
44662+ if (!error)
44663+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
44664+
44665 if (!error)
44666 set_fs_pwd(current->fs, &file->f_path);
44667 out_putf:
44668@@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __use
44669 if (error)
44670 goto dput_and_out;
44671
44672+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
44673+ goto dput_and_out;
44674+
44675 set_fs_root(current->fs, &path);
44676+
44677+ gr_handle_chroot_chdir(&path);
44678+
44679 error = 0;
44680 dput_and_out:
44681 path_put(&path);
44682@@ -466,12 +488,25 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
44683 err = mnt_want_write_file(file);
44684 if (err)
44685 goto out_putf;
44686+
44687 mutex_lock(&inode->i_mutex);
44688+
44689+ if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) {
44690+ err = -EACCES;
44691+ goto out_unlock;
44692+ }
44693+
44694 err = security_path_chmod(dentry, file->f_vfsmnt, mode);
44695 if (err)
44696 goto out_unlock;
44697 if (mode == (mode_t) -1)
44698 mode = inode->i_mode;
44699+
44700+ if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) {
44701+ err = -EACCES;
44702+ goto out_unlock;
44703+ }
44704+
44705 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
44706 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
44707 err = notify_change(dentry, &newattrs);
44708@@ -499,12 +534,25 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
44709 error = mnt_want_write(path.mnt);
44710 if (error)
44711 goto dput_and_out;
44712+
44713 mutex_lock(&inode->i_mutex);
44714+
44715+ if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
44716+ error = -EACCES;
44717+ goto out_unlock;
44718+ }
44719+
44720 error = security_path_chmod(path.dentry, path.mnt, mode);
44721 if (error)
44722 goto out_unlock;
44723 if (mode == (mode_t) -1)
44724 mode = inode->i_mode;
44725+
44726+ if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
44727+ error = -EACCES;
44728+ goto out_unlock;
44729+ }
44730+
44731 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
44732 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
44733 error = notify_change(path.dentry, &newattrs);
44734@@ -528,6 +576,9 @@ static int chown_common(struct path *pat
44735 int error;
44736 struct iattr newattrs;
44737
44738+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
44739+ return -EACCES;
44740+
44741 newattrs.ia_valid = ATTR_CTIME;
44742 if (user != (uid_t) -1) {
44743 newattrs.ia_valid |= ATTR_UID;
44744@@ -998,7 +1049,10 @@ long do_sys_open(int dfd, const char __u
44745 if (!IS_ERR(tmp)) {
44746 fd = get_unused_fd_flags(flags);
44747 if (fd >= 0) {
44748- struct file *f = do_filp_open(dfd, tmp, &op, lookup);
44749+ struct file *f;
44750+ /* don't allow to be set by userland */
44751+ flags &= ~FMODE_GREXEC;
44752+ f = do_filp_open(dfd, tmp, &op, lookup);
44753 if (IS_ERR(f)) {
44754 put_unused_fd(fd);
44755 fd = PTR_ERR(f);
44756diff -urNp linux-3.0.7/fs/partitions/ldm.c linux-3.0.7/fs/partitions/ldm.c
44757--- linux-3.0.7/fs/partitions/ldm.c 2011-07-21 22:17:23.000000000 -0400
44758+++ linux-3.0.7/fs/partitions/ldm.c 2011-08-23 21:48:14.000000000 -0400
44759@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
44760 ldm_error ("A VBLK claims to have %d parts.", num);
44761 return false;
44762 }
44763+
44764 if (rec >= num) {
44765 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
44766 return false;
44767@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
44768 goto found;
44769 }
44770
44771- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
44772+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
44773 if (!f) {
44774 ldm_crit ("Out of memory.");
44775 return false;
44776diff -urNp linux-3.0.7/fs/pipe.c linux-3.0.7/fs/pipe.c
44777--- linux-3.0.7/fs/pipe.c 2011-07-21 22:17:23.000000000 -0400
44778+++ linux-3.0.7/fs/pipe.c 2011-08-23 21:48:14.000000000 -0400
44779@@ -420,9 +420,9 @@ redo:
44780 }
44781 if (bufs) /* More to do? */
44782 continue;
44783- if (!pipe->writers)
44784+ if (!atomic_read(&pipe->writers))
44785 break;
44786- if (!pipe->waiting_writers) {
44787+ if (!atomic_read(&pipe->waiting_writers)) {
44788 /* syscall merging: Usually we must not sleep
44789 * if O_NONBLOCK is set, or if we got some data.
44790 * But if a writer sleeps in kernel space, then
44791@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
44792 mutex_lock(&inode->i_mutex);
44793 pipe = inode->i_pipe;
44794
44795- if (!pipe->readers) {
44796+ if (!atomic_read(&pipe->readers)) {
44797 send_sig(SIGPIPE, current, 0);
44798 ret = -EPIPE;
44799 goto out;
44800@@ -530,7 +530,7 @@ redo1:
44801 for (;;) {
44802 int bufs;
44803
44804- if (!pipe->readers) {
44805+ if (!atomic_read(&pipe->readers)) {
44806 send_sig(SIGPIPE, current, 0);
44807 if (!ret)
44808 ret = -EPIPE;
44809@@ -616,9 +616,9 @@ redo2:
44810 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44811 do_wakeup = 0;
44812 }
44813- pipe->waiting_writers++;
44814+ atomic_inc(&pipe->waiting_writers);
44815 pipe_wait(pipe);
44816- pipe->waiting_writers--;
44817+ atomic_dec(&pipe->waiting_writers);
44818 }
44819 out:
44820 mutex_unlock(&inode->i_mutex);
44821@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
44822 mask = 0;
44823 if (filp->f_mode & FMODE_READ) {
44824 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
44825- if (!pipe->writers && filp->f_version != pipe->w_counter)
44826+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
44827 mask |= POLLHUP;
44828 }
44829
44830@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
44831 * Most Unices do not set POLLERR for FIFOs but on Linux they
44832 * behave exactly like pipes for poll().
44833 */
44834- if (!pipe->readers)
44835+ if (!atomic_read(&pipe->readers))
44836 mask |= POLLERR;
44837 }
44838
44839@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
44840
44841 mutex_lock(&inode->i_mutex);
44842 pipe = inode->i_pipe;
44843- pipe->readers -= decr;
44844- pipe->writers -= decw;
44845+ atomic_sub(decr, &pipe->readers);
44846+ atomic_sub(decw, &pipe->writers);
44847
44848- if (!pipe->readers && !pipe->writers) {
44849+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
44850 free_pipe_info(inode);
44851 } else {
44852 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
44853@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
44854
44855 if (inode->i_pipe) {
44856 ret = 0;
44857- inode->i_pipe->readers++;
44858+ atomic_inc(&inode->i_pipe->readers);
44859 }
44860
44861 mutex_unlock(&inode->i_mutex);
44862@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
44863
44864 if (inode->i_pipe) {
44865 ret = 0;
44866- inode->i_pipe->writers++;
44867+ atomic_inc(&inode->i_pipe->writers);
44868 }
44869
44870 mutex_unlock(&inode->i_mutex);
44871@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
44872 if (inode->i_pipe) {
44873 ret = 0;
44874 if (filp->f_mode & FMODE_READ)
44875- inode->i_pipe->readers++;
44876+ atomic_inc(&inode->i_pipe->readers);
44877 if (filp->f_mode & FMODE_WRITE)
44878- inode->i_pipe->writers++;
44879+ atomic_inc(&inode->i_pipe->writers);
44880 }
44881
44882 mutex_unlock(&inode->i_mutex);
44883@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
44884 inode->i_pipe = NULL;
44885 }
44886
44887-static struct vfsmount *pipe_mnt __read_mostly;
44888+struct vfsmount *pipe_mnt __read_mostly;
44889
44890 /*
44891 * pipefs_dname() is called from d_path().
44892@@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
44893 goto fail_iput;
44894 inode->i_pipe = pipe;
44895
44896- pipe->readers = pipe->writers = 1;
44897+ atomic_set(&pipe->readers, 1);
44898+ atomic_set(&pipe->writers, 1);
44899 inode->i_fop = &rdwr_pipefifo_fops;
44900
44901 /*
44902diff -urNp linux-3.0.7/fs/proc/array.c linux-3.0.7/fs/proc/array.c
44903--- linux-3.0.7/fs/proc/array.c 2011-07-21 22:17:23.000000000 -0400
44904+++ linux-3.0.7/fs/proc/array.c 2011-08-23 21:48:14.000000000 -0400
44905@@ -60,6 +60,7 @@
44906 #include <linux/tty.h>
44907 #include <linux/string.h>
44908 #include <linux/mman.h>
44909+#include <linux/grsecurity.h>
44910 #include <linux/proc_fs.h>
44911 #include <linux/ioport.h>
44912 #include <linux/uaccess.h>
44913@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
44914 seq_putc(m, '\n');
44915 }
44916
44917+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44918+static inline void task_pax(struct seq_file *m, struct task_struct *p)
44919+{
44920+ if (p->mm)
44921+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
44922+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
44923+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
44924+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
44925+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
44926+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
44927+ else
44928+ seq_printf(m, "PaX:\t-----\n");
44929+}
44930+#endif
44931+
44932 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
44933 struct pid *pid, struct task_struct *task)
44934 {
44935@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m,
44936 task_cpus_allowed(m, task);
44937 cpuset_task_status_allowed(m, task);
44938 task_context_switch_counts(m, task);
44939+
44940+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44941+ task_pax(m, task);
44942+#endif
44943+
44944+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
44945+ task_grsec_rbac(m, task);
44946+#endif
44947+
44948 return 0;
44949 }
44950
44951+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44952+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
44953+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
44954+ _mm->pax_flags & MF_PAX_SEGMEXEC))
44955+#endif
44956+
44957 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
44958 struct pid *pid, struct task_struct *task, int whole)
44959 {
44960@@ -375,9 +406,11 @@ static int do_task_stat(struct seq_file
44961 cputime_t cutime, cstime, utime, stime;
44962 cputime_t cgtime, gtime;
44963 unsigned long rsslim = 0;
44964- char tcomm[sizeof(task->comm)];
44965+ char tcomm[sizeof(task->comm)] = { 0 };
44966 unsigned long flags;
44967
44968+ pax_track_stack();
44969+
44970 state = *get_task_state(task);
44971 vsize = eip = esp = 0;
44972 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
44973@@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file
44974 gtime = task->gtime;
44975 }
44976
44977+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44978+ if (PAX_RAND_FLAGS(mm)) {
44979+ eip = 0;
44980+ esp = 0;
44981+ wchan = 0;
44982+ }
44983+#endif
44984+#ifdef CONFIG_GRKERNSEC_HIDESYM
44985+ wchan = 0;
44986+ eip =0;
44987+ esp =0;
44988+#endif
44989+
44990 /* scale priority and nice values from timeslices to -20..20 */
44991 /* to make it look like a "normal" Unix priority/nice value */
44992 priority = task_prio(task);
44993@@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file
44994 vsize,
44995 mm ? get_mm_rss(mm) : 0,
44996 rsslim,
44997+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44998+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
44999+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
45000+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
45001+#else
45002 mm ? (permitted ? mm->start_code : 1) : 0,
45003 mm ? (permitted ? mm->end_code : 1) : 0,
45004 (permitted && mm) ? mm->start_stack : 0,
45005+#endif
45006 esp,
45007 eip,
45008 /* The signal information here is obsolete.
45009@@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
45010
45011 return 0;
45012 }
45013+
45014+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45015+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
45016+{
45017+ u32 curr_ip = 0;
45018+ unsigned long flags;
45019+
45020+ if (lock_task_sighand(task, &flags)) {
45021+ curr_ip = task->signal->curr_ip;
45022+ unlock_task_sighand(task, &flags);
45023+ }
45024+
45025+ return sprintf(buffer, "%pI4\n", &curr_ip);
45026+}
45027+#endif
45028diff -urNp linux-3.0.7/fs/proc/base.c linux-3.0.7/fs/proc/base.c
45029--- linux-3.0.7/fs/proc/base.c 2011-09-02 18:11:21.000000000 -0400
45030+++ linux-3.0.7/fs/proc/base.c 2011-10-19 03:59:32.000000000 -0400
45031@@ -107,6 +107,22 @@ struct pid_entry {
45032 union proc_op op;
45033 };
45034
45035+struct getdents_callback {
45036+ struct linux_dirent __user * current_dir;
45037+ struct linux_dirent __user * previous;
45038+ struct file * file;
45039+ int count;
45040+ int error;
45041+};
45042+
45043+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
45044+ loff_t offset, u64 ino, unsigned int d_type)
45045+{
45046+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
45047+ buf->error = -EINVAL;
45048+ return 0;
45049+}
45050+
45051 #define NOD(NAME, MODE, IOP, FOP, OP) { \
45052 .name = (NAME), \
45053 .len = sizeof(NAME) - 1, \
45054@@ -209,6 +225,9 @@ static struct mm_struct *__check_mem_per
45055 if (task == current)
45056 return mm;
45057
45058+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
45059+ return ERR_PTR(-EPERM);
45060+
45061 /*
45062 * If current is actively ptrace'ing, and would also be
45063 * permitted to freshly attach with ptrace now, permit it.
45064@@ -282,6 +301,9 @@ static int proc_pid_cmdline(struct task_
45065 if (!mm->arg_end)
45066 goto out_mm; /* Shh! No looking before we're done */
45067
45068+ if (gr_acl_handle_procpidmem(task))
45069+ goto out_mm;
45070+
45071 len = mm->arg_end - mm->arg_start;
45072
45073 if (len > PAGE_SIZE)
45074@@ -309,12 +331,28 @@ out:
45075 return res;
45076 }
45077
45078+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45079+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45080+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45081+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45082+#endif
45083+
45084 static int proc_pid_auxv(struct task_struct *task, char *buffer)
45085 {
45086 struct mm_struct *mm = mm_for_maps(task);
45087 int res = PTR_ERR(mm);
45088 if (mm && !IS_ERR(mm)) {
45089 unsigned int nwords = 0;
45090+
45091+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45092+ /* allow if we're currently ptracing this task */
45093+ if (PAX_RAND_FLAGS(mm) &&
45094+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
45095+ mmput(mm);
45096+ return 0;
45097+ }
45098+#endif
45099+
45100 do {
45101 nwords += 2;
45102 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
45103@@ -328,7 +366,7 @@ static int proc_pid_auxv(struct task_str
45104 }
45105
45106
45107-#ifdef CONFIG_KALLSYMS
45108+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45109 /*
45110 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
45111 * Returns the resolved symbol. If that fails, simply return the address.
45112@@ -367,7 +405,7 @@ static void unlock_trace(struct task_str
45113 mutex_unlock(&task->signal->cred_guard_mutex);
45114 }
45115
45116-#ifdef CONFIG_STACKTRACE
45117+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45118
45119 #define MAX_STACK_TRACE_DEPTH 64
45120
45121@@ -558,7 +596,7 @@ static int proc_pid_limits(struct task_s
45122 return count;
45123 }
45124
45125-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45126+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45127 static int proc_pid_syscall(struct task_struct *task, char *buffer)
45128 {
45129 long nr;
45130@@ -587,7 +625,7 @@ static int proc_pid_syscall(struct task_
45131 /************************************************************************/
45132
45133 /* permission checks */
45134-static int proc_fd_access_allowed(struct inode *inode)
45135+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
45136 {
45137 struct task_struct *task;
45138 int allowed = 0;
45139@@ -597,7 +635,10 @@ static int proc_fd_access_allowed(struct
45140 */
45141 task = get_proc_task(inode);
45142 if (task) {
45143- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45144+ if (log)
45145+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
45146+ else
45147+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45148 put_task_struct(task);
45149 }
45150 return allowed;
45151@@ -978,6 +1019,9 @@ static ssize_t environ_read(struct file
45152 if (!task)
45153 goto out_no_task;
45154
45155+ if (gr_acl_handle_procpidmem(task))
45156+ goto out;
45157+
45158 ret = -ENOMEM;
45159 page = (char *)__get_free_page(GFP_TEMPORARY);
45160 if (!page)
45161@@ -1614,7 +1658,7 @@ static void *proc_pid_follow_link(struct
45162 path_put(&nd->path);
45163
45164 /* Are we allowed to snoop on the tasks file descriptors? */
45165- if (!proc_fd_access_allowed(inode))
45166+ if (!proc_fd_access_allowed(inode,0))
45167 goto out;
45168
45169 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
45170@@ -1653,8 +1697,18 @@ static int proc_pid_readlink(struct dent
45171 struct path path;
45172
45173 /* Are we allowed to snoop on the tasks file descriptors? */
45174- if (!proc_fd_access_allowed(inode))
45175- goto out;
45176+ /* logging this is needed for learning on chromium to work properly,
45177+ but we don't want to flood the logs from 'ps' which does a readlink
45178+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
45179+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
45180+ */
45181+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
45182+ if (!proc_fd_access_allowed(inode,0))
45183+ goto out;
45184+ } else {
45185+ if (!proc_fd_access_allowed(inode,1))
45186+ goto out;
45187+ }
45188
45189 error = PROC_I(inode)->op.proc_get_link(inode, &path);
45190 if (error)
45191@@ -1719,7 +1773,11 @@ struct inode *proc_pid_make_inode(struct
45192 rcu_read_lock();
45193 cred = __task_cred(task);
45194 inode->i_uid = cred->euid;
45195+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45196+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45197+#else
45198 inode->i_gid = cred->egid;
45199+#endif
45200 rcu_read_unlock();
45201 }
45202 security_task_to_inode(task, inode);
45203@@ -1737,6 +1795,9 @@ int pid_getattr(struct vfsmount *mnt, st
45204 struct inode *inode = dentry->d_inode;
45205 struct task_struct *task;
45206 const struct cred *cred;
45207+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45208+ const struct cred *tmpcred = current_cred();
45209+#endif
45210
45211 generic_fillattr(inode, stat);
45212
45213@@ -1744,13 +1805,41 @@ int pid_getattr(struct vfsmount *mnt, st
45214 stat->uid = 0;
45215 stat->gid = 0;
45216 task = pid_task(proc_pid(inode), PIDTYPE_PID);
45217+
45218+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
45219+ rcu_read_unlock();
45220+ return -ENOENT;
45221+ }
45222+
45223 if (task) {
45224+ cred = __task_cred(task);
45225+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45226+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
45227+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45228+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45229+#endif
45230+ ) {
45231+#endif
45232 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45233+#ifdef CONFIG_GRKERNSEC_PROC_USER
45234+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45235+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45236+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45237+#endif
45238 task_dumpable(task)) {
45239- cred = __task_cred(task);
45240 stat->uid = cred->euid;
45241+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45242+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
45243+#else
45244 stat->gid = cred->egid;
45245+#endif
45246+ }
45247+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45248+ } else {
45249+ rcu_read_unlock();
45250+ return -ENOENT;
45251 }
45252+#endif
45253 }
45254 rcu_read_unlock();
45255 return 0;
45256@@ -1787,11 +1876,20 @@ int pid_revalidate(struct dentry *dentry
45257
45258 if (task) {
45259 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45260+#ifdef CONFIG_GRKERNSEC_PROC_USER
45261+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45262+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45263+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45264+#endif
45265 task_dumpable(task)) {
45266 rcu_read_lock();
45267 cred = __task_cred(task);
45268 inode->i_uid = cred->euid;
45269+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45270+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45271+#else
45272 inode->i_gid = cred->egid;
45273+#endif
45274 rcu_read_unlock();
45275 } else {
45276 inode->i_uid = 0;
45277@@ -1909,7 +2007,8 @@ static int proc_fd_info(struct inode *in
45278 int fd = proc_fd(inode);
45279
45280 if (task) {
45281- files = get_files_struct(task);
45282+ if (!gr_acl_handle_procpidmem(task))
45283+ files = get_files_struct(task);
45284 put_task_struct(task);
45285 }
45286 if (files) {
45287@@ -2169,11 +2268,21 @@ static const struct file_operations proc
45288 */
45289 static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
45290 {
45291+ struct task_struct *task;
45292 int rv = generic_permission(inode, mask, flags, NULL);
45293- if (rv == 0)
45294- return 0;
45295+
45296 if (task_pid(current) == proc_pid(inode))
45297 rv = 0;
45298+
45299+ task = get_proc_task(inode);
45300+ if (task == NULL)
45301+ return rv;
45302+
45303+ if (gr_acl_handle_procpidmem(task))
45304+ rv = -EACCES;
45305+
45306+ put_task_struct(task);
45307+
45308 return rv;
45309 }
45310
45311@@ -2283,6 +2392,9 @@ static struct dentry *proc_pident_lookup
45312 if (!task)
45313 goto out_no_task;
45314
45315+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45316+ goto out;
45317+
45318 /*
45319 * Yes, it does not scale. And it should not. Don't add
45320 * new entries into /proc/<tgid>/ without very good reasons.
45321@@ -2327,6 +2439,9 @@ static int proc_pident_readdir(struct fi
45322 if (!task)
45323 goto out_no_task;
45324
45325+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45326+ goto out;
45327+
45328 ret = 0;
45329 i = filp->f_pos;
45330 switch (i) {
45331@@ -2597,7 +2712,7 @@ static void *proc_self_follow_link(struc
45332 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
45333 void *cookie)
45334 {
45335- char *s = nd_get_link(nd);
45336+ const char *s = nd_get_link(nd);
45337 if (!IS_ERR(s))
45338 __putname(s);
45339 }
45340@@ -2656,6 +2771,7 @@ static struct dentry *proc_base_instanti
45341 if (p->fop)
45342 inode->i_fop = p->fop;
45343 ei->op = p->op;
45344+
45345 d_add(dentry, inode);
45346 error = NULL;
45347 out:
45348@@ -2795,7 +2911,7 @@ static const struct pid_entry tgid_base_
45349 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
45350 #endif
45351 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
45352-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45353+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45354 INF("syscall", S_IRUGO, proc_pid_syscall),
45355 #endif
45356 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45357@@ -2820,10 +2936,10 @@ static const struct pid_entry tgid_base_
45358 #ifdef CONFIG_SECURITY
45359 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45360 #endif
45361-#ifdef CONFIG_KALLSYMS
45362+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45363 INF("wchan", S_IRUGO, proc_pid_wchan),
45364 #endif
45365-#ifdef CONFIG_STACKTRACE
45366+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45367 ONE("stack", S_IRUGO, proc_pid_stack),
45368 #endif
45369 #ifdef CONFIG_SCHEDSTATS
45370@@ -2857,6 +2973,9 @@ static const struct pid_entry tgid_base_
45371 #ifdef CONFIG_HARDWALL
45372 INF("hardwall", S_IRUGO, proc_pid_hardwall),
45373 #endif
45374+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45375+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
45376+#endif
45377 };
45378
45379 static int proc_tgid_base_readdir(struct file * filp,
45380@@ -2982,7 +3101,14 @@ static struct dentry *proc_pid_instantia
45381 if (!inode)
45382 goto out;
45383
45384+#ifdef CONFIG_GRKERNSEC_PROC_USER
45385+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
45386+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45387+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45388+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
45389+#else
45390 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
45391+#endif
45392 inode->i_op = &proc_tgid_base_inode_operations;
45393 inode->i_fop = &proc_tgid_base_operations;
45394 inode->i_flags|=S_IMMUTABLE;
45395@@ -3024,7 +3150,14 @@ struct dentry *proc_pid_lookup(struct in
45396 if (!task)
45397 goto out;
45398
45399+ if (!has_group_leader_pid(task))
45400+ goto out_put_task;
45401+
45402+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45403+ goto out_put_task;
45404+
45405 result = proc_pid_instantiate(dir, dentry, task, NULL);
45406+out_put_task:
45407 put_task_struct(task);
45408 out:
45409 return result;
45410@@ -3089,6 +3222,11 @@ int proc_pid_readdir(struct file * filp,
45411 {
45412 unsigned int nr;
45413 struct task_struct *reaper;
45414+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45415+ const struct cred *tmpcred = current_cred();
45416+ const struct cred *itercred;
45417+#endif
45418+ filldir_t __filldir = filldir;
45419 struct tgid_iter iter;
45420 struct pid_namespace *ns;
45421
45422@@ -3112,8 +3250,27 @@ int proc_pid_readdir(struct file * filp,
45423 for (iter = next_tgid(ns, iter);
45424 iter.task;
45425 iter.tgid += 1, iter = next_tgid(ns, iter)) {
45426+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45427+ rcu_read_lock();
45428+ itercred = __task_cred(iter.task);
45429+#endif
45430+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
45431+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45432+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
45433+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45434+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45435+#endif
45436+ )
45437+#endif
45438+ )
45439+ __filldir = &gr_fake_filldir;
45440+ else
45441+ __filldir = filldir;
45442+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45443+ rcu_read_unlock();
45444+#endif
45445 filp->f_pos = iter.tgid + TGID_OFFSET;
45446- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
45447+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
45448 put_task_struct(iter.task);
45449 goto out;
45450 }
45451@@ -3141,7 +3298,7 @@ static const struct pid_entry tid_base_s
45452 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45453 #endif
45454 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
45455-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45456+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45457 INF("syscall", S_IRUGO, proc_pid_syscall),
45458 #endif
45459 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45460@@ -3165,10 +3322,10 @@ static const struct pid_entry tid_base_s
45461 #ifdef CONFIG_SECURITY
45462 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45463 #endif
45464-#ifdef CONFIG_KALLSYMS
45465+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45466 INF("wchan", S_IRUGO, proc_pid_wchan),
45467 #endif
45468-#ifdef CONFIG_STACKTRACE
45469+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45470 ONE("stack", S_IRUGO, proc_pid_stack),
45471 #endif
45472 #ifdef CONFIG_SCHEDSTATS
45473diff -urNp linux-3.0.7/fs/proc/cmdline.c linux-3.0.7/fs/proc/cmdline.c
45474--- linux-3.0.7/fs/proc/cmdline.c 2011-07-21 22:17:23.000000000 -0400
45475+++ linux-3.0.7/fs/proc/cmdline.c 2011-08-23 21:48:14.000000000 -0400
45476@@ -23,7 +23,11 @@ static const struct file_operations cmdl
45477
45478 static int __init proc_cmdline_init(void)
45479 {
45480+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45481+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
45482+#else
45483 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
45484+#endif
45485 return 0;
45486 }
45487 module_init(proc_cmdline_init);
45488diff -urNp linux-3.0.7/fs/proc/devices.c linux-3.0.7/fs/proc/devices.c
45489--- linux-3.0.7/fs/proc/devices.c 2011-07-21 22:17:23.000000000 -0400
45490+++ linux-3.0.7/fs/proc/devices.c 2011-08-23 21:48:14.000000000 -0400
45491@@ -64,7 +64,11 @@ static const struct file_operations proc
45492
45493 static int __init proc_devices_init(void)
45494 {
45495+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45496+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
45497+#else
45498 proc_create("devices", 0, NULL, &proc_devinfo_operations);
45499+#endif
45500 return 0;
45501 }
45502 module_init(proc_devices_init);
45503diff -urNp linux-3.0.7/fs/proc/inode.c linux-3.0.7/fs/proc/inode.c
45504--- linux-3.0.7/fs/proc/inode.c 2011-07-21 22:17:23.000000000 -0400
45505+++ linux-3.0.7/fs/proc/inode.c 2011-10-19 03:59:32.000000000 -0400
45506@@ -18,12 +18,18 @@
45507 #include <linux/module.h>
45508 #include <linux/sysctl.h>
45509 #include <linux/slab.h>
45510+#include <linux/grsecurity.h>
45511
45512 #include <asm/system.h>
45513 #include <asm/uaccess.h>
45514
45515 #include "internal.h"
45516
45517+#ifdef CONFIG_PROC_SYSCTL
45518+extern const struct inode_operations proc_sys_inode_operations;
45519+extern const struct inode_operations proc_sys_dir_operations;
45520+#endif
45521+
45522 static void proc_evict_inode(struct inode *inode)
45523 {
45524 struct proc_dir_entry *de;
45525@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inod
45526 ns_ops = PROC_I(inode)->ns_ops;
45527 if (ns_ops && ns_ops->put)
45528 ns_ops->put(PROC_I(inode)->ns);
45529+
45530+#ifdef CONFIG_PROC_SYSCTL
45531+ if (inode->i_op == &proc_sys_inode_operations ||
45532+ inode->i_op == &proc_sys_dir_operations)
45533+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
45534+#endif
45535+
45536 }
45537
45538 static struct kmem_cache * proc_inode_cachep;
45539@@ -440,7 +453,11 @@ struct inode *proc_get_inode(struct supe
45540 if (de->mode) {
45541 inode->i_mode = de->mode;
45542 inode->i_uid = de->uid;
45543+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45544+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45545+#else
45546 inode->i_gid = de->gid;
45547+#endif
45548 }
45549 if (de->size)
45550 inode->i_size = de->size;
45551diff -urNp linux-3.0.7/fs/proc/internal.h linux-3.0.7/fs/proc/internal.h
45552--- linux-3.0.7/fs/proc/internal.h 2011-07-21 22:17:23.000000000 -0400
45553+++ linux-3.0.7/fs/proc/internal.h 2011-08-23 21:48:14.000000000 -0400
45554@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
45555 struct pid *pid, struct task_struct *task);
45556 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45557 struct pid *pid, struct task_struct *task);
45558+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45559+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
45560+#endif
45561 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
45562
45563 extern const struct file_operations proc_maps_operations;
45564diff -urNp linux-3.0.7/fs/proc/Kconfig linux-3.0.7/fs/proc/Kconfig
45565--- linux-3.0.7/fs/proc/Kconfig 2011-07-21 22:17:23.000000000 -0400
45566+++ linux-3.0.7/fs/proc/Kconfig 2011-08-23 21:48:14.000000000 -0400
45567@@ -30,12 +30,12 @@ config PROC_FS
45568
45569 config PROC_KCORE
45570 bool "/proc/kcore support" if !ARM
45571- depends on PROC_FS && MMU
45572+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
45573
45574 config PROC_VMCORE
45575 bool "/proc/vmcore support"
45576- depends on PROC_FS && CRASH_DUMP
45577- default y
45578+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
45579+ default n
45580 help
45581 Exports the dump image of crashed kernel in ELF format.
45582
45583@@ -59,8 +59,8 @@ config PROC_SYSCTL
45584 limited in memory.
45585
45586 config PROC_PAGE_MONITOR
45587- default y
45588- depends on PROC_FS && MMU
45589+ default n
45590+ depends on PROC_FS && MMU && !GRKERNSEC
45591 bool "Enable /proc page monitoring" if EXPERT
45592 help
45593 Various /proc files exist to monitor process memory utilization:
45594diff -urNp linux-3.0.7/fs/proc/kcore.c linux-3.0.7/fs/proc/kcore.c
45595--- linux-3.0.7/fs/proc/kcore.c 2011-07-21 22:17:23.000000000 -0400
45596+++ linux-3.0.7/fs/proc/kcore.c 2011-08-23 21:48:14.000000000 -0400
45597@@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
45598 off_t offset = 0;
45599 struct kcore_list *m;
45600
45601+ pax_track_stack();
45602+
45603 /* setup ELF header */
45604 elf = (struct elfhdr *) bufp;
45605 bufp += sizeof(struct elfhdr);
45606@@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
45607 * the addresses in the elf_phdr on our list.
45608 */
45609 start = kc_offset_to_vaddr(*fpos - elf_buflen);
45610- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
45611+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
45612+ if (tsz > buflen)
45613 tsz = buflen;
45614-
45615+
45616 while (buflen) {
45617 struct kcore_list *m;
45618
45619@@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
45620 kfree(elf_buf);
45621 } else {
45622 if (kern_addr_valid(start)) {
45623- unsigned long n;
45624+ char *elf_buf;
45625+ mm_segment_t oldfs;
45626
45627- n = copy_to_user(buffer, (char *)start, tsz);
45628- /*
45629- * We cannot distingush between fault on source
45630- * and fault on destination. When this happens
45631- * we clear too and hope it will trigger the
45632- * EFAULT again.
45633- */
45634- if (n) {
45635- if (clear_user(buffer + tsz - n,
45636- n))
45637+ elf_buf = kmalloc(tsz, GFP_KERNEL);
45638+ if (!elf_buf)
45639+ return -ENOMEM;
45640+ oldfs = get_fs();
45641+ set_fs(KERNEL_DS);
45642+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
45643+ set_fs(oldfs);
45644+ if (copy_to_user(buffer, elf_buf, tsz)) {
45645+ kfree(elf_buf);
45646 return -EFAULT;
45647+ }
45648 }
45649+ set_fs(oldfs);
45650+ kfree(elf_buf);
45651 } else {
45652 if (clear_user(buffer, tsz))
45653 return -EFAULT;
45654@@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
45655
45656 static int open_kcore(struct inode *inode, struct file *filp)
45657 {
45658+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
45659+ return -EPERM;
45660+#endif
45661 if (!capable(CAP_SYS_RAWIO))
45662 return -EPERM;
45663 if (kcore_need_update)
45664diff -urNp linux-3.0.7/fs/proc/meminfo.c linux-3.0.7/fs/proc/meminfo.c
45665--- linux-3.0.7/fs/proc/meminfo.c 2011-07-21 22:17:23.000000000 -0400
45666+++ linux-3.0.7/fs/proc/meminfo.c 2011-08-23 21:48:14.000000000 -0400
45667@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
45668 unsigned long pages[NR_LRU_LISTS];
45669 int lru;
45670
45671+ pax_track_stack();
45672+
45673 /*
45674 * display in kilobytes.
45675 */
45676@@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
45677 vmi.used >> 10,
45678 vmi.largest_chunk >> 10
45679 #ifdef CONFIG_MEMORY_FAILURE
45680- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
45681+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
45682 #endif
45683 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
45684 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
45685diff -urNp linux-3.0.7/fs/proc/nommu.c linux-3.0.7/fs/proc/nommu.c
45686--- linux-3.0.7/fs/proc/nommu.c 2011-07-21 22:17:23.000000000 -0400
45687+++ linux-3.0.7/fs/proc/nommu.c 2011-08-23 21:47:56.000000000 -0400
45688@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
45689 if (len < 1)
45690 len = 1;
45691 seq_printf(m, "%*c", len, ' ');
45692- seq_path(m, &file->f_path, "");
45693+ seq_path(m, &file->f_path, "\n\\");
45694 }
45695
45696 seq_putc(m, '\n');
45697diff -urNp linux-3.0.7/fs/proc/proc_net.c linux-3.0.7/fs/proc/proc_net.c
45698--- linux-3.0.7/fs/proc/proc_net.c 2011-07-21 22:17:23.000000000 -0400
45699+++ linux-3.0.7/fs/proc/proc_net.c 2011-08-23 21:48:14.000000000 -0400
45700@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
45701 struct task_struct *task;
45702 struct nsproxy *ns;
45703 struct net *net = NULL;
45704+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45705+ const struct cred *cred = current_cred();
45706+#endif
45707+
45708+#ifdef CONFIG_GRKERNSEC_PROC_USER
45709+ if (cred->fsuid)
45710+ return net;
45711+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45712+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
45713+ return net;
45714+#endif
45715
45716 rcu_read_lock();
45717 task = pid_task(proc_pid(dir), PIDTYPE_PID);
45718diff -urNp linux-3.0.7/fs/proc/proc_sysctl.c linux-3.0.7/fs/proc/proc_sysctl.c
45719--- linux-3.0.7/fs/proc/proc_sysctl.c 2011-07-21 22:17:23.000000000 -0400
45720+++ linux-3.0.7/fs/proc/proc_sysctl.c 2011-10-19 03:59:32.000000000 -0400
45721@@ -8,11 +8,13 @@
45722 #include <linux/namei.h>
45723 #include "internal.h"
45724
45725+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
45726+
45727 static const struct dentry_operations proc_sys_dentry_operations;
45728 static const struct file_operations proc_sys_file_operations;
45729-static const struct inode_operations proc_sys_inode_operations;
45730+const struct inode_operations proc_sys_inode_operations;
45731 static const struct file_operations proc_sys_dir_file_operations;
45732-static const struct inode_operations proc_sys_dir_operations;
45733+const struct inode_operations proc_sys_dir_operations;
45734
45735 static struct inode *proc_sys_make_inode(struct super_block *sb,
45736 struct ctl_table_header *head, struct ctl_table *table)
45737@@ -121,8 +123,14 @@ static struct dentry *proc_sys_lookup(st
45738
45739 err = NULL;
45740 d_set_d_op(dentry, &proc_sys_dentry_operations);
45741+
45742+ gr_handle_proc_create(dentry, inode);
45743+
45744 d_add(dentry, inode);
45745
45746+ if (gr_handle_sysctl(p, MAY_EXEC))
45747+ err = ERR_PTR(-ENOENT);
45748+
45749 out:
45750 sysctl_head_finish(head);
45751 return err;
45752@@ -202,6 +210,9 @@ static int proc_sys_fill_cache(struct fi
45753 return -ENOMEM;
45754 } else {
45755 d_set_d_op(child, &proc_sys_dentry_operations);
45756+
45757+ gr_handle_proc_create(child, inode);
45758+
45759 d_add(child, inode);
45760 }
45761 } else {
45762@@ -230,6 +241,9 @@ static int scan(struct ctl_table_header
45763 if (*pos < file->f_pos)
45764 continue;
45765
45766+ if (gr_handle_sysctl(table, 0))
45767+ continue;
45768+
45769 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
45770 if (res)
45771 return res;
45772@@ -355,6 +369,9 @@ static int proc_sys_getattr(struct vfsmo
45773 if (IS_ERR(head))
45774 return PTR_ERR(head);
45775
45776+ if (table && gr_handle_sysctl(table, MAY_EXEC))
45777+ return -ENOENT;
45778+
45779 generic_fillattr(inode, stat);
45780 if (table)
45781 stat->mode = (stat->mode & S_IFMT) | table->mode;
45782@@ -374,13 +391,13 @@ static const struct file_operations proc
45783 .llseek = generic_file_llseek,
45784 };
45785
45786-static const struct inode_operations proc_sys_inode_operations = {
45787+const struct inode_operations proc_sys_inode_operations = {
45788 .permission = proc_sys_permission,
45789 .setattr = proc_sys_setattr,
45790 .getattr = proc_sys_getattr,
45791 };
45792
45793-static const struct inode_operations proc_sys_dir_operations = {
45794+const struct inode_operations proc_sys_dir_operations = {
45795 .lookup = proc_sys_lookup,
45796 .permission = proc_sys_permission,
45797 .setattr = proc_sys_setattr,
45798diff -urNp linux-3.0.7/fs/proc/root.c linux-3.0.7/fs/proc/root.c
45799--- linux-3.0.7/fs/proc/root.c 2011-07-21 22:17:23.000000000 -0400
45800+++ linux-3.0.7/fs/proc/root.c 2011-08-23 21:48:14.000000000 -0400
45801@@ -123,7 +123,15 @@ void __init proc_root_init(void)
45802 #ifdef CONFIG_PROC_DEVICETREE
45803 proc_device_tree_init();
45804 #endif
45805+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45806+#ifdef CONFIG_GRKERNSEC_PROC_USER
45807+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
45808+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45809+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
45810+#endif
45811+#else
45812 proc_mkdir("bus", NULL);
45813+#endif
45814 proc_sys_init();
45815 }
45816
45817diff -urNp linux-3.0.7/fs/proc/task_mmu.c linux-3.0.7/fs/proc/task_mmu.c
45818--- linux-3.0.7/fs/proc/task_mmu.c 2011-10-16 21:54:54.000000000 -0400
45819+++ linux-3.0.7/fs/proc/task_mmu.c 2011-10-16 21:55:28.000000000 -0400
45820@@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
45821 "VmExe:\t%8lu kB\n"
45822 "VmLib:\t%8lu kB\n"
45823 "VmPTE:\t%8lu kB\n"
45824- "VmSwap:\t%8lu kB\n",
45825- hiwater_vm << (PAGE_SHIFT-10),
45826+ "VmSwap:\t%8lu kB\n"
45827+
45828+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
45829+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
45830+#endif
45831+
45832+ ,hiwater_vm << (PAGE_SHIFT-10),
45833 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
45834 mm->locked_vm << (PAGE_SHIFT-10),
45835 hiwater_rss << (PAGE_SHIFT-10),
45836@@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
45837 data << (PAGE_SHIFT-10),
45838 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
45839 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
45840- swap << (PAGE_SHIFT-10));
45841+ swap << (PAGE_SHIFT-10)
45842+
45843+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
45844+ , mm->context.user_cs_base, mm->context.user_cs_limit
45845+#endif
45846+
45847+ );
45848 }
45849
45850 unsigned long task_vsize(struct mm_struct *mm)
45851@@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
45852 return ret;
45853 }
45854
45855+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45856+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45857+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45858+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45859+#endif
45860+
45861 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
45862 {
45863 struct mm_struct *mm = vma->vm_mm;
45864@@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
45865 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
45866 }
45867
45868- /* We don't show the stack guard page in /proc/maps */
45869+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45870+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
45871+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
45872+#else
45873 start = vma->vm_start;
45874- if (stack_guard_page_start(vma, start))
45875- start += PAGE_SIZE;
45876 end = vma->vm_end;
45877- if (stack_guard_page_end(vma, end))
45878- end -= PAGE_SIZE;
45879+#endif
45880
45881 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
45882 start,
45883@@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
45884 flags & VM_WRITE ? 'w' : '-',
45885 flags & VM_EXEC ? 'x' : '-',
45886 flags & VM_MAYSHARE ? 's' : 'p',
45887+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45888+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
45889+#else
45890 pgoff,
45891+#endif
45892 MAJOR(dev), MINOR(dev), ino, &len);
45893
45894 /*
45895@@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
45896 */
45897 if (file) {
45898 pad_len_spaces(m, len);
45899- seq_path(m, &file->f_path, "\n");
45900+ seq_path(m, &file->f_path, "\n\\");
45901 } else {
45902 const char *name = arch_vma_name(vma);
45903 if (!name) {
45904@@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
45905 if (vma->vm_start <= mm->brk &&
45906 vma->vm_end >= mm->start_brk) {
45907 name = "[heap]";
45908- } else if (vma->vm_start <= mm->start_stack &&
45909- vma->vm_end >= mm->start_stack) {
45910+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
45911+ (vma->vm_start <= mm->start_stack &&
45912+ vma->vm_end >= mm->start_stack)) {
45913 name = "[stack]";
45914 }
45915 } else {
45916@@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
45917 };
45918
45919 memset(&mss, 0, sizeof mss);
45920- mss.vma = vma;
45921- /* mmap_sem is held in m_start */
45922- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
45923- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
45924-
45925+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45926+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
45927+#endif
45928+ mss.vma = vma;
45929+ /* mmap_sem is held in m_start */
45930+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
45931+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
45932+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45933+ }
45934+#endif
45935 show_map_vma(m, vma);
45936
45937 seq_printf(m,
45938@@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
45939 "KernelPageSize: %8lu kB\n"
45940 "MMUPageSize: %8lu kB\n"
45941 "Locked: %8lu kB\n",
45942+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45943+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
45944+#else
45945 (vma->vm_end - vma->vm_start) >> 10,
45946+#endif
45947 mss.resident >> 10,
45948 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
45949 mss.shared_clean >> 10,
45950@@ -1031,7 +1062,7 @@ static int show_numa_map(struct seq_file
45951
45952 if (file) {
45953 seq_printf(m, " file=");
45954- seq_path(m, &file->f_path, "\n\t= ");
45955+ seq_path(m, &file->f_path, "\n\t\\= ");
45956 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
45957 seq_printf(m, " heap");
45958 } else if (vma->vm_start <= mm->start_stack &&
45959diff -urNp linux-3.0.7/fs/proc/task_nommu.c linux-3.0.7/fs/proc/task_nommu.c
45960--- linux-3.0.7/fs/proc/task_nommu.c 2011-07-21 22:17:23.000000000 -0400
45961+++ linux-3.0.7/fs/proc/task_nommu.c 2011-08-23 21:47:56.000000000 -0400
45962@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
45963 else
45964 bytes += kobjsize(mm);
45965
45966- if (current->fs && current->fs->users > 1)
45967+ if (current->fs && atomic_read(&current->fs->users) > 1)
45968 sbytes += kobjsize(current->fs);
45969 else
45970 bytes += kobjsize(current->fs);
45971@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
45972
45973 if (file) {
45974 pad_len_spaces(m, len);
45975- seq_path(m, &file->f_path, "");
45976+ seq_path(m, &file->f_path, "\n\\");
45977 } else if (mm) {
45978 if (vma->vm_start <= mm->start_stack &&
45979 vma->vm_end >= mm->start_stack) {
45980diff -urNp linux-3.0.7/fs/quota/netlink.c linux-3.0.7/fs/quota/netlink.c
45981--- linux-3.0.7/fs/quota/netlink.c 2011-07-21 22:17:23.000000000 -0400
45982+++ linux-3.0.7/fs/quota/netlink.c 2011-08-23 21:47:56.000000000 -0400
45983@@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
45984 void quota_send_warning(short type, unsigned int id, dev_t dev,
45985 const char warntype)
45986 {
45987- static atomic_t seq;
45988+ static atomic_unchecked_t seq;
45989 struct sk_buff *skb;
45990 void *msg_head;
45991 int ret;
45992@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
45993 "VFS: Not enough memory to send quota warning.\n");
45994 return;
45995 }
45996- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
45997+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
45998 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
45999 if (!msg_head) {
46000 printk(KERN_ERR
46001diff -urNp linux-3.0.7/fs/readdir.c linux-3.0.7/fs/readdir.c
46002--- linux-3.0.7/fs/readdir.c 2011-07-21 22:17:23.000000000 -0400
46003+++ linux-3.0.7/fs/readdir.c 2011-10-06 04:17:55.000000000 -0400
46004@@ -17,6 +17,7 @@
46005 #include <linux/security.h>
46006 #include <linux/syscalls.h>
46007 #include <linux/unistd.h>
46008+#include <linux/namei.h>
46009
46010 #include <asm/uaccess.h>
46011
46012@@ -67,6 +68,7 @@ struct old_linux_dirent {
46013
46014 struct readdir_callback {
46015 struct old_linux_dirent __user * dirent;
46016+ struct file * file;
46017 int result;
46018 };
46019
46020@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
46021 buf->result = -EOVERFLOW;
46022 return -EOVERFLOW;
46023 }
46024+
46025+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46026+ return 0;
46027+
46028 buf->result++;
46029 dirent = buf->dirent;
46030 if (!access_ok(VERIFY_WRITE, dirent,
46031@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
46032
46033 buf.result = 0;
46034 buf.dirent = dirent;
46035+ buf.file = file;
46036
46037 error = vfs_readdir(file, fillonedir, &buf);
46038 if (buf.result)
46039@@ -142,6 +149,7 @@ struct linux_dirent {
46040 struct getdents_callback {
46041 struct linux_dirent __user * current_dir;
46042 struct linux_dirent __user * previous;
46043+ struct file * file;
46044 int count;
46045 int error;
46046 };
46047@@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
46048 buf->error = -EOVERFLOW;
46049 return -EOVERFLOW;
46050 }
46051+
46052+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46053+ return 0;
46054+
46055 dirent = buf->previous;
46056 if (dirent) {
46057 if (__put_user(offset, &dirent->d_off))
46058@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
46059 buf.previous = NULL;
46060 buf.count = count;
46061 buf.error = 0;
46062+ buf.file = file;
46063
46064 error = vfs_readdir(file, filldir, &buf);
46065 if (error >= 0)
46066@@ -229,6 +242,7 @@ out:
46067 struct getdents_callback64 {
46068 struct linux_dirent64 __user * current_dir;
46069 struct linux_dirent64 __user * previous;
46070+ struct file *file;
46071 int count;
46072 int error;
46073 };
46074@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
46075 buf->error = -EINVAL; /* only used if we fail.. */
46076 if (reclen > buf->count)
46077 return -EINVAL;
46078+
46079+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46080+ return 0;
46081+
46082 dirent = buf->previous;
46083 if (dirent) {
46084 if (__put_user(offset, &dirent->d_off))
46085@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
46086
46087 buf.current_dir = dirent;
46088 buf.previous = NULL;
46089+ buf.file = file;
46090 buf.count = count;
46091 buf.error = 0;
46092
46093@@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
46094 error = buf.error;
46095 lastdirent = buf.previous;
46096 if (lastdirent) {
46097- typeof(lastdirent->d_off) d_off = file->f_pos;
46098+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
46099 if (__put_user(d_off, &lastdirent->d_off))
46100 error = -EFAULT;
46101 else
46102diff -urNp linux-3.0.7/fs/reiserfs/dir.c linux-3.0.7/fs/reiserfs/dir.c
46103--- linux-3.0.7/fs/reiserfs/dir.c 2011-07-21 22:17:23.000000000 -0400
46104+++ linux-3.0.7/fs/reiserfs/dir.c 2011-08-23 21:48:14.000000000 -0400
46105@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
46106 struct reiserfs_dir_entry de;
46107 int ret = 0;
46108
46109+ pax_track_stack();
46110+
46111 reiserfs_write_lock(inode->i_sb);
46112
46113 reiserfs_check_lock_depth(inode->i_sb, "readdir");
46114diff -urNp linux-3.0.7/fs/reiserfs/do_balan.c linux-3.0.7/fs/reiserfs/do_balan.c
46115--- linux-3.0.7/fs/reiserfs/do_balan.c 2011-07-21 22:17:23.000000000 -0400
46116+++ linux-3.0.7/fs/reiserfs/do_balan.c 2011-08-23 21:47:56.000000000 -0400
46117@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
46118 return;
46119 }
46120
46121- atomic_inc(&(fs_generation(tb->tb_sb)));
46122+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
46123 do_balance_starts(tb);
46124
46125 /* balance leaf returns 0 except if combining L R and S into
46126diff -urNp linux-3.0.7/fs/reiserfs/journal.c linux-3.0.7/fs/reiserfs/journal.c
46127--- linux-3.0.7/fs/reiserfs/journal.c 2011-07-21 22:17:23.000000000 -0400
46128+++ linux-3.0.7/fs/reiserfs/journal.c 2011-08-23 21:48:14.000000000 -0400
46129@@ -2299,6 +2299,8 @@ static struct buffer_head *reiserfs_brea
46130 struct buffer_head *bh;
46131 int i, j;
46132
46133+ pax_track_stack();
46134+
46135 bh = __getblk(dev, block, bufsize);
46136 if (buffer_uptodate(bh))
46137 return (bh);
46138diff -urNp linux-3.0.7/fs/reiserfs/namei.c linux-3.0.7/fs/reiserfs/namei.c
46139--- linux-3.0.7/fs/reiserfs/namei.c 2011-07-21 22:17:23.000000000 -0400
46140+++ linux-3.0.7/fs/reiserfs/namei.c 2011-08-23 21:48:14.000000000 -0400
46141@@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode
46142 unsigned long savelink = 1;
46143 struct timespec ctime;
46144
46145+ pax_track_stack();
46146+
46147 /* three balancings: (1) old name removal, (2) new name insertion
46148 and (3) maybe "save" link insertion
46149 stat data updates: (1) old directory,
46150diff -urNp linux-3.0.7/fs/reiserfs/procfs.c linux-3.0.7/fs/reiserfs/procfs.c
46151--- linux-3.0.7/fs/reiserfs/procfs.c 2011-07-21 22:17:23.000000000 -0400
46152+++ linux-3.0.7/fs/reiserfs/procfs.c 2011-08-23 21:48:14.000000000 -0400
46153@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
46154 "SMALL_TAILS " : "NO_TAILS ",
46155 replay_only(sb) ? "REPLAY_ONLY " : "",
46156 convert_reiserfs(sb) ? "CONV " : "",
46157- atomic_read(&r->s_generation_counter),
46158+ atomic_read_unchecked(&r->s_generation_counter),
46159 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
46160 SF(s_do_balance), SF(s_unneeded_left_neighbor),
46161 SF(s_good_search_by_key_reada), SF(s_bmaps),
46162@@ -299,6 +299,8 @@ static int show_journal(struct seq_file
46163 struct journal_params *jp = &rs->s_v1.s_journal;
46164 char b[BDEVNAME_SIZE];
46165
46166+ pax_track_stack();
46167+
46168 seq_printf(m, /* on-disk fields */
46169 "jp_journal_1st_block: \t%i\n"
46170 "jp_journal_dev: \t%s[%x]\n"
46171diff -urNp linux-3.0.7/fs/reiserfs/stree.c linux-3.0.7/fs/reiserfs/stree.c
46172--- linux-3.0.7/fs/reiserfs/stree.c 2011-07-21 22:17:23.000000000 -0400
46173+++ linux-3.0.7/fs/reiserfs/stree.c 2011-08-23 21:48:14.000000000 -0400
46174@@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
46175 int iter = 0;
46176 #endif
46177
46178+ pax_track_stack();
46179+
46180 BUG_ON(!th->t_trans_id);
46181
46182 init_tb_struct(th, &s_del_balance, sb, path,
46183@@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
46184 int retval;
46185 int quota_cut_bytes = 0;
46186
46187+ pax_track_stack();
46188+
46189 BUG_ON(!th->t_trans_id);
46190
46191 le_key2cpu_key(&cpu_key, key);
46192@@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
46193 int quota_cut_bytes;
46194 loff_t tail_pos = 0;
46195
46196+ pax_track_stack();
46197+
46198 BUG_ON(!th->t_trans_id);
46199
46200 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
46201@@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
46202 int retval;
46203 int fs_gen;
46204
46205+ pax_track_stack();
46206+
46207 BUG_ON(!th->t_trans_id);
46208
46209 fs_gen = get_generation(inode->i_sb);
46210@@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
46211 int fs_gen = 0;
46212 int quota_bytes = 0;
46213
46214+ pax_track_stack();
46215+
46216 BUG_ON(!th->t_trans_id);
46217
46218 if (inode) { /* Do we count quotas for item? */
46219diff -urNp linux-3.0.7/fs/reiserfs/super.c linux-3.0.7/fs/reiserfs/super.c
46220--- linux-3.0.7/fs/reiserfs/super.c 2011-07-21 22:17:23.000000000 -0400
46221+++ linux-3.0.7/fs/reiserfs/super.c 2011-08-23 21:48:14.000000000 -0400
46222@@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
46223 {.option_name = NULL}
46224 };
46225
46226+ pax_track_stack();
46227+
46228 *blocks = 0;
46229 if (!options || !*options)
46230 /* use default configuration: create tails, journaling on, no
46231diff -urNp linux-3.0.7/fs/select.c linux-3.0.7/fs/select.c
46232--- linux-3.0.7/fs/select.c 2011-07-21 22:17:23.000000000 -0400
46233+++ linux-3.0.7/fs/select.c 2011-08-23 21:48:14.000000000 -0400
46234@@ -20,6 +20,7 @@
46235 #include <linux/module.h>
46236 #include <linux/slab.h>
46237 #include <linux/poll.h>
46238+#include <linux/security.h>
46239 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
46240 #include <linux/file.h>
46241 #include <linux/fdtable.h>
46242@@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
46243 int retval, i, timed_out = 0;
46244 unsigned long slack = 0;
46245
46246+ pax_track_stack();
46247+
46248 rcu_read_lock();
46249 retval = max_select_fd(n, fds);
46250 rcu_read_unlock();
46251@@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
46252 /* Allocate small arguments on the stack to save memory and be faster */
46253 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
46254
46255+ pax_track_stack();
46256+
46257 ret = -EINVAL;
46258 if (n < 0)
46259 goto out_nofds;
46260@@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
46261 struct poll_list *walk = head;
46262 unsigned long todo = nfds;
46263
46264+ pax_track_stack();
46265+
46266+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
46267 if (nfds > rlimit(RLIMIT_NOFILE))
46268 return -EINVAL;
46269
46270diff -urNp linux-3.0.7/fs/seq_file.c linux-3.0.7/fs/seq_file.c
46271--- linux-3.0.7/fs/seq_file.c 2011-07-21 22:17:23.000000000 -0400
46272+++ linux-3.0.7/fs/seq_file.c 2011-08-23 21:47:56.000000000 -0400
46273@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
46274 return 0;
46275 }
46276 if (!m->buf) {
46277- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46278+ m->size = PAGE_SIZE;
46279+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46280 if (!m->buf)
46281 return -ENOMEM;
46282 }
46283@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
46284 Eoverflow:
46285 m->op->stop(m, p);
46286 kfree(m->buf);
46287- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46288+ m->size <<= 1;
46289+ m->buf = kmalloc(m->size, GFP_KERNEL);
46290 return !m->buf ? -ENOMEM : -EAGAIN;
46291 }
46292
46293@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
46294 m->version = file->f_version;
46295 /* grab buffer if we didn't have one */
46296 if (!m->buf) {
46297- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46298+ m->size = PAGE_SIZE;
46299+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46300 if (!m->buf)
46301 goto Enomem;
46302 }
46303@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
46304 goto Fill;
46305 m->op->stop(m, p);
46306 kfree(m->buf);
46307- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46308+ m->size <<= 1;
46309+ m->buf = kmalloc(m->size, GFP_KERNEL);
46310 if (!m->buf)
46311 goto Enomem;
46312 m->count = 0;
46313@@ -549,7 +553,7 @@ static void single_stop(struct seq_file
46314 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
46315 void *data)
46316 {
46317- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
46318+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
46319 int res = -ENOMEM;
46320
46321 if (op) {
46322diff -urNp linux-3.0.7/fs/splice.c linux-3.0.7/fs/splice.c
46323--- linux-3.0.7/fs/splice.c 2011-07-21 22:17:23.000000000 -0400
46324+++ linux-3.0.7/fs/splice.c 2011-10-06 04:17:55.000000000 -0400
46325@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode
46326 pipe_lock(pipe);
46327
46328 for (;;) {
46329- if (!pipe->readers) {
46330+ if (!atomic_read(&pipe->readers)) {
46331 send_sig(SIGPIPE, current, 0);
46332 if (!ret)
46333 ret = -EPIPE;
46334@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode
46335 do_wakeup = 0;
46336 }
46337
46338- pipe->waiting_writers++;
46339+ atomic_inc(&pipe->waiting_writers);
46340 pipe_wait(pipe);
46341- pipe->waiting_writers--;
46342+ atomic_dec(&pipe->waiting_writers);
46343 }
46344
46345 pipe_unlock(pipe);
46346@@ -320,6 +320,8 @@ __generic_file_splice_read(struct file *
46347 .spd_release = spd_release_page,
46348 };
46349
46350+ pax_track_stack();
46351+
46352 if (splice_grow_spd(pipe, &spd))
46353 return -ENOMEM;
46354
46355@@ -560,7 +562,7 @@ static ssize_t kernel_readv(struct file
46356 old_fs = get_fs();
46357 set_fs(get_ds());
46358 /* The cast to a user pointer is valid due to the set_fs() */
46359- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46360+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
46361 set_fs(old_fs);
46362
46363 return res;
46364@@ -575,7 +577,7 @@ static ssize_t kernel_write(struct file
46365 old_fs = get_fs();
46366 set_fs(get_ds());
46367 /* The cast to a user pointer is valid due to the set_fs() */
46368- res = vfs_write(file, (const char __user *)buf, count, &pos);
46369+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
46370 set_fs(old_fs);
46371
46372 return res;
46373@@ -603,6 +605,8 @@ ssize_t default_file_splice_read(struct
46374 .spd_release = spd_release_page,
46375 };
46376
46377+ pax_track_stack();
46378+
46379 if (splice_grow_spd(pipe, &spd))
46380 return -ENOMEM;
46381
46382@@ -626,7 +630,7 @@ ssize_t default_file_splice_read(struct
46383 goto err;
46384
46385 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46386- vec[i].iov_base = (void __user *) page_address(page);
46387+ vec[i].iov_base = (void __force_user *) page_address(page);
46388 vec[i].iov_len = this_len;
46389 spd.pages[i] = page;
46390 spd.nr_pages++;
46391@@ -846,10 +850,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46392 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46393 {
46394 while (!pipe->nrbufs) {
46395- if (!pipe->writers)
46396+ if (!atomic_read(&pipe->writers))
46397 return 0;
46398
46399- if (!pipe->waiting_writers && sd->num_spliced)
46400+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46401 return 0;
46402
46403 if (sd->flags & SPLICE_F_NONBLOCK)
46404@@ -1182,7 +1186,7 @@ ssize_t splice_direct_to_actor(struct fi
46405 * out of the pipe right after the splice_to_pipe(). So set
46406 * PIPE_READERS appropriately.
46407 */
46408- pipe->readers = 1;
46409+ atomic_set(&pipe->readers, 1);
46410
46411 current->splice_pipe = pipe;
46412 }
46413@@ -1619,6 +1623,8 @@ static long vmsplice_to_pipe(struct file
46414 };
46415 long ret;
46416
46417+ pax_track_stack();
46418+
46419 pipe = get_pipe_info(file);
46420 if (!pipe)
46421 return -EBADF;
46422@@ -1734,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_
46423 ret = -ERESTARTSYS;
46424 break;
46425 }
46426- if (!pipe->writers)
46427+ if (!atomic_read(&pipe->writers))
46428 break;
46429- if (!pipe->waiting_writers) {
46430+ if (!atomic_read(&pipe->waiting_writers)) {
46431 if (flags & SPLICE_F_NONBLOCK) {
46432 ret = -EAGAIN;
46433 break;
46434@@ -1768,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_
46435 pipe_lock(pipe);
46436
46437 while (pipe->nrbufs >= pipe->buffers) {
46438- if (!pipe->readers) {
46439+ if (!atomic_read(&pipe->readers)) {
46440 send_sig(SIGPIPE, current, 0);
46441 ret = -EPIPE;
46442 break;
46443@@ -1781,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_
46444 ret = -ERESTARTSYS;
46445 break;
46446 }
46447- pipe->waiting_writers++;
46448+ atomic_inc(&pipe->waiting_writers);
46449 pipe_wait(pipe);
46450- pipe->waiting_writers--;
46451+ atomic_dec(&pipe->waiting_writers);
46452 }
46453
46454 pipe_unlock(pipe);
46455@@ -1819,14 +1825,14 @@ retry:
46456 pipe_double_lock(ipipe, opipe);
46457
46458 do {
46459- if (!opipe->readers) {
46460+ if (!atomic_read(&opipe->readers)) {
46461 send_sig(SIGPIPE, current, 0);
46462 if (!ret)
46463 ret = -EPIPE;
46464 break;
46465 }
46466
46467- if (!ipipe->nrbufs && !ipipe->writers)
46468+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
46469 break;
46470
46471 /*
46472@@ -1923,7 +1929,7 @@ static int link_pipe(struct pipe_inode_i
46473 pipe_double_lock(ipipe, opipe);
46474
46475 do {
46476- if (!opipe->readers) {
46477+ if (!atomic_read(&opipe->readers)) {
46478 send_sig(SIGPIPE, current, 0);
46479 if (!ret)
46480 ret = -EPIPE;
46481@@ -1968,7 +1974,7 @@ static int link_pipe(struct pipe_inode_i
46482 * return EAGAIN if we have the potential of some data in the
46483 * future, otherwise just return 0
46484 */
46485- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
46486+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
46487 ret = -EAGAIN;
46488
46489 pipe_unlock(ipipe);
46490diff -urNp linux-3.0.7/fs/sysfs/file.c linux-3.0.7/fs/sysfs/file.c
46491--- linux-3.0.7/fs/sysfs/file.c 2011-07-21 22:17:23.000000000 -0400
46492+++ linux-3.0.7/fs/sysfs/file.c 2011-08-23 21:47:56.000000000 -0400
46493@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
46494
46495 struct sysfs_open_dirent {
46496 atomic_t refcnt;
46497- atomic_t event;
46498+ atomic_unchecked_t event;
46499 wait_queue_head_t poll;
46500 struct list_head buffers; /* goes through sysfs_buffer.list */
46501 };
46502@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentr
46503 if (!sysfs_get_active(attr_sd))
46504 return -ENODEV;
46505
46506- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
46507+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
46508 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
46509
46510 sysfs_put_active(attr_sd);
46511@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct
46512 return -ENOMEM;
46513
46514 atomic_set(&new_od->refcnt, 0);
46515- atomic_set(&new_od->event, 1);
46516+ atomic_set_unchecked(&new_od->event, 1);
46517 init_waitqueue_head(&new_od->poll);
46518 INIT_LIST_HEAD(&new_od->buffers);
46519 goto retry;
46520@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct fi
46521
46522 sysfs_put_active(attr_sd);
46523
46524- if (buffer->event != atomic_read(&od->event))
46525+ if (buffer->event != atomic_read_unchecked(&od->event))
46526 goto trigger;
46527
46528 return DEFAULT_POLLMASK;
46529@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_di
46530
46531 od = sd->s_attr.open;
46532 if (od) {
46533- atomic_inc(&od->event);
46534+ atomic_inc_unchecked(&od->event);
46535 wake_up_interruptible(&od->poll);
46536 }
46537
46538diff -urNp linux-3.0.7/fs/sysfs/mount.c linux-3.0.7/fs/sysfs/mount.c
46539--- linux-3.0.7/fs/sysfs/mount.c 2011-07-21 22:17:23.000000000 -0400
46540+++ linux-3.0.7/fs/sysfs/mount.c 2011-08-23 21:48:14.000000000 -0400
46541@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
46542 .s_name = "",
46543 .s_count = ATOMIC_INIT(1),
46544 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
46545+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
46546+ .s_mode = S_IFDIR | S_IRWXU,
46547+#else
46548 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
46549+#endif
46550 .s_ino = 1,
46551 };
46552
46553diff -urNp linux-3.0.7/fs/sysfs/symlink.c linux-3.0.7/fs/sysfs/symlink.c
46554--- linux-3.0.7/fs/sysfs/symlink.c 2011-07-21 22:17:23.000000000 -0400
46555+++ linux-3.0.7/fs/sysfs/symlink.c 2011-08-23 21:47:56.000000000 -0400
46556@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
46557
46558 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46559 {
46560- char *page = nd_get_link(nd);
46561+ const char *page = nd_get_link(nd);
46562 if (!IS_ERR(page))
46563 free_page((unsigned long)page);
46564 }
46565diff -urNp linux-3.0.7/fs/udf/inode.c linux-3.0.7/fs/udf/inode.c
46566--- linux-3.0.7/fs/udf/inode.c 2011-07-21 22:17:23.000000000 -0400
46567+++ linux-3.0.7/fs/udf/inode.c 2011-08-23 21:48:14.000000000 -0400
46568@@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
46569 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
46570 int lastblock = 0;
46571
46572+ pax_track_stack();
46573+
46574 prev_epos.offset = udf_file_entry_alloc_offset(inode);
46575 prev_epos.block = iinfo->i_location;
46576 prev_epos.bh = NULL;
46577diff -urNp linux-3.0.7/fs/udf/misc.c linux-3.0.7/fs/udf/misc.c
46578--- linux-3.0.7/fs/udf/misc.c 2011-07-21 22:17:23.000000000 -0400
46579+++ linux-3.0.7/fs/udf/misc.c 2011-08-23 21:47:56.000000000 -0400
46580@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
46581
46582 u8 udf_tag_checksum(const struct tag *t)
46583 {
46584- u8 *data = (u8 *)t;
46585+ const u8 *data = (const u8 *)t;
46586 u8 checksum = 0;
46587 int i;
46588 for (i = 0; i < sizeof(struct tag); ++i)
46589diff -urNp linux-3.0.7/fs/utimes.c linux-3.0.7/fs/utimes.c
46590--- linux-3.0.7/fs/utimes.c 2011-07-21 22:17:23.000000000 -0400
46591+++ linux-3.0.7/fs/utimes.c 2011-08-23 21:48:14.000000000 -0400
46592@@ -1,6 +1,7 @@
46593 #include <linux/compiler.h>
46594 #include <linux/file.h>
46595 #include <linux/fs.h>
46596+#include <linux/security.h>
46597 #include <linux/linkage.h>
46598 #include <linux/mount.h>
46599 #include <linux/namei.h>
46600@@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
46601 goto mnt_drop_write_and_out;
46602 }
46603 }
46604+
46605+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
46606+ error = -EACCES;
46607+ goto mnt_drop_write_and_out;
46608+ }
46609+
46610 mutex_lock(&inode->i_mutex);
46611 error = notify_change(path->dentry, &newattrs);
46612 mutex_unlock(&inode->i_mutex);
46613diff -urNp linux-3.0.7/fs/xattr_acl.c linux-3.0.7/fs/xattr_acl.c
46614--- linux-3.0.7/fs/xattr_acl.c 2011-07-21 22:17:23.000000000 -0400
46615+++ linux-3.0.7/fs/xattr_acl.c 2011-08-23 21:47:56.000000000 -0400
46616@@ -17,8 +17,8 @@
46617 struct posix_acl *
46618 posix_acl_from_xattr(const void *value, size_t size)
46619 {
46620- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
46621- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
46622+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
46623+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
46624 int count;
46625 struct posix_acl *acl;
46626 struct posix_acl_entry *acl_e;
46627diff -urNp linux-3.0.7/fs/xattr.c linux-3.0.7/fs/xattr.c
46628--- linux-3.0.7/fs/xattr.c 2011-07-21 22:17:23.000000000 -0400
46629+++ linux-3.0.7/fs/xattr.c 2011-08-23 21:48:14.000000000 -0400
46630@@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
46631 * Extended attribute SET operations
46632 */
46633 static long
46634-setxattr(struct dentry *d, const char __user *name, const void __user *value,
46635+setxattr(struct path *path, const char __user *name, const void __user *value,
46636 size_t size, int flags)
46637 {
46638 int error;
46639@@ -278,7 +278,13 @@ setxattr(struct dentry *d, const char __
46640 return PTR_ERR(kvalue);
46641 }
46642
46643- error = vfs_setxattr(d, kname, kvalue, size, flags);
46644+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
46645+ error = -EACCES;
46646+ goto out;
46647+ }
46648+
46649+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
46650+out:
46651 kfree(kvalue);
46652 return error;
46653 }
46654@@ -295,7 +301,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
46655 return error;
46656 error = mnt_want_write(path.mnt);
46657 if (!error) {
46658- error = setxattr(path.dentry, name, value, size, flags);
46659+ error = setxattr(&path, name, value, size, flags);
46660 mnt_drop_write(path.mnt);
46661 }
46662 path_put(&path);
46663@@ -314,7 +320,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
46664 return error;
46665 error = mnt_want_write(path.mnt);
46666 if (!error) {
46667- error = setxattr(path.dentry, name, value, size, flags);
46668+ error = setxattr(&path, name, value, size, flags);
46669 mnt_drop_write(path.mnt);
46670 }
46671 path_put(&path);
46672@@ -325,17 +331,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
46673 const void __user *,value, size_t, size, int, flags)
46674 {
46675 struct file *f;
46676- struct dentry *dentry;
46677 int error = -EBADF;
46678
46679 f = fget(fd);
46680 if (!f)
46681 return error;
46682- dentry = f->f_path.dentry;
46683- audit_inode(NULL, dentry);
46684+ audit_inode(NULL, f->f_path.dentry);
46685 error = mnt_want_write_file(f);
46686 if (!error) {
46687- error = setxattr(dentry, name, value, size, flags);
46688+ error = setxattr(&f->f_path, name, value, size, flags);
46689 mnt_drop_write(f->f_path.mnt);
46690 }
46691 fput(f);
46692diff -urNp linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl32.c linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl32.c
46693--- linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-07-21 22:17:23.000000000 -0400
46694+++ linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-08-23 21:48:14.000000000 -0400
46695@@ -73,6 +73,7 @@ xfs_compat_ioc_fsgeometry_v1(
46696 xfs_fsop_geom_t fsgeo;
46697 int error;
46698
46699+ memset(&fsgeo, 0, sizeof(fsgeo));
46700 error = xfs_fs_geometry(mp, &fsgeo, 3);
46701 if (error)
46702 return -error;
46703diff -urNp linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl.c linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl.c
46704--- linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl.c 2011-07-21 22:17:23.000000000 -0400
46705+++ linux-3.0.7/fs/xfs/linux-2.6/xfs_ioctl.c 2011-08-23 21:47:56.000000000 -0400
46706@@ -128,7 +128,7 @@ xfs_find_handle(
46707 }
46708
46709 error = -EFAULT;
46710- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
46711+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
46712 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
46713 goto out_put;
46714
46715diff -urNp linux-3.0.7/fs/xfs/linux-2.6/xfs_iops.c linux-3.0.7/fs/xfs/linux-2.6/xfs_iops.c
46716--- linux-3.0.7/fs/xfs/linux-2.6/xfs_iops.c 2011-07-21 22:17:23.000000000 -0400
46717+++ linux-3.0.7/fs/xfs/linux-2.6/xfs_iops.c 2011-08-23 21:47:56.000000000 -0400
46718@@ -437,7 +437,7 @@ xfs_vn_put_link(
46719 struct nameidata *nd,
46720 void *p)
46721 {
46722- char *s = nd_get_link(nd);
46723+ const char *s = nd_get_link(nd);
46724
46725 if (!IS_ERR(s))
46726 kfree(s);
46727diff -urNp linux-3.0.7/fs/xfs/xfs_bmap.c linux-3.0.7/fs/xfs/xfs_bmap.c
46728--- linux-3.0.7/fs/xfs/xfs_bmap.c 2011-07-21 22:17:23.000000000 -0400
46729+++ linux-3.0.7/fs/xfs/xfs_bmap.c 2011-08-23 21:47:56.000000000 -0400
46730@@ -253,7 +253,7 @@ xfs_bmap_validate_ret(
46731 int nmap,
46732 int ret_nmap);
46733 #else
46734-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
46735+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
46736 #endif /* DEBUG */
46737
46738 STATIC int
46739diff -urNp linux-3.0.7/fs/xfs/xfs_dir2_sf.c linux-3.0.7/fs/xfs/xfs_dir2_sf.c
46740--- linux-3.0.7/fs/xfs/xfs_dir2_sf.c 2011-07-21 22:17:23.000000000 -0400
46741+++ linux-3.0.7/fs/xfs/xfs_dir2_sf.c 2011-08-23 21:47:56.000000000 -0400
46742@@ -780,7 +780,15 @@ xfs_dir2_sf_getdents(
46743 }
46744
46745 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
46746- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
46747+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
46748+ char name[sfep->namelen];
46749+ memcpy(name, sfep->name, sfep->namelen);
46750+ if (filldir(dirent, name, sfep->namelen,
46751+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
46752+ *offset = off & 0x7fffffff;
46753+ return 0;
46754+ }
46755+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
46756 off & 0x7fffffff, ino, DT_UNKNOWN)) {
46757 *offset = off & 0x7fffffff;
46758 return 0;
46759diff -urNp linux-3.0.7/grsecurity/gracl_alloc.c linux-3.0.7/grsecurity/gracl_alloc.c
46760--- linux-3.0.7/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
46761+++ linux-3.0.7/grsecurity/gracl_alloc.c 2011-08-23 21:48:14.000000000 -0400
46762@@ -0,0 +1,105 @@
46763+#include <linux/kernel.h>
46764+#include <linux/mm.h>
46765+#include <linux/slab.h>
46766+#include <linux/vmalloc.h>
46767+#include <linux/gracl.h>
46768+#include <linux/grsecurity.h>
46769+
46770+static unsigned long alloc_stack_next = 1;
46771+static unsigned long alloc_stack_size = 1;
46772+static void **alloc_stack;
46773+
46774+static __inline__ int
46775+alloc_pop(void)
46776+{
46777+ if (alloc_stack_next == 1)
46778+ return 0;
46779+
46780+ kfree(alloc_stack[alloc_stack_next - 2]);
46781+
46782+ alloc_stack_next--;
46783+
46784+ return 1;
46785+}
46786+
46787+static __inline__ int
46788+alloc_push(void *buf)
46789+{
46790+ if (alloc_stack_next >= alloc_stack_size)
46791+ return 1;
46792+
46793+ alloc_stack[alloc_stack_next - 1] = buf;
46794+
46795+ alloc_stack_next++;
46796+
46797+ return 0;
46798+}
46799+
46800+void *
46801+acl_alloc(unsigned long len)
46802+{
46803+ void *ret = NULL;
46804+
46805+ if (!len || len > PAGE_SIZE)
46806+ goto out;
46807+
46808+ ret = kmalloc(len, GFP_KERNEL);
46809+
46810+ if (ret) {
46811+ if (alloc_push(ret)) {
46812+ kfree(ret);
46813+ ret = NULL;
46814+ }
46815+ }
46816+
46817+out:
46818+ return ret;
46819+}
46820+
46821+void *
46822+acl_alloc_num(unsigned long num, unsigned long len)
46823+{
46824+ if (!len || (num > (PAGE_SIZE / len)))
46825+ return NULL;
46826+
46827+ return acl_alloc(num * len);
46828+}
46829+
46830+void
46831+acl_free_all(void)
46832+{
46833+ if (gr_acl_is_enabled() || !alloc_stack)
46834+ return;
46835+
46836+ while (alloc_pop()) ;
46837+
46838+ if (alloc_stack) {
46839+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
46840+ kfree(alloc_stack);
46841+ else
46842+ vfree(alloc_stack);
46843+ }
46844+
46845+ alloc_stack = NULL;
46846+ alloc_stack_size = 1;
46847+ alloc_stack_next = 1;
46848+
46849+ return;
46850+}
46851+
46852+int
46853+acl_alloc_stack_init(unsigned long size)
46854+{
46855+ if ((size * sizeof (void *)) <= PAGE_SIZE)
46856+ alloc_stack =
46857+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
46858+ else
46859+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
46860+
46861+ alloc_stack_size = size;
46862+
46863+ if (!alloc_stack)
46864+ return 0;
46865+ else
46866+ return 1;
46867+}
46868diff -urNp linux-3.0.7/grsecurity/gracl.c linux-3.0.7/grsecurity/gracl.c
46869--- linux-3.0.7/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
46870+++ linux-3.0.7/grsecurity/gracl.c 2011-10-17 06:42:59.000000000 -0400
46871@@ -0,0 +1,4154 @@
46872+#include <linux/kernel.h>
46873+#include <linux/module.h>
46874+#include <linux/sched.h>
46875+#include <linux/mm.h>
46876+#include <linux/file.h>
46877+#include <linux/fs.h>
46878+#include <linux/namei.h>
46879+#include <linux/mount.h>
46880+#include <linux/tty.h>
46881+#include <linux/proc_fs.h>
46882+#include <linux/lglock.h>
46883+#include <linux/slab.h>
46884+#include <linux/vmalloc.h>
46885+#include <linux/types.h>
46886+#include <linux/sysctl.h>
46887+#include <linux/netdevice.h>
46888+#include <linux/ptrace.h>
46889+#include <linux/gracl.h>
46890+#include <linux/gralloc.h>
46891+#include <linux/grsecurity.h>
46892+#include <linux/grinternal.h>
46893+#include <linux/pid_namespace.h>
46894+#include <linux/fdtable.h>
46895+#include <linux/percpu.h>
46896+
46897+#include <asm/uaccess.h>
46898+#include <asm/errno.h>
46899+#include <asm/mman.h>
46900+
46901+static struct acl_role_db acl_role_set;
46902+static struct name_db name_set;
46903+static struct inodev_db inodev_set;
46904+
46905+/* for keeping track of userspace pointers used for subjects, so we
46906+ can share references in the kernel as well
46907+*/
46908+
46909+static struct path real_root;
46910+
46911+static struct acl_subj_map_db subj_map_set;
46912+
46913+static struct acl_role_label *default_role;
46914+
46915+static struct acl_role_label *role_list;
46916+
46917+static u16 acl_sp_role_value;
46918+
46919+extern char *gr_shared_page[4];
46920+static DEFINE_MUTEX(gr_dev_mutex);
46921+DEFINE_RWLOCK(gr_inode_lock);
46922+
46923+struct gr_arg *gr_usermode;
46924+
46925+static unsigned int gr_status __read_only = GR_STATUS_INIT;
46926+
46927+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
46928+extern void gr_clear_learn_entries(void);
46929+
46930+#ifdef CONFIG_GRKERNSEC_RESLOG
46931+extern void gr_log_resource(const struct task_struct *task,
46932+ const int res, const unsigned long wanted, const int gt);
46933+#endif
46934+
46935+unsigned char *gr_system_salt;
46936+unsigned char *gr_system_sum;
46937+
46938+static struct sprole_pw **acl_special_roles = NULL;
46939+static __u16 num_sprole_pws = 0;
46940+
46941+static struct acl_role_label *kernel_role = NULL;
46942+
46943+static unsigned int gr_auth_attempts = 0;
46944+static unsigned long gr_auth_expires = 0UL;
46945+
46946+#ifdef CONFIG_NET
46947+extern struct vfsmount *sock_mnt;
46948+#endif
46949+
46950+extern struct vfsmount *pipe_mnt;
46951+extern struct vfsmount *shm_mnt;
46952+#ifdef CONFIG_HUGETLBFS
46953+extern struct vfsmount *hugetlbfs_vfsmount;
46954+#endif
46955+
46956+static struct acl_object_label *fakefs_obj_rw;
46957+static struct acl_object_label *fakefs_obj_rwx;
46958+
46959+extern int gr_init_uidset(void);
46960+extern void gr_free_uidset(void);
46961+extern void gr_remove_uid(uid_t uid);
46962+extern int gr_find_uid(uid_t uid);
46963+
46964+DECLARE_BRLOCK(vfsmount_lock);
46965+
46966+__inline__ int
46967+gr_acl_is_enabled(void)
46968+{
46969+ return (gr_status & GR_READY);
46970+}
46971+
46972+#ifdef CONFIG_BTRFS_FS
46973+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
46974+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
46975+#endif
46976+
46977+static inline dev_t __get_dev(const struct dentry *dentry)
46978+{
46979+#ifdef CONFIG_BTRFS_FS
46980+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
46981+ return get_btrfs_dev_from_inode(dentry->d_inode);
46982+ else
46983+#endif
46984+ return dentry->d_inode->i_sb->s_dev;
46985+}
46986+
46987+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
46988+{
46989+ return __get_dev(dentry);
46990+}
46991+
46992+static char gr_task_roletype_to_char(struct task_struct *task)
46993+{
46994+ switch (task->role->roletype &
46995+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
46996+ GR_ROLE_SPECIAL)) {
46997+ case GR_ROLE_DEFAULT:
46998+ return 'D';
46999+ case GR_ROLE_USER:
47000+ return 'U';
47001+ case GR_ROLE_GROUP:
47002+ return 'G';
47003+ case GR_ROLE_SPECIAL:
47004+ return 'S';
47005+ }
47006+
47007+ return 'X';
47008+}
47009+
47010+char gr_roletype_to_char(void)
47011+{
47012+ return gr_task_roletype_to_char(current);
47013+}
47014+
47015+__inline__ int
47016+gr_acl_tpe_check(void)
47017+{
47018+ if (unlikely(!(gr_status & GR_READY)))
47019+ return 0;
47020+ if (current->role->roletype & GR_ROLE_TPE)
47021+ return 1;
47022+ else
47023+ return 0;
47024+}
47025+
47026+int
47027+gr_handle_rawio(const struct inode *inode)
47028+{
47029+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47030+ if (inode && S_ISBLK(inode->i_mode) &&
47031+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
47032+ !capable(CAP_SYS_RAWIO))
47033+ return 1;
47034+#endif
47035+ return 0;
47036+}
47037+
47038+static int
47039+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
47040+{
47041+ if (likely(lena != lenb))
47042+ return 0;
47043+
47044+ return !memcmp(a, b, lena);
47045+}
47046+
47047+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
47048+{
47049+ *buflen -= namelen;
47050+ if (*buflen < 0)
47051+ return -ENAMETOOLONG;
47052+ *buffer -= namelen;
47053+ memcpy(*buffer, str, namelen);
47054+ return 0;
47055+}
47056+
47057+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
47058+{
47059+ return prepend(buffer, buflen, name->name, name->len);
47060+}
47061+
47062+static int prepend_path(const struct path *path, struct path *root,
47063+ char **buffer, int *buflen)
47064+{
47065+ struct dentry *dentry = path->dentry;
47066+ struct vfsmount *vfsmnt = path->mnt;
47067+ bool slash = false;
47068+ int error = 0;
47069+
47070+ while (dentry != root->dentry || vfsmnt != root->mnt) {
47071+ struct dentry * parent;
47072+
47073+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
47074+ /* Global root? */
47075+ if (vfsmnt->mnt_parent == vfsmnt) {
47076+ goto out;
47077+ }
47078+ dentry = vfsmnt->mnt_mountpoint;
47079+ vfsmnt = vfsmnt->mnt_parent;
47080+ continue;
47081+ }
47082+ parent = dentry->d_parent;
47083+ prefetch(parent);
47084+ spin_lock(&dentry->d_lock);
47085+ error = prepend_name(buffer, buflen, &dentry->d_name);
47086+ spin_unlock(&dentry->d_lock);
47087+ if (!error)
47088+ error = prepend(buffer, buflen, "/", 1);
47089+ if (error)
47090+ break;
47091+
47092+ slash = true;
47093+ dentry = parent;
47094+ }
47095+
47096+out:
47097+ if (!error && !slash)
47098+ error = prepend(buffer, buflen, "/", 1);
47099+
47100+ return error;
47101+}
47102+
47103+/* this must be called with vfsmount_lock and rename_lock held */
47104+
47105+static char *__our_d_path(const struct path *path, struct path *root,
47106+ char *buf, int buflen)
47107+{
47108+ char *res = buf + buflen;
47109+ int error;
47110+
47111+ prepend(&res, &buflen, "\0", 1);
47112+ error = prepend_path(path, root, &res, &buflen);
47113+ if (error)
47114+ return ERR_PTR(error);
47115+
47116+ return res;
47117+}
47118+
47119+static char *
47120+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
47121+{
47122+ char *retval;
47123+
47124+ retval = __our_d_path(path, root, buf, buflen);
47125+ if (unlikely(IS_ERR(retval)))
47126+ retval = strcpy(buf, "<path too long>");
47127+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
47128+ retval[1] = '\0';
47129+
47130+ return retval;
47131+}
47132+
47133+static char *
47134+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
47135+ char *buf, int buflen)
47136+{
47137+ struct path path;
47138+ char *res;
47139+
47140+ path.dentry = (struct dentry *)dentry;
47141+ path.mnt = (struct vfsmount *)vfsmnt;
47142+
47143+ /* we can use real_root.dentry, real_root.mnt, because this is only called
47144+ by the RBAC system */
47145+ res = gen_full_path(&path, &real_root, buf, buflen);
47146+
47147+ return res;
47148+}
47149+
47150+static char *
47151+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
47152+ char *buf, int buflen)
47153+{
47154+ char *res;
47155+ struct path path;
47156+ struct path root;
47157+ struct task_struct *reaper = &init_task;
47158+
47159+ path.dentry = (struct dentry *)dentry;
47160+ path.mnt = (struct vfsmount *)vfsmnt;
47161+
47162+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
47163+ get_fs_root(reaper->fs, &root);
47164+
47165+ write_seqlock(&rename_lock);
47166+ br_read_lock(vfsmount_lock);
47167+ res = gen_full_path(&path, &root, buf, buflen);
47168+ br_read_unlock(vfsmount_lock);
47169+ write_sequnlock(&rename_lock);
47170+
47171+ path_put(&root);
47172+ return res;
47173+}
47174+
47175+static char *
47176+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
47177+{
47178+ char *ret;
47179+ write_seqlock(&rename_lock);
47180+ br_read_lock(vfsmount_lock);
47181+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
47182+ PAGE_SIZE);
47183+ br_read_unlock(vfsmount_lock);
47184+ write_sequnlock(&rename_lock);
47185+ return ret;
47186+}
47187+
47188+static char *
47189+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
47190+{
47191+ char *ret;
47192+ char *buf;
47193+ int buflen;
47194+
47195+ write_seqlock(&rename_lock);
47196+ br_read_lock(vfsmount_lock);
47197+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
47198+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
47199+ buflen = (int)(ret - buf);
47200+ if (buflen >= 5)
47201+ prepend(&ret, &buflen, "/proc", 5);
47202+ else
47203+ ret = strcpy(buf, "<path too long>");
47204+ br_read_unlock(vfsmount_lock);
47205+ write_sequnlock(&rename_lock);
47206+ return ret;
47207+}
47208+
47209+char *
47210+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
47211+{
47212+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
47213+ PAGE_SIZE);
47214+}
47215+
47216+char *
47217+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
47218+{
47219+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
47220+ PAGE_SIZE);
47221+}
47222+
47223+char *
47224+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
47225+{
47226+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
47227+ PAGE_SIZE);
47228+}
47229+
47230+char *
47231+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
47232+{
47233+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
47234+ PAGE_SIZE);
47235+}
47236+
47237+char *
47238+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
47239+{
47240+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
47241+ PAGE_SIZE);
47242+}
47243+
47244+__inline__ __u32
47245+to_gr_audit(const __u32 reqmode)
47246+{
47247+ /* masks off auditable permission flags, then shifts them to create
47248+ auditing flags, and adds the special case of append auditing if
47249+ we're requesting write */
47250+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
47251+}
47252+
47253+struct acl_subject_label *
47254+lookup_subject_map(const struct acl_subject_label *userp)
47255+{
47256+ unsigned int index = shash(userp, subj_map_set.s_size);
47257+ struct subject_map *match;
47258+
47259+ match = subj_map_set.s_hash[index];
47260+
47261+ while (match && match->user != userp)
47262+ match = match->next;
47263+
47264+ if (match != NULL)
47265+ return match->kernel;
47266+ else
47267+ return NULL;
47268+}
47269+
47270+static void
47271+insert_subj_map_entry(struct subject_map *subjmap)
47272+{
47273+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
47274+ struct subject_map **curr;
47275+
47276+ subjmap->prev = NULL;
47277+
47278+ curr = &subj_map_set.s_hash[index];
47279+ if (*curr != NULL)
47280+ (*curr)->prev = subjmap;
47281+
47282+ subjmap->next = *curr;
47283+ *curr = subjmap;
47284+
47285+ return;
47286+}
47287+
47288+static struct acl_role_label *
47289+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
47290+ const gid_t gid)
47291+{
47292+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
47293+ struct acl_role_label *match;
47294+ struct role_allowed_ip *ipp;
47295+ unsigned int x;
47296+ u32 curr_ip = task->signal->curr_ip;
47297+
47298+ task->signal->saved_ip = curr_ip;
47299+
47300+ match = acl_role_set.r_hash[index];
47301+
47302+ while (match) {
47303+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
47304+ for (x = 0; x < match->domain_child_num; x++) {
47305+ if (match->domain_children[x] == uid)
47306+ goto found;
47307+ }
47308+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
47309+ break;
47310+ match = match->next;
47311+ }
47312+found:
47313+ if (match == NULL) {
47314+ try_group:
47315+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
47316+ match = acl_role_set.r_hash[index];
47317+
47318+ while (match) {
47319+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
47320+ for (x = 0; x < match->domain_child_num; x++) {
47321+ if (match->domain_children[x] == gid)
47322+ goto found2;
47323+ }
47324+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
47325+ break;
47326+ match = match->next;
47327+ }
47328+found2:
47329+ if (match == NULL)
47330+ match = default_role;
47331+ if (match->allowed_ips == NULL)
47332+ return match;
47333+ else {
47334+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47335+ if (likely
47336+ ((ntohl(curr_ip) & ipp->netmask) ==
47337+ (ntohl(ipp->addr) & ipp->netmask)))
47338+ return match;
47339+ }
47340+ match = default_role;
47341+ }
47342+ } else if (match->allowed_ips == NULL) {
47343+ return match;
47344+ } else {
47345+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47346+ if (likely
47347+ ((ntohl(curr_ip) & ipp->netmask) ==
47348+ (ntohl(ipp->addr) & ipp->netmask)))
47349+ return match;
47350+ }
47351+ goto try_group;
47352+ }
47353+
47354+ return match;
47355+}
47356+
47357+struct acl_subject_label *
47358+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
47359+ const struct acl_role_label *role)
47360+{
47361+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
47362+ struct acl_subject_label *match;
47363+
47364+ match = role->subj_hash[index];
47365+
47366+ while (match && (match->inode != ino || match->device != dev ||
47367+ (match->mode & GR_DELETED))) {
47368+ match = match->next;
47369+ }
47370+
47371+ if (match && !(match->mode & GR_DELETED))
47372+ return match;
47373+ else
47374+ return NULL;
47375+}
47376+
47377+struct acl_subject_label *
47378+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
47379+ const struct acl_role_label *role)
47380+{
47381+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
47382+ struct acl_subject_label *match;
47383+
47384+ match = role->subj_hash[index];
47385+
47386+ while (match && (match->inode != ino || match->device != dev ||
47387+ !(match->mode & GR_DELETED))) {
47388+ match = match->next;
47389+ }
47390+
47391+ if (match && (match->mode & GR_DELETED))
47392+ return match;
47393+ else
47394+ return NULL;
47395+}
47396+
47397+static struct acl_object_label *
47398+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
47399+ const struct acl_subject_label *subj)
47400+{
47401+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47402+ struct acl_object_label *match;
47403+
47404+ match = subj->obj_hash[index];
47405+
47406+ while (match && (match->inode != ino || match->device != dev ||
47407+ (match->mode & GR_DELETED))) {
47408+ match = match->next;
47409+ }
47410+
47411+ if (match && !(match->mode & GR_DELETED))
47412+ return match;
47413+ else
47414+ return NULL;
47415+}
47416+
47417+static struct acl_object_label *
47418+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
47419+ const struct acl_subject_label *subj)
47420+{
47421+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47422+ struct acl_object_label *match;
47423+
47424+ match = subj->obj_hash[index];
47425+
47426+ while (match && (match->inode != ino || match->device != dev ||
47427+ !(match->mode & GR_DELETED))) {
47428+ match = match->next;
47429+ }
47430+
47431+ if (match && (match->mode & GR_DELETED))
47432+ return match;
47433+
47434+ match = subj->obj_hash[index];
47435+
47436+ while (match && (match->inode != ino || match->device != dev ||
47437+ (match->mode & GR_DELETED))) {
47438+ match = match->next;
47439+ }
47440+
47441+ if (match && !(match->mode & GR_DELETED))
47442+ return match;
47443+ else
47444+ return NULL;
47445+}
47446+
47447+static struct name_entry *
47448+lookup_name_entry(const char *name)
47449+{
47450+ unsigned int len = strlen(name);
47451+ unsigned int key = full_name_hash(name, len);
47452+ unsigned int index = key % name_set.n_size;
47453+ struct name_entry *match;
47454+
47455+ match = name_set.n_hash[index];
47456+
47457+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
47458+ match = match->next;
47459+
47460+ return match;
47461+}
47462+
47463+static struct name_entry *
47464+lookup_name_entry_create(const char *name)
47465+{
47466+ unsigned int len = strlen(name);
47467+ unsigned int key = full_name_hash(name, len);
47468+ unsigned int index = key % name_set.n_size;
47469+ struct name_entry *match;
47470+
47471+ match = name_set.n_hash[index];
47472+
47473+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47474+ !match->deleted))
47475+ match = match->next;
47476+
47477+ if (match && match->deleted)
47478+ return match;
47479+
47480+ match = name_set.n_hash[index];
47481+
47482+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47483+ match->deleted))
47484+ match = match->next;
47485+
47486+ if (match && !match->deleted)
47487+ return match;
47488+ else
47489+ return NULL;
47490+}
47491+
47492+static struct inodev_entry *
47493+lookup_inodev_entry(const ino_t ino, const dev_t dev)
47494+{
47495+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
47496+ struct inodev_entry *match;
47497+
47498+ match = inodev_set.i_hash[index];
47499+
47500+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
47501+ match = match->next;
47502+
47503+ return match;
47504+}
47505+
47506+static void
47507+insert_inodev_entry(struct inodev_entry *entry)
47508+{
47509+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
47510+ inodev_set.i_size);
47511+ struct inodev_entry **curr;
47512+
47513+ entry->prev = NULL;
47514+
47515+ curr = &inodev_set.i_hash[index];
47516+ if (*curr != NULL)
47517+ (*curr)->prev = entry;
47518+
47519+ entry->next = *curr;
47520+ *curr = entry;
47521+
47522+ return;
47523+}
47524+
47525+static void
47526+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
47527+{
47528+ unsigned int index =
47529+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
47530+ struct acl_role_label **curr;
47531+ struct acl_role_label *tmp;
47532+
47533+ curr = &acl_role_set.r_hash[index];
47534+
47535+ /* if role was already inserted due to domains and already has
47536+ a role in the same bucket as it attached, then we need to
47537+ combine these two buckets
47538+ */
47539+ if (role->next) {
47540+ tmp = role->next;
47541+ while (tmp->next)
47542+ tmp = tmp->next;
47543+ tmp->next = *curr;
47544+ } else
47545+ role->next = *curr;
47546+ *curr = role;
47547+
47548+ return;
47549+}
47550+
47551+static void
47552+insert_acl_role_label(struct acl_role_label *role)
47553+{
47554+ int i;
47555+
47556+ if (role_list == NULL) {
47557+ role_list = role;
47558+ role->prev = NULL;
47559+ } else {
47560+ role->prev = role_list;
47561+ role_list = role;
47562+ }
47563+
47564+ /* used for hash chains */
47565+ role->next = NULL;
47566+
47567+ if (role->roletype & GR_ROLE_DOMAIN) {
47568+ for (i = 0; i < role->domain_child_num; i++)
47569+ __insert_acl_role_label(role, role->domain_children[i]);
47570+ } else
47571+ __insert_acl_role_label(role, role->uidgid);
47572+}
47573+
47574+static int
47575+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
47576+{
47577+ struct name_entry **curr, *nentry;
47578+ struct inodev_entry *ientry;
47579+ unsigned int len = strlen(name);
47580+ unsigned int key = full_name_hash(name, len);
47581+ unsigned int index = key % name_set.n_size;
47582+
47583+ curr = &name_set.n_hash[index];
47584+
47585+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
47586+ curr = &((*curr)->next);
47587+
47588+ if (*curr != NULL)
47589+ return 1;
47590+
47591+ nentry = acl_alloc(sizeof (struct name_entry));
47592+ if (nentry == NULL)
47593+ return 0;
47594+ ientry = acl_alloc(sizeof (struct inodev_entry));
47595+ if (ientry == NULL)
47596+ return 0;
47597+ ientry->nentry = nentry;
47598+
47599+ nentry->key = key;
47600+ nentry->name = name;
47601+ nentry->inode = inode;
47602+ nentry->device = device;
47603+ nentry->len = len;
47604+ nentry->deleted = deleted;
47605+
47606+ nentry->prev = NULL;
47607+ curr = &name_set.n_hash[index];
47608+ if (*curr != NULL)
47609+ (*curr)->prev = nentry;
47610+ nentry->next = *curr;
47611+ *curr = nentry;
47612+
47613+ /* insert us into the table searchable by inode/dev */
47614+ insert_inodev_entry(ientry);
47615+
47616+ return 1;
47617+}
47618+
47619+static void
47620+insert_acl_obj_label(struct acl_object_label *obj,
47621+ struct acl_subject_label *subj)
47622+{
47623+ unsigned int index =
47624+ fhash(obj->inode, obj->device, subj->obj_hash_size);
47625+ struct acl_object_label **curr;
47626+
47627+
47628+ obj->prev = NULL;
47629+
47630+ curr = &subj->obj_hash[index];
47631+ if (*curr != NULL)
47632+ (*curr)->prev = obj;
47633+
47634+ obj->next = *curr;
47635+ *curr = obj;
47636+
47637+ return;
47638+}
47639+
47640+static void
47641+insert_acl_subj_label(struct acl_subject_label *obj,
47642+ struct acl_role_label *role)
47643+{
47644+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
47645+ struct acl_subject_label **curr;
47646+
47647+ obj->prev = NULL;
47648+
47649+ curr = &role->subj_hash[index];
47650+ if (*curr != NULL)
47651+ (*curr)->prev = obj;
47652+
47653+ obj->next = *curr;
47654+ *curr = obj;
47655+
47656+ return;
47657+}
47658+
47659+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
47660+
47661+static void *
47662+create_table(__u32 * len, int elementsize)
47663+{
47664+ unsigned int table_sizes[] = {
47665+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
47666+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
47667+ 4194301, 8388593, 16777213, 33554393, 67108859
47668+ };
47669+ void *newtable = NULL;
47670+ unsigned int pwr = 0;
47671+
47672+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
47673+ table_sizes[pwr] <= *len)
47674+ pwr++;
47675+
47676+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
47677+ return newtable;
47678+
47679+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
47680+ newtable =
47681+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
47682+ else
47683+ newtable = vmalloc(table_sizes[pwr] * elementsize);
47684+
47685+ *len = table_sizes[pwr];
47686+
47687+ return newtable;
47688+}
47689+
47690+static int
47691+init_variables(const struct gr_arg *arg)
47692+{
47693+ struct task_struct *reaper = &init_task;
47694+ unsigned int stacksize;
47695+
47696+ subj_map_set.s_size = arg->role_db.num_subjects;
47697+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
47698+ name_set.n_size = arg->role_db.num_objects;
47699+ inodev_set.i_size = arg->role_db.num_objects;
47700+
47701+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
47702+ !name_set.n_size || !inodev_set.i_size)
47703+ return 1;
47704+
47705+ if (!gr_init_uidset())
47706+ return 1;
47707+
47708+ /* set up the stack that holds allocation info */
47709+
47710+ stacksize = arg->role_db.num_pointers + 5;
47711+
47712+ if (!acl_alloc_stack_init(stacksize))
47713+ return 1;
47714+
47715+ /* grab reference for the real root dentry and vfsmount */
47716+ get_fs_root(reaper->fs, &real_root);
47717+
47718+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47719+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
47720+#endif
47721+
47722+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
47723+ if (fakefs_obj_rw == NULL)
47724+ return 1;
47725+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
47726+
47727+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
47728+ if (fakefs_obj_rwx == NULL)
47729+ return 1;
47730+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
47731+
47732+ subj_map_set.s_hash =
47733+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
47734+ acl_role_set.r_hash =
47735+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
47736+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
47737+ inodev_set.i_hash =
47738+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
47739+
47740+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
47741+ !name_set.n_hash || !inodev_set.i_hash)
47742+ return 1;
47743+
47744+ memset(subj_map_set.s_hash, 0,
47745+ sizeof(struct subject_map *) * subj_map_set.s_size);
47746+ memset(acl_role_set.r_hash, 0,
47747+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
47748+ memset(name_set.n_hash, 0,
47749+ sizeof (struct name_entry *) * name_set.n_size);
47750+ memset(inodev_set.i_hash, 0,
47751+ sizeof (struct inodev_entry *) * inodev_set.i_size);
47752+
47753+ return 0;
47754+}
47755+
47756+/* free information not needed after startup
47757+ currently contains user->kernel pointer mappings for subjects
47758+*/
47759+
47760+static void
47761+free_init_variables(void)
47762+{
47763+ __u32 i;
47764+
47765+ if (subj_map_set.s_hash) {
47766+ for (i = 0; i < subj_map_set.s_size; i++) {
47767+ if (subj_map_set.s_hash[i]) {
47768+ kfree(subj_map_set.s_hash[i]);
47769+ subj_map_set.s_hash[i] = NULL;
47770+ }
47771+ }
47772+
47773+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
47774+ PAGE_SIZE)
47775+ kfree(subj_map_set.s_hash);
47776+ else
47777+ vfree(subj_map_set.s_hash);
47778+ }
47779+
47780+ return;
47781+}
47782+
47783+static void
47784+free_variables(void)
47785+{
47786+ struct acl_subject_label *s;
47787+ struct acl_role_label *r;
47788+ struct task_struct *task, *task2;
47789+ unsigned int x;
47790+
47791+ gr_clear_learn_entries();
47792+
47793+ read_lock(&tasklist_lock);
47794+ do_each_thread(task2, task) {
47795+ task->acl_sp_role = 0;
47796+ task->acl_role_id = 0;
47797+ task->acl = NULL;
47798+ task->role = NULL;
47799+ } while_each_thread(task2, task);
47800+ read_unlock(&tasklist_lock);
47801+
47802+ /* release the reference to the real root dentry and vfsmount */
47803+ path_put(&real_root);
47804+
47805+ /* free all object hash tables */
47806+
47807+ FOR_EACH_ROLE_START(r)
47808+ if (r->subj_hash == NULL)
47809+ goto next_role;
47810+ FOR_EACH_SUBJECT_START(r, s, x)
47811+ if (s->obj_hash == NULL)
47812+ break;
47813+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
47814+ kfree(s->obj_hash);
47815+ else
47816+ vfree(s->obj_hash);
47817+ FOR_EACH_SUBJECT_END(s, x)
47818+ FOR_EACH_NESTED_SUBJECT_START(r, s)
47819+ if (s->obj_hash == NULL)
47820+ break;
47821+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
47822+ kfree(s->obj_hash);
47823+ else
47824+ vfree(s->obj_hash);
47825+ FOR_EACH_NESTED_SUBJECT_END(s)
47826+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
47827+ kfree(r->subj_hash);
47828+ else
47829+ vfree(r->subj_hash);
47830+ r->subj_hash = NULL;
47831+next_role:
47832+ FOR_EACH_ROLE_END(r)
47833+
47834+ acl_free_all();
47835+
47836+ if (acl_role_set.r_hash) {
47837+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
47838+ PAGE_SIZE)
47839+ kfree(acl_role_set.r_hash);
47840+ else
47841+ vfree(acl_role_set.r_hash);
47842+ }
47843+ if (name_set.n_hash) {
47844+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
47845+ PAGE_SIZE)
47846+ kfree(name_set.n_hash);
47847+ else
47848+ vfree(name_set.n_hash);
47849+ }
47850+
47851+ if (inodev_set.i_hash) {
47852+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
47853+ PAGE_SIZE)
47854+ kfree(inodev_set.i_hash);
47855+ else
47856+ vfree(inodev_set.i_hash);
47857+ }
47858+
47859+ gr_free_uidset();
47860+
47861+ memset(&name_set, 0, sizeof (struct name_db));
47862+ memset(&inodev_set, 0, sizeof (struct inodev_db));
47863+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
47864+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
47865+
47866+ default_role = NULL;
47867+ role_list = NULL;
47868+
47869+ return;
47870+}
47871+
47872+static __u32
47873+count_user_objs(struct acl_object_label *userp)
47874+{
47875+ struct acl_object_label o_tmp;
47876+ __u32 num = 0;
47877+
47878+ while (userp) {
47879+ if (copy_from_user(&o_tmp, userp,
47880+ sizeof (struct acl_object_label)))
47881+ break;
47882+
47883+ userp = o_tmp.prev;
47884+ num++;
47885+ }
47886+
47887+ return num;
47888+}
47889+
47890+static struct acl_subject_label *
47891+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
47892+
47893+static int
47894+copy_user_glob(struct acl_object_label *obj)
47895+{
47896+ struct acl_object_label *g_tmp, **guser;
47897+ unsigned int len;
47898+ char *tmp;
47899+
47900+ if (obj->globbed == NULL)
47901+ return 0;
47902+
47903+ guser = &obj->globbed;
47904+ while (*guser) {
47905+ g_tmp = (struct acl_object_label *)
47906+ acl_alloc(sizeof (struct acl_object_label));
47907+ if (g_tmp == NULL)
47908+ return -ENOMEM;
47909+
47910+ if (copy_from_user(g_tmp, *guser,
47911+ sizeof (struct acl_object_label)))
47912+ return -EFAULT;
47913+
47914+ len = strnlen_user(g_tmp->filename, PATH_MAX);
47915+
47916+ if (!len || len >= PATH_MAX)
47917+ return -EINVAL;
47918+
47919+ if ((tmp = (char *) acl_alloc(len)) == NULL)
47920+ return -ENOMEM;
47921+
47922+ if (copy_from_user(tmp, g_tmp->filename, len))
47923+ return -EFAULT;
47924+ tmp[len-1] = '\0';
47925+ g_tmp->filename = tmp;
47926+
47927+ *guser = g_tmp;
47928+ guser = &(g_tmp->next);
47929+ }
47930+
47931+ return 0;
47932+}
47933+
47934+static int
47935+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
47936+ struct acl_role_label *role)
47937+{
47938+ struct acl_object_label *o_tmp;
47939+ unsigned int len;
47940+ int ret;
47941+ char *tmp;
47942+
47943+ while (userp) {
47944+ if ((o_tmp = (struct acl_object_label *)
47945+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
47946+ return -ENOMEM;
47947+
47948+ if (copy_from_user(o_tmp, userp,
47949+ sizeof (struct acl_object_label)))
47950+ return -EFAULT;
47951+
47952+ userp = o_tmp->prev;
47953+
47954+ len = strnlen_user(o_tmp->filename, PATH_MAX);
47955+
47956+ if (!len || len >= PATH_MAX)
47957+ return -EINVAL;
47958+
47959+ if ((tmp = (char *) acl_alloc(len)) == NULL)
47960+ return -ENOMEM;
47961+
47962+ if (copy_from_user(tmp, o_tmp->filename, len))
47963+ return -EFAULT;
47964+ tmp[len-1] = '\0';
47965+ o_tmp->filename = tmp;
47966+
47967+ insert_acl_obj_label(o_tmp, subj);
47968+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
47969+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
47970+ return -ENOMEM;
47971+
47972+ ret = copy_user_glob(o_tmp);
47973+ if (ret)
47974+ return ret;
47975+
47976+ if (o_tmp->nested) {
47977+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
47978+ if (IS_ERR(o_tmp->nested))
47979+ return PTR_ERR(o_tmp->nested);
47980+
47981+ /* insert into nested subject list */
47982+ o_tmp->nested->next = role->hash->first;
47983+ role->hash->first = o_tmp->nested;
47984+ }
47985+ }
47986+
47987+ return 0;
47988+}
47989+
47990+static __u32
47991+count_user_subjs(struct acl_subject_label *userp)
47992+{
47993+ struct acl_subject_label s_tmp;
47994+ __u32 num = 0;
47995+
47996+ while (userp) {
47997+ if (copy_from_user(&s_tmp, userp,
47998+ sizeof (struct acl_subject_label)))
47999+ break;
48000+
48001+ userp = s_tmp.prev;
48002+ /* do not count nested subjects against this count, since
48003+ they are not included in the hash table, but are
48004+ attached to objects. We have already counted
48005+ the subjects in userspace for the allocation
48006+ stack
48007+ */
48008+ if (!(s_tmp.mode & GR_NESTED))
48009+ num++;
48010+ }
48011+
48012+ return num;
48013+}
48014+
48015+static int
48016+copy_user_allowedips(struct acl_role_label *rolep)
48017+{
48018+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
48019+
48020+ ruserip = rolep->allowed_ips;
48021+
48022+ while (ruserip) {
48023+ rlast = rtmp;
48024+
48025+ if ((rtmp = (struct role_allowed_ip *)
48026+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
48027+ return -ENOMEM;
48028+
48029+ if (copy_from_user(rtmp, ruserip,
48030+ sizeof (struct role_allowed_ip)))
48031+ return -EFAULT;
48032+
48033+ ruserip = rtmp->prev;
48034+
48035+ if (!rlast) {
48036+ rtmp->prev = NULL;
48037+ rolep->allowed_ips = rtmp;
48038+ } else {
48039+ rlast->next = rtmp;
48040+ rtmp->prev = rlast;
48041+ }
48042+
48043+ if (!ruserip)
48044+ rtmp->next = NULL;
48045+ }
48046+
48047+ return 0;
48048+}
48049+
48050+static int
48051+copy_user_transitions(struct acl_role_label *rolep)
48052+{
48053+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
48054+
48055+ unsigned int len;
48056+ char *tmp;
48057+
48058+ rusertp = rolep->transitions;
48059+
48060+ while (rusertp) {
48061+ rlast = rtmp;
48062+
48063+ if ((rtmp = (struct role_transition *)
48064+ acl_alloc(sizeof (struct role_transition))) == NULL)
48065+ return -ENOMEM;
48066+
48067+ if (copy_from_user(rtmp, rusertp,
48068+ sizeof (struct role_transition)))
48069+ return -EFAULT;
48070+
48071+ rusertp = rtmp->prev;
48072+
48073+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
48074+
48075+ if (!len || len >= GR_SPROLE_LEN)
48076+ return -EINVAL;
48077+
48078+ if ((tmp = (char *) acl_alloc(len)) == NULL)
48079+ return -ENOMEM;
48080+
48081+ if (copy_from_user(tmp, rtmp->rolename, len))
48082+ return -EFAULT;
48083+ tmp[len-1] = '\0';
48084+ rtmp->rolename = tmp;
48085+
48086+ if (!rlast) {
48087+ rtmp->prev = NULL;
48088+ rolep->transitions = rtmp;
48089+ } else {
48090+ rlast->next = rtmp;
48091+ rtmp->prev = rlast;
48092+ }
48093+
48094+ if (!rusertp)
48095+ rtmp->next = NULL;
48096+ }
48097+
48098+ return 0;
48099+}
48100+
48101+static struct acl_subject_label *
48102+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
48103+{
48104+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
48105+ unsigned int len;
48106+ char *tmp;
48107+ __u32 num_objs;
48108+ struct acl_ip_label **i_tmp, *i_utmp2;
48109+ struct gr_hash_struct ghash;
48110+ struct subject_map *subjmap;
48111+ unsigned int i_num;
48112+ int err;
48113+
48114+ s_tmp = lookup_subject_map(userp);
48115+
48116+ /* we've already copied this subject into the kernel, just return
48117+ the reference to it, and don't copy it over again
48118+ */
48119+ if (s_tmp)
48120+ return(s_tmp);
48121+
48122+ if ((s_tmp = (struct acl_subject_label *)
48123+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
48124+ return ERR_PTR(-ENOMEM);
48125+
48126+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
48127+ if (subjmap == NULL)
48128+ return ERR_PTR(-ENOMEM);
48129+
48130+ subjmap->user = userp;
48131+ subjmap->kernel = s_tmp;
48132+ insert_subj_map_entry(subjmap);
48133+
48134+ if (copy_from_user(s_tmp, userp,
48135+ sizeof (struct acl_subject_label)))
48136+ return ERR_PTR(-EFAULT);
48137+
48138+ len = strnlen_user(s_tmp->filename, PATH_MAX);
48139+
48140+ if (!len || len >= PATH_MAX)
48141+ return ERR_PTR(-EINVAL);
48142+
48143+ if ((tmp = (char *) acl_alloc(len)) == NULL)
48144+ return ERR_PTR(-ENOMEM);
48145+
48146+ if (copy_from_user(tmp, s_tmp->filename, len))
48147+ return ERR_PTR(-EFAULT);
48148+ tmp[len-1] = '\0';
48149+ s_tmp->filename = tmp;
48150+
48151+ if (!strcmp(s_tmp->filename, "/"))
48152+ role->root_label = s_tmp;
48153+
48154+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
48155+ return ERR_PTR(-EFAULT);
48156+
48157+ /* copy user and group transition tables */
48158+
48159+ if (s_tmp->user_trans_num) {
48160+ uid_t *uidlist;
48161+
48162+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
48163+ if (uidlist == NULL)
48164+ return ERR_PTR(-ENOMEM);
48165+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
48166+ return ERR_PTR(-EFAULT);
48167+
48168+ s_tmp->user_transitions = uidlist;
48169+ }
48170+
48171+ if (s_tmp->group_trans_num) {
48172+ gid_t *gidlist;
48173+
48174+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
48175+ if (gidlist == NULL)
48176+ return ERR_PTR(-ENOMEM);
48177+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
48178+ return ERR_PTR(-EFAULT);
48179+
48180+ s_tmp->group_transitions = gidlist;
48181+ }
48182+
48183+ /* set up object hash table */
48184+ num_objs = count_user_objs(ghash.first);
48185+
48186+ s_tmp->obj_hash_size = num_objs;
48187+ s_tmp->obj_hash =
48188+ (struct acl_object_label **)
48189+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
48190+
48191+ if (!s_tmp->obj_hash)
48192+ return ERR_PTR(-ENOMEM);
48193+
48194+ memset(s_tmp->obj_hash, 0,
48195+ s_tmp->obj_hash_size *
48196+ sizeof (struct acl_object_label *));
48197+
48198+ /* add in objects */
48199+ err = copy_user_objs(ghash.first, s_tmp, role);
48200+
48201+ if (err)
48202+ return ERR_PTR(err);
48203+
48204+ /* set pointer for parent subject */
48205+ if (s_tmp->parent_subject) {
48206+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
48207+
48208+ if (IS_ERR(s_tmp2))
48209+ return s_tmp2;
48210+
48211+ s_tmp->parent_subject = s_tmp2;
48212+ }
48213+
48214+ /* add in ip acls */
48215+
48216+ if (!s_tmp->ip_num) {
48217+ s_tmp->ips = NULL;
48218+ goto insert;
48219+ }
48220+
48221+ i_tmp =
48222+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
48223+ sizeof (struct acl_ip_label *));
48224+
48225+ if (!i_tmp)
48226+ return ERR_PTR(-ENOMEM);
48227+
48228+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
48229+ *(i_tmp + i_num) =
48230+ (struct acl_ip_label *)
48231+ acl_alloc(sizeof (struct acl_ip_label));
48232+ if (!*(i_tmp + i_num))
48233+ return ERR_PTR(-ENOMEM);
48234+
48235+ if (copy_from_user
48236+ (&i_utmp2, s_tmp->ips + i_num,
48237+ sizeof (struct acl_ip_label *)))
48238+ return ERR_PTR(-EFAULT);
48239+
48240+ if (copy_from_user
48241+ (*(i_tmp + i_num), i_utmp2,
48242+ sizeof (struct acl_ip_label)))
48243+ return ERR_PTR(-EFAULT);
48244+
48245+ if ((*(i_tmp + i_num))->iface == NULL)
48246+ continue;
48247+
48248+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
48249+ if (!len || len >= IFNAMSIZ)
48250+ return ERR_PTR(-EINVAL);
48251+ tmp = acl_alloc(len);
48252+ if (tmp == NULL)
48253+ return ERR_PTR(-ENOMEM);
48254+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
48255+ return ERR_PTR(-EFAULT);
48256+ (*(i_tmp + i_num))->iface = tmp;
48257+ }
48258+
48259+ s_tmp->ips = i_tmp;
48260+
48261+insert:
48262+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
48263+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
48264+ return ERR_PTR(-ENOMEM);
48265+
48266+ return s_tmp;
48267+}
48268+
48269+static int
48270+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
48271+{
48272+ struct acl_subject_label s_pre;
48273+ struct acl_subject_label * ret;
48274+ int err;
48275+
48276+ while (userp) {
48277+ if (copy_from_user(&s_pre, userp,
48278+ sizeof (struct acl_subject_label)))
48279+ return -EFAULT;
48280+
48281+ /* do not add nested subjects here, add
48282+ while parsing objects
48283+ */
48284+
48285+ if (s_pre.mode & GR_NESTED) {
48286+ userp = s_pre.prev;
48287+ continue;
48288+ }
48289+
48290+ ret = do_copy_user_subj(userp, role);
48291+
48292+ err = PTR_ERR(ret);
48293+ if (IS_ERR(ret))
48294+ return err;
48295+
48296+ insert_acl_subj_label(ret, role);
48297+
48298+ userp = s_pre.prev;
48299+ }
48300+
48301+ return 0;
48302+}
48303+
48304+static int
48305+copy_user_acl(struct gr_arg *arg)
48306+{
48307+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
48308+ struct sprole_pw *sptmp;
48309+ struct gr_hash_struct *ghash;
48310+ uid_t *domainlist;
48311+ unsigned int r_num;
48312+ unsigned int len;
48313+ char *tmp;
48314+ int err = 0;
48315+ __u16 i;
48316+ __u32 num_subjs;
48317+
48318+ /* we need a default and kernel role */
48319+ if (arg->role_db.num_roles < 2)
48320+ return -EINVAL;
48321+
48322+ /* copy special role authentication info from userspace */
48323+
48324+ num_sprole_pws = arg->num_sprole_pws;
48325+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
48326+
48327+ if (!acl_special_roles) {
48328+ err = -ENOMEM;
48329+ goto cleanup;
48330+ }
48331+
48332+ for (i = 0; i < num_sprole_pws; i++) {
48333+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
48334+ if (!sptmp) {
48335+ err = -ENOMEM;
48336+ goto cleanup;
48337+ }
48338+ if (copy_from_user(sptmp, arg->sprole_pws + i,
48339+ sizeof (struct sprole_pw))) {
48340+ err = -EFAULT;
48341+ goto cleanup;
48342+ }
48343+
48344+ len =
48345+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
48346+
48347+ if (!len || len >= GR_SPROLE_LEN) {
48348+ err = -EINVAL;
48349+ goto cleanup;
48350+ }
48351+
48352+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
48353+ err = -ENOMEM;
48354+ goto cleanup;
48355+ }
48356+
48357+ if (copy_from_user(tmp, sptmp->rolename, len)) {
48358+ err = -EFAULT;
48359+ goto cleanup;
48360+ }
48361+ tmp[len-1] = '\0';
48362+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48363+ printk(KERN_ALERT "Copying special role %s\n", tmp);
48364+#endif
48365+ sptmp->rolename = tmp;
48366+ acl_special_roles[i] = sptmp;
48367+ }
48368+
48369+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
48370+
48371+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
48372+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
48373+
48374+ if (!r_tmp) {
48375+ err = -ENOMEM;
48376+ goto cleanup;
48377+ }
48378+
48379+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
48380+ sizeof (struct acl_role_label *))) {
48381+ err = -EFAULT;
48382+ goto cleanup;
48383+ }
48384+
48385+ if (copy_from_user(r_tmp, r_utmp2,
48386+ sizeof (struct acl_role_label))) {
48387+ err = -EFAULT;
48388+ goto cleanup;
48389+ }
48390+
48391+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
48392+
48393+ if (!len || len >= PATH_MAX) {
48394+ err = -EINVAL;
48395+ goto cleanup;
48396+ }
48397+
48398+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
48399+ err = -ENOMEM;
48400+ goto cleanup;
48401+ }
48402+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
48403+ err = -EFAULT;
48404+ goto cleanup;
48405+ }
48406+ tmp[len-1] = '\0';
48407+ r_tmp->rolename = tmp;
48408+
48409+ if (!strcmp(r_tmp->rolename, "default")
48410+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
48411+ default_role = r_tmp;
48412+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
48413+ kernel_role = r_tmp;
48414+ }
48415+
48416+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
48417+ err = -ENOMEM;
48418+ goto cleanup;
48419+ }
48420+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
48421+ err = -EFAULT;
48422+ goto cleanup;
48423+ }
48424+
48425+ r_tmp->hash = ghash;
48426+
48427+ num_subjs = count_user_subjs(r_tmp->hash->first);
48428+
48429+ r_tmp->subj_hash_size = num_subjs;
48430+ r_tmp->subj_hash =
48431+ (struct acl_subject_label **)
48432+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
48433+
48434+ if (!r_tmp->subj_hash) {
48435+ err = -ENOMEM;
48436+ goto cleanup;
48437+ }
48438+
48439+ err = copy_user_allowedips(r_tmp);
48440+ if (err)
48441+ goto cleanup;
48442+
48443+ /* copy domain info */
48444+ if (r_tmp->domain_children != NULL) {
48445+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
48446+ if (domainlist == NULL) {
48447+ err = -ENOMEM;
48448+ goto cleanup;
48449+ }
48450+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
48451+ err = -EFAULT;
48452+ goto cleanup;
48453+ }
48454+ r_tmp->domain_children = domainlist;
48455+ }
48456+
48457+ err = copy_user_transitions(r_tmp);
48458+ if (err)
48459+ goto cleanup;
48460+
48461+ memset(r_tmp->subj_hash, 0,
48462+ r_tmp->subj_hash_size *
48463+ sizeof (struct acl_subject_label *));
48464+
48465+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
48466+
48467+ if (err)
48468+ goto cleanup;
48469+
48470+ /* set nested subject list to null */
48471+ r_tmp->hash->first = NULL;
48472+
48473+ insert_acl_role_label(r_tmp);
48474+ }
48475+
48476+ goto return_err;
48477+ cleanup:
48478+ free_variables();
48479+ return_err:
48480+ return err;
48481+
48482+}
48483+
48484+static int
48485+gracl_init(struct gr_arg *args)
48486+{
48487+ int error = 0;
48488+
48489+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
48490+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
48491+
48492+ if (init_variables(args)) {
48493+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
48494+ error = -ENOMEM;
48495+ free_variables();
48496+ goto out;
48497+ }
48498+
48499+ error = copy_user_acl(args);
48500+ free_init_variables();
48501+ if (error) {
48502+ free_variables();
48503+ goto out;
48504+ }
48505+
48506+ if ((error = gr_set_acls(0))) {
48507+ free_variables();
48508+ goto out;
48509+ }
48510+
48511+ pax_open_kernel();
48512+ gr_status |= GR_READY;
48513+ pax_close_kernel();
48514+
48515+ out:
48516+ return error;
48517+}
48518+
48519+/* derived from glibc fnmatch() 0: match, 1: no match*/
48520+
48521+static int
48522+glob_match(const char *p, const char *n)
48523+{
48524+ char c;
48525+
48526+ while ((c = *p++) != '\0') {
48527+ switch (c) {
48528+ case '?':
48529+ if (*n == '\0')
48530+ return 1;
48531+ else if (*n == '/')
48532+ return 1;
48533+ break;
48534+ case '\\':
48535+ if (*n != c)
48536+ return 1;
48537+ break;
48538+ case '*':
48539+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
48540+ if (*n == '/')
48541+ return 1;
48542+ else if (c == '?') {
48543+ if (*n == '\0')
48544+ return 1;
48545+ else
48546+ ++n;
48547+ }
48548+ }
48549+ if (c == '\0') {
48550+ return 0;
48551+ } else {
48552+ const char *endp;
48553+
48554+ if ((endp = strchr(n, '/')) == NULL)
48555+ endp = n + strlen(n);
48556+
48557+ if (c == '[') {
48558+ for (--p; n < endp; ++n)
48559+ if (!glob_match(p, n))
48560+ return 0;
48561+ } else if (c == '/') {
48562+ while (*n != '\0' && *n != '/')
48563+ ++n;
48564+ if (*n == '/' && !glob_match(p, n + 1))
48565+ return 0;
48566+ } else {
48567+ for (--p; n < endp; ++n)
48568+ if (*n == c && !glob_match(p, n))
48569+ return 0;
48570+ }
48571+
48572+ return 1;
48573+ }
48574+ case '[':
48575+ {
48576+ int not;
48577+ char cold;
48578+
48579+ if (*n == '\0' || *n == '/')
48580+ return 1;
48581+
48582+ not = (*p == '!' || *p == '^');
48583+ if (not)
48584+ ++p;
48585+
48586+ c = *p++;
48587+ for (;;) {
48588+ unsigned char fn = (unsigned char)*n;
48589+
48590+ if (c == '\0')
48591+ return 1;
48592+ else {
48593+ if (c == fn)
48594+ goto matched;
48595+ cold = c;
48596+ c = *p++;
48597+
48598+ if (c == '-' && *p != ']') {
48599+ unsigned char cend = *p++;
48600+
48601+ if (cend == '\0')
48602+ return 1;
48603+
48604+ if (cold <= fn && fn <= cend)
48605+ goto matched;
48606+
48607+ c = *p++;
48608+ }
48609+ }
48610+
48611+ if (c == ']')
48612+ break;
48613+ }
48614+ if (!not)
48615+ return 1;
48616+ break;
48617+ matched:
48618+ while (c != ']') {
48619+ if (c == '\0')
48620+ return 1;
48621+
48622+ c = *p++;
48623+ }
48624+ if (not)
48625+ return 1;
48626+ }
48627+ break;
48628+ default:
48629+ if (c != *n)
48630+ return 1;
48631+ }
48632+
48633+ ++n;
48634+ }
48635+
48636+ if (*n == '\0')
48637+ return 0;
48638+
48639+ if (*n == '/')
48640+ return 0;
48641+
48642+ return 1;
48643+}
48644+
48645+static struct acl_object_label *
48646+chk_glob_label(struct acl_object_label *globbed,
48647+ struct dentry *dentry, struct vfsmount *mnt, char **path)
48648+{
48649+ struct acl_object_label *tmp;
48650+
48651+ if (*path == NULL)
48652+ *path = gr_to_filename_nolock(dentry, mnt);
48653+
48654+ tmp = globbed;
48655+
48656+ while (tmp) {
48657+ if (!glob_match(tmp->filename, *path))
48658+ return tmp;
48659+ tmp = tmp->next;
48660+ }
48661+
48662+ return NULL;
48663+}
48664+
48665+static struct acl_object_label *
48666+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
48667+ const ino_t curr_ino, const dev_t curr_dev,
48668+ const struct acl_subject_label *subj, char **path, const int checkglob)
48669+{
48670+ struct acl_subject_label *tmpsubj;
48671+ struct acl_object_label *retval;
48672+ struct acl_object_label *retval2;
48673+
48674+ tmpsubj = (struct acl_subject_label *) subj;
48675+ read_lock(&gr_inode_lock);
48676+ do {
48677+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
48678+ if (retval) {
48679+ if (checkglob && retval->globbed) {
48680+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
48681+ (struct vfsmount *)orig_mnt, path);
48682+ if (retval2)
48683+ retval = retval2;
48684+ }
48685+ break;
48686+ }
48687+ } while ((tmpsubj = tmpsubj->parent_subject));
48688+ read_unlock(&gr_inode_lock);
48689+
48690+ return retval;
48691+}
48692+
48693+static __inline__ struct acl_object_label *
48694+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
48695+ struct dentry *curr_dentry,
48696+ const struct acl_subject_label *subj, char **path, const int checkglob)
48697+{
48698+ int newglob = checkglob;
48699+ ino_t inode;
48700+ dev_t device;
48701+
48702+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
48703+ as we don't want a / * rule to match instead of the / object
48704+ don't do this for create lookups that call this function though, since they're looking up
48705+ on the parent and thus need globbing checks on all paths
48706+ */
48707+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
48708+ newglob = GR_NO_GLOB;
48709+
48710+ spin_lock(&curr_dentry->d_lock);
48711+ inode = curr_dentry->d_inode->i_ino;
48712+ device = __get_dev(curr_dentry);
48713+ spin_unlock(&curr_dentry->d_lock);
48714+
48715+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
48716+}
48717+
48718+static struct acl_object_label *
48719+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48720+ const struct acl_subject_label *subj, char *path, const int checkglob)
48721+{
48722+ struct dentry *dentry = (struct dentry *) l_dentry;
48723+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
48724+ struct acl_object_label *retval;
48725+ struct dentry *parent;
48726+
48727+ write_seqlock(&rename_lock);
48728+ br_read_lock(vfsmount_lock);
48729+
48730+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
48731+#ifdef CONFIG_NET
48732+ mnt == sock_mnt ||
48733+#endif
48734+#ifdef CONFIG_HUGETLBFS
48735+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
48736+#endif
48737+ /* ignore Eric Biederman */
48738+ IS_PRIVATE(l_dentry->d_inode))) {
48739+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
48740+ goto out;
48741+ }
48742+
48743+ for (;;) {
48744+ if (dentry == real_root.dentry && mnt == real_root.mnt)
48745+ break;
48746+
48747+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
48748+ if (mnt->mnt_parent == mnt)
48749+ break;
48750+
48751+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48752+ if (retval != NULL)
48753+ goto out;
48754+
48755+ dentry = mnt->mnt_mountpoint;
48756+ mnt = mnt->mnt_parent;
48757+ continue;
48758+ }
48759+
48760+ parent = dentry->d_parent;
48761+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48762+ if (retval != NULL)
48763+ goto out;
48764+
48765+ dentry = parent;
48766+ }
48767+
48768+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48769+
48770+ /* real_root is pinned so we don't have to hold a reference */
48771+ if (retval == NULL)
48772+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
48773+out:
48774+ br_read_unlock(vfsmount_lock);
48775+ write_sequnlock(&rename_lock);
48776+
48777+ BUG_ON(retval == NULL);
48778+
48779+ return retval;
48780+}
48781+
48782+static __inline__ struct acl_object_label *
48783+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48784+ const struct acl_subject_label *subj)
48785+{
48786+ char *path = NULL;
48787+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
48788+}
48789+
48790+static __inline__ struct acl_object_label *
48791+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48792+ const struct acl_subject_label *subj)
48793+{
48794+ char *path = NULL;
48795+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
48796+}
48797+
48798+static __inline__ struct acl_object_label *
48799+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48800+ const struct acl_subject_label *subj, char *path)
48801+{
48802+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
48803+}
48804+
48805+static struct acl_subject_label *
48806+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48807+ const struct acl_role_label *role)
48808+{
48809+ struct dentry *dentry = (struct dentry *) l_dentry;
48810+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
48811+ struct acl_subject_label *retval;
48812+ struct dentry *parent;
48813+
48814+ write_seqlock(&rename_lock);
48815+ br_read_lock(vfsmount_lock);
48816+
48817+ for (;;) {
48818+ if (dentry == real_root.dentry && mnt == real_root.mnt)
48819+ break;
48820+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
48821+ if (mnt->mnt_parent == mnt)
48822+ break;
48823+
48824+ spin_lock(&dentry->d_lock);
48825+ read_lock(&gr_inode_lock);
48826+ retval =
48827+ lookup_acl_subj_label(dentry->d_inode->i_ino,
48828+ __get_dev(dentry), role);
48829+ read_unlock(&gr_inode_lock);
48830+ spin_unlock(&dentry->d_lock);
48831+ if (retval != NULL)
48832+ goto out;
48833+
48834+ dentry = mnt->mnt_mountpoint;
48835+ mnt = mnt->mnt_parent;
48836+ continue;
48837+ }
48838+
48839+ spin_lock(&dentry->d_lock);
48840+ read_lock(&gr_inode_lock);
48841+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
48842+ __get_dev(dentry), role);
48843+ read_unlock(&gr_inode_lock);
48844+ parent = dentry->d_parent;
48845+ spin_unlock(&dentry->d_lock);
48846+
48847+ if (retval != NULL)
48848+ goto out;
48849+
48850+ dentry = parent;
48851+ }
48852+
48853+ spin_lock(&dentry->d_lock);
48854+ read_lock(&gr_inode_lock);
48855+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
48856+ __get_dev(dentry), role);
48857+ read_unlock(&gr_inode_lock);
48858+ spin_unlock(&dentry->d_lock);
48859+
48860+ if (unlikely(retval == NULL)) {
48861+ /* real_root is pinned, we don't need to hold a reference */
48862+ read_lock(&gr_inode_lock);
48863+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
48864+ __get_dev(real_root.dentry), role);
48865+ read_unlock(&gr_inode_lock);
48866+ }
48867+out:
48868+ br_read_unlock(vfsmount_lock);
48869+ write_sequnlock(&rename_lock);
48870+
48871+ BUG_ON(retval == NULL);
48872+
48873+ return retval;
48874+}
48875+
48876+static void
48877+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
48878+{
48879+ struct task_struct *task = current;
48880+ const struct cred *cred = current_cred();
48881+
48882+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
48883+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48884+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48885+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
48886+
48887+ return;
48888+}
48889+
48890+static void
48891+gr_log_learn_sysctl(const char *path, const __u32 mode)
48892+{
48893+ struct task_struct *task = current;
48894+ const struct cred *cred = current_cred();
48895+
48896+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
48897+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48898+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48899+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
48900+
48901+ return;
48902+}
48903+
48904+static void
48905+gr_log_learn_id_change(const char type, const unsigned int real,
48906+ const unsigned int effective, const unsigned int fs)
48907+{
48908+ struct task_struct *task = current;
48909+ const struct cred *cred = current_cred();
48910+
48911+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
48912+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48913+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48914+ type, real, effective, fs, &task->signal->saved_ip);
48915+
48916+ return;
48917+}
48918+
48919+__u32
48920+gr_search_file(const struct dentry * dentry, const __u32 mode,
48921+ const struct vfsmount * mnt)
48922+{
48923+ __u32 retval = mode;
48924+ struct acl_subject_label *curracl;
48925+ struct acl_object_label *currobj;
48926+
48927+ if (unlikely(!(gr_status & GR_READY)))
48928+ return (mode & ~GR_AUDITS);
48929+
48930+ curracl = current->acl;
48931+
48932+ currobj = chk_obj_label(dentry, mnt, curracl);
48933+ retval = currobj->mode & mode;
48934+
48935+ /* if we're opening a specified transfer file for writing
48936+ (e.g. /dev/initctl), then transfer our role to init
48937+ */
48938+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
48939+ current->role->roletype & GR_ROLE_PERSIST)) {
48940+ struct task_struct *task = init_pid_ns.child_reaper;
48941+
48942+ if (task->role != current->role) {
48943+ task->acl_sp_role = 0;
48944+ task->acl_role_id = current->acl_role_id;
48945+ task->role = current->role;
48946+ rcu_read_lock();
48947+ read_lock(&grsec_exec_file_lock);
48948+ gr_apply_subject_to_task(task);
48949+ read_unlock(&grsec_exec_file_lock);
48950+ rcu_read_unlock();
48951+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
48952+ }
48953+ }
48954+
48955+ if (unlikely
48956+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
48957+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
48958+ __u32 new_mode = mode;
48959+
48960+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
48961+
48962+ retval = new_mode;
48963+
48964+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
48965+ new_mode |= GR_INHERIT;
48966+
48967+ if (!(mode & GR_NOLEARN))
48968+ gr_log_learn(dentry, mnt, new_mode);
48969+ }
48970+
48971+ return retval;
48972+}
48973+
48974+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
48975+ const struct dentry *parent,
48976+ const struct vfsmount *mnt)
48977+{
48978+ struct name_entry *match;
48979+ struct acl_object_label *matchpo;
48980+ struct acl_subject_label *curracl;
48981+ char *path;
48982+
48983+ if (unlikely(!(gr_status & GR_READY)))
48984+ return NULL;
48985+
48986+ preempt_disable();
48987+ path = gr_to_filename_rbac(new_dentry, mnt);
48988+ match = lookup_name_entry_create(path);
48989+
48990+ curracl = current->acl;
48991+
48992+ if (match) {
48993+ read_lock(&gr_inode_lock);
48994+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
48995+ read_unlock(&gr_inode_lock);
48996+
48997+ if (matchpo) {
48998+ preempt_enable();
48999+ return matchpo;
49000+ }
49001+ }
49002+
49003+ // lookup parent
49004+
49005+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
49006+
49007+ preempt_enable();
49008+ return matchpo;
49009+}
49010+
49011+__u32
49012+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
49013+ const struct vfsmount * mnt, const __u32 mode)
49014+{
49015+ struct acl_object_label *matchpo;
49016+ __u32 retval;
49017+
49018+ if (unlikely(!(gr_status & GR_READY)))
49019+ return (mode & ~GR_AUDITS);
49020+
49021+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
49022+
49023+ retval = matchpo->mode & mode;
49024+
49025+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
49026+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
49027+ __u32 new_mode = mode;
49028+
49029+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49030+
49031+ gr_log_learn(new_dentry, mnt, new_mode);
49032+ return new_mode;
49033+ }
49034+
49035+ return retval;
49036+}
49037+
49038+__u32
49039+gr_check_link(const struct dentry * new_dentry,
49040+ const struct dentry * parent_dentry,
49041+ const struct vfsmount * parent_mnt,
49042+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
49043+{
49044+ struct acl_object_label *obj;
49045+ __u32 oldmode, newmode;
49046+ __u32 needmode;
49047+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
49048+ GR_DELETE | GR_INHERIT;
49049+
49050+ if (unlikely(!(gr_status & GR_READY)))
49051+ return (GR_CREATE | GR_LINK);
49052+
49053+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
49054+ oldmode = obj->mode;
49055+
49056+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
49057+ newmode = obj->mode;
49058+
49059+ needmode = newmode & checkmodes;
49060+
49061+ // old name for hardlink must have at least the permissions of the new name
49062+ if ((oldmode & needmode) != needmode)
49063+ goto bad;
49064+
49065+ // if old name had restrictions/auditing, make sure the new name does as well
49066+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
49067+
49068+ // don't allow hardlinking of suid/sgid files without permission
49069+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
49070+ needmode |= GR_SETID;
49071+
49072+ if ((newmode & needmode) != needmode)
49073+ goto bad;
49074+
49075+ // enforce minimum permissions
49076+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
49077+ return newmode;
49078+bad:
49079+ needmode = oldmode;
49080+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
49081+ needmode |= GR_SETID;
49082+
49083+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
49084+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
49085+ return (GR_CREATE | GR_LINK);
49086+ } else if (newmode & GR_SUPPRESS)
49087+ return GR_SUPPRESS;
49088+ else
49089+ return 0;
49090+}
49091+
49092+int
49093+gr_check_hidden_task(const struct task_struct *task)
49094+{
49095+ if (unlikely(!(gr_status & GR_READY)))
49096+ return 0;
49097+
49098+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
49099+ return 1;
49100+
49101+ return 0;
49102+}
49103+
49104+int
49105+gr_check_protected_task(const struct task_struct *task)
49106+{
49107+ if (unlikely(!(gr_status & GR_READY) || !task))
49108+ return 0;
49109+
49110+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
49111+ task->acl != current->acl)
49112+ return 1;
49113+
49114+ return 0;
49115+}
49116+
49117+int
49118+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
49119+{
49120+ struct task_struct *p;
49121+ int ret = 0;
49122+
49123+ if (unlikely(!(gr_status & GR_READY) || !pid))
49124+ return ret;
49125+
49126+ read_lock(&tasklist_lock);
49127+ do_each_pid_task(pid, type, p) {
49128+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
49129+ p->acl != current->acl) {
49130+ ret = 1;
49131+ goto out;
49132+ }
49133+ } while_each_pid_task(pid, type, p);
49134+out:
49135+ read_unlock(&tasklist_lock);
49136+
49137+ return ret;
49138+}
49139+
49140+void
49141+gr_copy_label(struct task_struct *tsk)
49142+{
49143+ tsk->signal->used_accept = 0;
49144+ tsk->acl_sp_role = 0;
49145+ tsk->acl_role_id = current->acl_role_id;
49146+ tsk->acl = current->acl;
49147+ tsk->role = current->role;
49148+ tsk->signal->curr_ip = current->signal->curr_ip;
49149+ tsk->signal->saved_ip = current->signal->saved_ip;
49150+ if (current->exec_file)
49151+ get_file(current->exec_file);
49152+ tsk->exec_file = current->exec_file;
49153+ tsk->is_writable = current->is_writable;
49154+ if (unlikely(current->signal->used_accept)) {
49155+ current->signal->curr_ip = 0;
49156+ current->signal->saved_ip = 0;
49157+ }
49158+
49159+ return;
49160+}
49161+
49162+static void
49163+gr_set_proc_res(struct task_struct *task)
49164+{
49165+ struct acl_subject_label *proc;
49166+ unsigned short i;
49167+
49168+ proc = task->acl;
49169+
49170+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
49171+ return;
49172+
49173+ for (i = 0; i < RLIM_NLIMITS; i++) {
49174+ if (!(proc->resmask & (1 << i)))
49175+ continue;
49176+
49177+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
49178+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
49179+ }
49180+
49181+ return;
49182+}
49183+
49184+extern int __gr_process_user_ban(struct user_struct *user);
49185+
49186+int
49187+gr_check_user_change(int real, int effective, int fs)
49188+{
49189+ unsigned int i;
49190+ __u16 num;
49191+ uid_t *uidlist;
49192+ int curuid;
49193+ int realok = 0;
49194+ int effectiveok = 0;
49195+ int fsok = 0;
49196+
49197+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49198+ struct user_struct *user;
49199+
49200+ if (real == -1)
49201+ goto skipit;
49202+
49203+ user = find_user(real);
49204+ if (user == NULL)
49205+ goto skipit;
49206+
49207+ if (__gr_process_user_ban(user)) {
49208+ /* for find_user */
49209+ free_uid(user);
49210+ return 1;
49211+ }
49212+
49213+ /* for find_user */
49214+ free_uid(user);
49215+
49216+skipit:
49217+#endif
49218+
49219+ if (unlikely(!(gr_status & GR_READY)))
49220+ return 0;
49221+
49222+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49223+ gr_log_learn_id_change('u', real, effective, fs);
49224+
49225+ num = current->acl->user_trans_num;
49226+ uidlist = current->acl->user_transitions;
49227+
49228+ if (uidlist == NULL)
49229+ return 0;
49230+
49231+ if (real == -1)
49232+ realok = 1;
49233+ if (effective == -1)
49234+ effectiveok = 1;
49235+ if (fs == -1)
49236+ fsok = 1;
49237+
49238+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
49239+ for (i = 0; i < num; i++) {
49240+ curuid = (int)uidlist[i];
49241+ if (real == curuid)
49242+ realok = 1;
49243+ if (effective == curuid)
49244+ effectiveok = 1;
49245+ if (fs == curuid)
49246+ fsok = 1;
49247+ }
49248+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
49249+ for (i = 0; i < num; i++) {
49250+ curuid = (int)uidlist[i];
49251+ if (real == curuid)
49252+ break;
49253+ if (effective == curuid)
49254+ break;
49255+ if (fs == curuid)
49256+ break;
49257+ }
49258+ /* not in deny list */
49259+ if (i == num) {
49260+ realok = 1;
49261+ effectiveok = 1;
49262+ fsok = 1;
49263+ }
49264+ }
49265+
49266+ if (realok && effectiveok && fsok)
49267+ return 0;
49268+ else {
49269+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49270+ return 1;
49271+ }
49272+}
49273+
49274+int
49275+gr_check_group_change(int real, int effective, int fs)
49276+{
49277+ unsigned int i;
49278+ __u16 num;
49279+ gid_t *gidlist;
49280+ int curgid;
49281+ int realok = 0;
49282+ int effectiveok = 0;
49283+ int fsok = 0;
49284+
49285+ if (unlikely(!(gr_status & GR_READY)))
49286+ return 0;
49287+
49288+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49289+ gr_log_learn_id_change('g', real, effective, fs);
49290+
49291+ num = current->acl->group_trans_num;
49292+ gidlist = current->acl->group_transitions;
49293+
49294+ if (gidlist == NULL)
49295+ return 0;
49296+
49297+ if (real == -1)
49298+ realok = 1;
49299+ if (effective == -1)
49300+ effectiveok = 1;
49301+ if (fs == -1)
49302+ fsok = 1;
49303+
49304+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
49305+ for (i = 0; i < num; i++) {
49306+ curgid = (int)gidlist[i];
49307+ if (real == curgid)
49308+ realok = 1;
49309+ if (effective == curgid)
49310+ effectiveok = 1;
49311+ if (fs == curgid)
49312+ fsok = 1;
49313+ }
49314+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
49315+ for (i = 0; i < num; i++) {
49316+ curgid = (int)gidlist[i];
49317+ if (real == curgid)
49318+ break;
49319+ if (effective == curgid)
49320+ break;
49321+ if (fs == curgid)
49322+ break;
49323+ }
49324+ /* not in deny list */
49325+ if (i == num) {
49326+ realok = 1;
49327+ effectiveok = 1;
49328+ fsok = 1;
49329+ }
49330+ }
49331+
49332+ if (realok && effectiveok && fsok)
49333+ return 0;
49334+ else {
49335+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49336+ return 1;
49337+ }
49338+}
49339+
49340+void
49341+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
49342+{
49343+ struct acl_role_label *role = task->role;
49344+ struct acl_subject_label *subj = NULL;
49345+ struct acl_object_label *obj;
49346+ struct file *filp;
49347+
49348+ if (unlikely(!(gr_status & GR_READY)))
49349+ return;
49350+
49351+ filp = task->exec_file;
49352+
49353+ /* kernel process, we'll give them the kernel role */
49354+ if (unlikely(!filp)) {
49355+ task->role = kernel_role;
49356+ task->acl = kernel_role->root_label;
49357+ return;
49358+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
49359+ role = lookup_acl_role_label(task, uid, gid);
49360+
49361+ /* perform subject lookup in possibly new role
49362+ we can use this result below in the case where role == task->role
49363+ */
49364+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
49365+
49366+ /* if we changed uid/gid, but result in the same role
49367+ and are using inheritance, don't lose the inherited subject
49368+ if current subject is other than what normal lookup
49369+ would result in, we arrived via inheritance, don't
49370+ lose subject
49371+ */
49372+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
49373+ (subj == task->acl)))
49374+ task->acl = subj;
49375+
49376+ task->role = role;
49377+
49378+ task->is_writable = 0;
49379+
49380+ /* ignore additional mmap checks for processes that are writable
49381+ by the default ACL */
49382+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49383+ if (unlikely(obj->mode & GR_WRITE))
49384+ task->is_writable = 1;
49385+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
49386+ if (unlikely(obj->mode & GR_WRITE))
49387+ task->is_writable = 1;
49388+
49389+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49390+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49391+#endif
49392+
49393+ gr_set_proc_res(task);
49394+
49395+ return;
49396+}
49397+
49398+int
49399+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
49400+ const int unsafe_share)
49401+{
49402+ struct task_struct *task = current;
49403+ struct acl_subject_label *newacl;
49404+ struct acl_object_label *obj;
49405+ __u32 retmode;
49406+
49407+ if (unlikely(!(gr_status & GR_READY)))
49408+ return 0;
49409+
49410+ newacl = chk_subj_label(dentry, mnt, task->role);
49411+
49412+ task_lock(task);
49413+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
49414+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
49415+ !(task->role->roletype & GR_ROLE_GOD) &&
49416+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
49417+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
49418+ task_unlock(task);
49419+ if (unsafe_share)
49420+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
49421+ else
49422+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
49423+ return -EACCES;
49424+ }
49425+ task_unlock(task);
49426+
49427+ obj = chk_obj_label(dentry, mnt, task->acl);
49428+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
49429+
49430+ if (!(task->acl->mode & GR_INHERITLEARN) &&
49431+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
49432+ if (obj->nested)
49433+ task->acl = obj->nested;
49434+ else
49435+ task->acl = newacl;
49436+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
49437+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
49438+
49439+ task->is_writable = 0;
49440+
49441+ /* ignore additional mmap checks for processes that are writable
49442+ by the default ACL */
49443+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
49444+ if (unlikely(obj->mode & GR_WRITE))
49445+ task->is_writable = 1;
49446+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
49447+ if (unlikely(obj->mode & GR_WRITE))
49448+ task->is_writable = 1;
49449+
49450+ gr_set_proc_res(task);
49451+
49452+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49453+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49454+#endif
49455+ return 0;
49456+}
49457+
49458+/* always called with valid inodev ptr */
49459+static void
49460+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
49461+{
49462+ struct acl_object_label *matchpo;
49463+ struct acl_subject_label *matchps;
49464+ struct acl_subject_label *subj;
49465+ struct acl_role_label *role;
49466+ unsigned int x;
49467+
49468+ FOR_EACH_ROLE_START(role)
49469+ FOR_EACH_SUBJECT_START(role, subj, x)
49470+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
49471+ matchpo->mode |= GR_DELETED;
49472+ FOR_EACH_SUBJECT_END(subj,x)
49473+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
49474+ if (subj->inode == ino && subj->device == dev)
49475+ subj->mode |= GR_DELETED;
49476+ FOR_EACH_NESTED_SUBJECT_END(subj)
49477+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
49478+ matchps->mode |= GR_DELETED;
49479+ FOR_EACH_ROLE_END(role)
49480+
49481+ inodev->nentry->deleted = 1;
49482+
49483+ return;
49484+}
49485+
49486+void
49487+gr_handle_delete(const ino_t ino, const dev_t dev)
49488+{
49489+ struct inodev_entry *inodev;
49490+
49491+ if (unlikely(!(gr_status & GR_READY)))
49492+ return;
49493+
49494+ write_lock(&gr_inode_lock);
49495+ inodev = lookup_inodev_entry(ino, dev);
49496+ if (inodev != NULL)
49497+ do_handle_delete(inodev, ino, dev);
49498+ write_unlock(&gr_inode_lock);
49499+
49500+ return;
49501+}
49502+
49503+static void
49504+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
49505+ const ino_t newinode, const dev_t newdevice,
49506+ struct acl_subject_label *subj)
49507+{
49508+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
49509+ struct acl_object_label *match;
49510+
49511+ match = subj->obj_hash[index];
49512+
49513+ while (match && (match->inode != oldinode ||
49514+ match->device != olddevice ||
49515+ !(match->mode & GR_DELETED)))
49516+ match = match->next;
49517+
49518+ if (match && (match->inode == oldinode)
49519+ && (match->device == olddevice)
49520+ && (match->mode & GR_DELETED)) {
49521+ if (match->prev == NULL) {
49522+ subj->obj_hash[index] = match->next;
49523+ if (match->next != NULL)
49524+ match->next->prev = NULL;
49525+ } else {
49526+ match->prev->next = match->next;
49527+ if (match->next != NULL)
49528+ match->next->prev = match->prev;
49529+ }
49530+ match->prev = NULL;
49531+ match->next = NULL;
49532+ match->inode = newinode;
49533+ match->device = newdevice;
49534+ match->mode &= ~GR_DELETED;
49535+
49536+ insert_acl_obj_label(match, subj);
49537+ }
49538+
49539+ return;
49540+}
49541+
49542+static void
49543+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
49544+ const ino_t newinode, const dev_t newdevice,
49545+ struct acl_role_label *role)
49546+{
49547+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
49548+ struct acl_subject_label *match;
49549+
49550+ match = role->subj_hash[index];
49551+
49552+ while (match && (match->inode != oldinode ||
49553+ match->device != olddevice ||
49554+ !(match->mode & GR_DELETED)))
49555+ match = match->next;
49556+
49557+ if (match && (match->inode == oldinode)
49558+ && (match->device == olddevice)
49559+ && (match->mode & GR_DELETED)) {
49560+ if (match->prev == NULL) {
49561+ role->subj_hash[index] = match->next;
49562+ if (match->next != NULL)
49563+ match->next->prev = NULL;
49564+ } else {
49565+ match->prev->next = match->next;
49566+ if (match->next != NULL)
49567+ match->next->prev = match->prev;
49568+ }
49569+ match->prev = NULL;
49570+ match->next = NULL;
49571+ match->inode = newinode;
49572+ match->device = newdevice;
49573+ match->mode &= ~GR_DELETED;
49574+
49575+ insert_acl_subj_label(match, role);
49576+ }
49577+
49578+ return;
49579+}
49580+
49581+static void
49582+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
49583+ const ino_t newinode, const dev_t newdevice)
49584+{
49585+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
49586+ struct inodev_entry *match;
49587+
49588+ match = inodev_set.i_hash[index];
49589+
49590+ while (match && (match->nentry->inode != oldinode ||
49591+ match->nentry->device != olddevice || !match->nentry->deleted))
49592+ match = match->next;
49593+
49594+ if (match && (match->nentry->inode == oldinode)
49595+ && (match->nentry->device == olddevice) &&
49596+ match->nentry->deleted) {
49597+ if (match->prev == NULL) {
49598+ inodev_set.i_hash[index] = match->next;
49599+ if (match->next != NULL)
49600+ match->next->prev = NULL;
49601+ } else {
49602+ match->prev->next = match->next;
49603+ if (match->next != NULL)
49604+ match->next->prev = match->prev;
49605+ }
49606+ match->prev = NULL;
49607+ match->next = NULL;
49608+ match->nentry->inode = newinode;
49609+ match->nentry->device = newdevice;
49610+ match->nentry->deleted = 0;
49611+
49612+ insert_inodev_entry(match);
49613+ }
49614+
49615+ return;
49616+}
49617+
49618+static void
49619+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
49620+{
49621+ struct acl_subject_label *subj;
49622+ struct acl_role_label *role;
49623+ unsigned int x;
49624+
49625+ FOR_EACH_ROLE_START(role)
49626+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
49627+
49628+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
49629+ if ((subj->inode == ino) && (subj->device == dev)) {
49630+ subj->inode = ino;
49631+ subj->device = dev;
49632+ }
49633+ FOR_EACH_NESTED_SUBJECT_END(subj)
49634+ FOR_EACH_SUBJECT_START(role, subj, x)
49635+ update_acl_obj_label(matchn->inode, matchn->device,
49636+ ino, dev, subj);
49637+ FOR_EACH_SUBJECT_END(subj,x)
49638+ FOR_EACH_ROLE_END(role)
49639+
49640+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
49641+
49642+ return;
49643+}
49644+
49645+static void
49646+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
49647+ const struct vfsmount *mnt)
49648+{
49649+ ino_t ino = dentry->d_inode->i_ino;
49650+ dev_t dev = __get_dev(dentry);
49651+
49652+ __do_handle_create(matchn, ino, dev);
49653+
49654+ return;
49655+}
49656+
49657+void
49658+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
49659+{
49660+ struct name_entry *matchn;
49661+
49662+ if (unlikely(!(gr_status & GR_READY)))
49663+ return;
49664+
49665+ preempt_disable();
49666+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
49667+
49668+ if (unlikely((unsigned long)matchn)) {
49669+ write_lock(&gr_inode_lock);
49670+ do_handle_create(matchn, dentry, mnt);
49671+ write_unlock(&gr_inode_lock);
49672+ }
49673+ preempt_enable();
49674+
49675+ return;
49676+}
49677+
49678+void
49679+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
49680+{
49681+ struct name_entry *matchn;
49682+
49683+ if (unlikely(!(gr_status & GR_READY)))
49684+ return;
49685+
49686+ preempt_disable();
49687+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
49688+
49689+ if (unlikely((unsigned long)matchn)) {
49690+ write_lock(&gr_inode_lock);
49691+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
49692+ write_unlock(&gr_inode_lock);
49693+ }
49694+ preempt_enable();
49695+
49696+ return;
49697+}
49698+
49699+void
49700+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
49701+ struct dentry *old_dentry,
49702+ struct dentry *new_dentry,
49703+ struct vfsmount *mnt, const __u8 replace)
49704+{
49705+ struct name_entry *matchn;
49706+ struct inodev_entry *inodev;
49707+ ino_t old_ino = old_dentry->d_inode->i_ino;
49708+ dev_t old_dev = __get_dev(old_dentry);
49709+
49710+ /* vfs_rename swaps the name and parent link for old_dentry and
49711+ new_dentry
49712+ at this point, old_dentry has the new name, parent link, and inode
49713+ for the renamed file
49714+ if a file is being replaced by a rename, new_dentry has the inode
49715+ and name for the replaced file
49716+ */
49717+
49718+ if (unlikely(!(gr_status & GR_READY)))
49719+ return;
49720+
49721+ preempt_disable();
49722+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
49723+
49724+ /* we wouldn't have to check d_inode if it weren't for
49725+ NFS silly-renaming
49726+ */
49727+
49728+ write_lock(&gr_inode_lock);
49729+ if (unlikely(replace && new_dentry->d_inode)) {
49730+ ino_t new_ino = new_dentry->d_inode->i_ino;
49731+ dev_t new_dev = __get_dev(new_dentry);
49732+
49733+ inodev = lookup_inodev_entry(new_ino, new_dev);
49734+ if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
49735+ do_handle_delete(inodev, new_ino, new_dev);
49736+ }
49737+
49738+ inodev = lookup_inodev_entry(old_ino, old_dev);
49739+ if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
49740+ do_handle_delete(inodev, old_ino, old_dev);
49741+
49742+ if (unlikely((unsigned long)matchn))
49743+ do_handle_create(matchn, old_dentry, mnt);
49744+
49745+ write_unlock(&gr_inode_lock);
49746+ preempt_enable();
49747+
49748+ return;
49749+}
49750+
49751+static int
49752+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
49753+ unsigned char **sum)
49754+{
49755+ struct acl_role_label *r;
49756+ struct role_allowed_ip *ipp;
49757+ struct role_transition *trans;
49758+ unsigned int i;
49759+ int found = 0;
49760+ u32 curr_ip = current->signal->curr_ip;
49761+
49762+ current->signal->saved_ip = curr_ip;
49763+
49764+ /* check transition table */
49765+
49766+ for (trans = current->role->transitions; trans; trans = trans->next) {
49767+ if (!strcmp(rolename, trans->rolename)) {
49768+ found = 1;
49769+ break;
49770+ }
49771+ }
49772+
49773+ if (!found)
49774+ return 0;
49775+
49776+ /* handle special roles that do not require authentication
49777+ and check ip */
49778+
49779+ FOR_EACH_ROLE_START(r)
49780+ if (!strcmp(rolename, r->rolename) &&
49781+ (r->roletype & GR_ROLE_SPECIAL)) {
49782+ found = 0;
49783+ if (r->allowed_ips != NULL) {
49784+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
49785+ if ((ntohl(curr_ip) & ipp->netmask) ==
49786+ (ntohl(ipp->addr) & ipp->netmask))
49787+ found = 1;
49788+ }
49789+ } else
49790+ found = 2;
49791+ if (!found)
49792+ return 0;
49793+
49794+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
49795+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
49796+ *salt = NULL;
49797+ *sum = NULL;
49798+ return 1;
49799+ }
49800+ }
49801+ FOR_EACH_ROLE_END(r)
49802+
49803+ for (i = 0; i < num_sprole_pws; i++) {
49804+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
49805+ *salt = acl_special_roles[i]->salt;
49806+ *sum = acl_special_roles[i]->sum;
49807+ return 1;
49808+ }
49809+ }
49810+
49811+ return 0;
49812+}
49813+
49814+static void
49815+assign_special_role(char *rolename)
49816+{
49817+ struct acl_object_label *obj;
49818+ struct acl_role_label *r;
49819+ struct acl_role_label *assigned = NULL;
49820+ struct task_struct *tsk;
49821+ struct file *filp;
49822+
49823+ FOR_EACH_ROLE_START(r)
49824+ if (!strcmp(rolename, r->rolename) &&
49825+ (r->roletype & GR_ROLE_SPECIAL)) {
49826+ assigned = r;
49827+ break;
49828+ }
49829+ FOR_EACH_ROLE_END(r)
49830+
49831+ if (!assigned)
49832+ return;
49833+
49834+ read_lock(&tasklist_lock);
49835+ read_lock(&grsec_exec_file_lock);
49836+
49837+ tsk = current->real_parent;
49838+ if (tsk == NULL)
49839+ goto out_unlock;
49840+
49841+ filp = tsk->exec_file;
49842+ if (filp == NULL)
49843+ goto out_unlock;
49844+
49845+ tsk->is_writable = 0;
49846+
49847+ tsk->acl_sp_role = 1;
49848+ tsk->acl_role_id = ++acl_sp_role_value;
49849+ tsk->role = assigned;
49850+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
49851+
49852+ /* ignore additional mmap checks for processes that are writable
49853+ by the default ACL */
49854+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49855+ if (unlikely(obj->mode & GR_WRITE))
49856+ tsk->is_writable = 1;
49857+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
49858+ if (unlikely(obj->mode & GR_WRITE))
49859+ tsk->is_writable = 1;
49860+
49861+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49862+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
49863+#endif
49864+
49865+out_unlock:
49866+ read_unlock(&grsec_exec_file_lock);
49867+ read_unlock(&tasklist_lock);
49868+ return;
49869+}
49870+
49871+int gr_check_secure_terminal(struct task_struct *task)
49872+{
49873+ struct task_struct *p, *p2, *p3;
49874+ struct files_struct *files;
49875+ struct fdtable *fdt;
49876+ struct file *our_file = NULL, *file;
49877+ int i;
49878+
49879+ if (task->signal->tty == NULL)
49880+ return 1;
49881+
49882+ files = get_files_struct(task);
49883+ if (files != NULL) {
49884+ rcu_read_lock();
49885+ fdt = files_fdtable(files);
49886+ for (i=0; i < fdt->max_fds; i++) {
49887+ file = fcheck_files(files, i);
49888+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
49889+ get_file(file);
49890+ our_file = file;
49891+ }
49892+ }
49893+ rcu_read_unlock();
49894+ put_files_struct(files);
49895+ }
49896+
49897+ if (our_file == NULL)
49898+ return 1;
49899+
49900+ read_lock(&tasklist_lock);
49901+ do_each_thread(p2, p) {
49902+ files = get_files_struct(p);
49903+ if (files == NULL ||
49904+ (p->signal && p->signal->tty == task->signal->tty)) {
49905+ if (files != NULL)
49906+ put_files_struct(files);
49907+ continue;
49908+ }
49909+ rcu_read_lock();
49910+ fdt = files_fdtable(files);
49911+ for (i=0; i < fdt->max_fds; i++) {
49912+ file = fcheck_files(files, i);
49913+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
49914+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
49915+ p3 = task;
49916+ while (p3->pid > 0) {
49917+ if (p3 == p)
49918+ break;
49919+ p3 = p3->real_parent;
49920+ }
49921+ if (p3 == p)
49922+ break;
49923+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
49924+ gr_handle_alertkill(p);
49925+ rcu_read_unlock();
49926+ put_files_struct(files);
49927+ read_unlock(&tasklist_lock);
49928+ fput(our_file);
49929+ return 0;
49930+ }
49931+ }
49932+ rcu_read_unlock();
49933+ put_files_struct(files);
49934+ } while_each_thread(p2, p);
49935+ read_unlock(&tasklist_lock);
49936+
49937+ fput(our_file);
49938+ return 1;
49939+}
49940+
49941+ssize_t
49942+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
49943+{
49944+ struct gr_arg_wrapper uwrap;
49945+ unsigned char *sprole_salt = NULL;
49946+ unsigned char *sprole_sum = NULL;
49947+ int error = sizeof (struct gr_arg_wrapper);
49948+ int error2 = 0;
49949+
49950+ mutex_lock(&gr_dev_mutex);
49951+
49952+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
49953+ error = -EPERM;
49954+ goto out;
49955+ }
49956+
49957+ if (count != sizeof (struct gr_arg_wrapper)) {
49958+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
49959+ error = -EINVAL;
49960+ goto out;
49961+ }
49962+
49963+
49964+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
49965+ gr_auth_expires = 0;
49966+ gr_auth_attempts = 0;
49967+ }
49968+
49969+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
49970+ error = -EFAULT;
49971+ goto out;
49972+ }
49973+
49974+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
49975+ error = -EINVAL;
49976+ goto out;
49977+ }
49978+
49979+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
49980+ error = -EFAULT;
49981+ goto out;
49982+ }
49983+
49984+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
49985+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
49986+ time_after(gr_auth_expires, get_seconds())) {
49987+ error = -EBUSY;
49988+ goto out;
49989+ }
49990+
49991+ /* if non-root trying to do anything other than use a special role,
49992+ do not attempt authentication, do not count towards authentication
49993+ locking
49994+ */
49995+
49996+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
49997+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
49998+ current_uid()) {
49999+ error = -EPERM;
50000+ goto out;
50001+ }
50002+
50003+ /* ensure pw and special role name are null terminated */
50004+
50005+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
50006+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
50007+
50008+ /* Okay.
50009+ * We have our enough of the argument structure..(we have yet
50010+ * to copy_from_user the tables themselves) . Copy the tables
50011+ * only if we need them, i.e. for loading operations. */
50012+
50013+ switch (gr_usermode->mode) {
50014+ case GR_STATUS:
50015+ if (gr_status & GR_READY) {
50016+ error = 1;
50017+ if (!gr_check_secure_terminal(current))
50018+ error = 3;
50019+ } else
50020+ error = 2;
50021+ goto out;
50022+ case GR_SHUTDOWN:
50023+ if ((gr_status & GR_READY)
50024+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50025+ pax_open_kernel();
50026+ gr_status &= ~GR_READY;
50027+ pax_close_kernel();
50028+
50029+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
50030+ free_variables();
50031+ memset(gr_usermode, 0, sizeof (struct gr_arg));
50032+ memset(gr_system_salt, 0, GR_SALT_LEN);
50033+ memset(gr_system_sum, 0, GR_SHA_LEN);
50034+ } else if (gr_status & GR_READY) {
50035+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
50036+ error = -EPERM;
50037+ } else {
50038+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
50039+ error = -EAGAIN;
50040+ }
50041+ break;
50042+ case GR_ENABLE:
50043+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
50044+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
50045+ else {
50046+ if (gr_status & GR_READY)
50047+ error = -EAGAIN;
50048+ else
50049+ error = error2;
50050+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
50051+ }
50052+ break;
50053+ case GR_RELOAD:
50054+ if (!(gr_status & GR_READY)) {
50055+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
50056+ error = -EAGAIN;
50057+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50058+ preempt_disable();
50059+
50060+ pax_open_kernel();
50061+ gr_status &= ~GR_READY;
50062+ pax_close_kernel();
50063+
50064+ free_variables();
50065+ if (!(error2 = gracl_init(gr_usermode))) {
50066+ preempt_enable();
50067+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
50068+ } else {
50069+ preempt_enable();
50070+ error = error2;
50071+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
50072+ }
50073+ } else {
50074+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
50075+ error = -EPERM;
50076+ }
50077+ break;
50078+ case GR_SEGVMOD:
50079+ if (unlikely(!(gr_status & GR_READY))) {
50080+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
50081+ error = -EAGAIN;
50082+ break;
50083+ }
50084+
50085+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50086+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
50087+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
50088+ struct acl_subject_label *segvacl;
50089+ segvacl =
50090+ lookup_acl_subj_label(gr_usermode->segv_inode,
50091+ gr_usermode->segv_device,
50092+ current->role);
50093+ if (segvacl) {
50094+ segvacl->crashes = 0;
50095+ segvacl->expires = 0;
50096+ }
50097+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
50098+ gr_remove_uid(gr_usermode->segv_uid);
50099+ }
50100+ } else {
50101+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
50102+ error = -EPERM;
50103+ }
50104+ break;
50105+ case GR_SPROLE:
50106+ case GR_SPROLEPAM:
50107+ if (unlikely(!(gr_status & GR_READY))) {
50108+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
50109+ error = -EAGAIN;
50110+ break;
50111+ }
50112+
50113+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
50114+ current->role->expires = 0;
50115+ current->role->auth_attempts = 0;
50116+ }
50117+
50118+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
50119+ time_after(current->role->expires, get_seconds())) {
50120+ error = -EBUSY;
50121+ goto out;
50122+ }
50123+
50124+ if (lookup_special_role_auth
50125+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
50126+ && ((!sprole_salt && !sprole_sum)
50127+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
50128+ char *p = "";
50129+ assign_special_role(gr_usermode->sp_role);
50130+ read_lock(&tasklist_lock);
50131+ if (current->real_parent)
50132+ p = current->real_parent->role->rolename;
50133+ read_unlock(&tasklist_lock);
50134+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
50135+ p, acl_sp_role_value);
50136+ } else {
50137+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
50138+ error = -EPERM;
50139+ if(!(current->role->auth_attempts++))
50140+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
50141+
50142+ goto out;
50143+ }
50144+ break;
50145+ case GR_UNSPROLE:
50146+ if (unlikely(!(gr_status & GR_READY))) {
50147+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
50148+ error = -EAGAIN;
50149+ break;
50150+ }
50151+
50152+ if (current->role->roletype & GR_ROLE_SPECIAL) {
50153+ char *p = "";
50154+ int i = 0;
50155+
50156+ read_lock(&tasklist_lock);
50157+ if (current->real_parent) {
50158+ p = current->real_parent->role->rolename;
50159+ i = current->real_parent->acl_role_id;
50160+ }
50161+ read_unlock(&tasklist_lock);
50162+
50163+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
50164+ gr_set_acls(1);
50165+ } else {
50166+ error = -EPERM;
50167+ goto out;
50168+ }
50169+ break;
50170+ default:
50171+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
50172+ error = -EINVAL;
50173+ break;
50174+ }
50175+
50176+ if (error != -EPERM)
50177+ goto out;
50178+
50179+ if(!(gr_auth_attempts++))
50180+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
50181+
50182+ out:
50183+ mutex_unlock(&gr_dev_mutex);
50184+ return error;
50185+}
50186+
50187+/* must be called with
50188+ rcu_read_lock();
50189+ read_lock(&tasklist_lock);
50190+ read_lock(&grsec_exec_file_lock);
50191+*/
50192+int gr_apply_subject_to_task(struct task_struct *task)
50193+{
50194+ struct acl_object_label *obj;
50195+ char *tmpname;
50196+ struct acl_subject_label *tmpsubj;
50197+ struct file *filp;
50198+ struct name_entry *nmatch;
50199+
50200+ filp = task->exec_file;
50201+ if (filp == NULL)
50202+ return 0;
50203+
50204+ /* the following is to apply the correct subject
50205+ on binaries running when the RBAC system
50206+ is enabled, when the binaries have been
50207+ replaced or deleted since their execution
50208+ -----
50209+ when the RBAC system starts, the inode/dev
50210+ from exec_file will be one the RBAC system
50211+ is unaware of. It only knows the inode/dev
50212+ of the present file on disk, or the absence
50213+ of it.
50214+ */
50215+ preempt_disable();
50216+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
50217+
50218+ nmatch = lookup_name_entry(tmpname);
50219+ preempt_enable();
50220+ tmpsubj = NULL;
50221+ if (nmatch) {
50222+ if (nmatch->deleted)
50223+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
50224+ else
50225+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
50226+ if (tmpsubj != NULL)
50227+ task->acl = tmpsubj;
50228+ }
50229+ if (tmpsubj == NULL)
50230+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
50231+ task->role);
50232+ if (task->acl) {
50233+ task->is_writable = 0;
50234+ /* ignore additional mmap checks for processes that are writable
50235+ by the default ACL */
50236+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50237+ if (unlikely(obj->mode & GR_WRITE))
50238+ task->is_writable = 1;
50239+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
50240+ if (unlikely(obj->mode & GR_WRITE))
50241+ task->is_writable = 1;
50242+
50243+ gr_set_proc_res(task);
50244+
50245+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50246+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50247+#endif
50248+ } else {
50249+ return 1;
50250+ }
50251+
50252+ return 0;
50253+}
50254+
50255+int
50256+gr_set_acls(const int type)
50257+{
50258+ struct task_struct *task, *task2;
50259+ struct acl_role_label *role = current->role;
50260+ __u16 acl_role_id = current->acl_role_id;
50261+ const struct cred *cred;
50262+ int ret;
50263+
50264+ rcu_read_lock();
50265+ read_lock(&tasklist_lock);
50266+ read_lock(&grsec_exec_file_lock);
50267+ do_each_thread(task2, task) {
50268+ /* check to see if we're called from the exit handler,
50269+ if so, only replace ACLs that have inherited the admin
50270+ ACL */
50271+
50272+ if (type && (task->role != role ||
50273+ task->acl_role_id != acl_role_id))
50274+ continue;
50275+
50276+ task->acl_role_id = 0;
50277+ task->acl_sp_role = 0;
50278+
50279+ if (task->exec_file) {
50280+ cred = __task_cred(task);
50281+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
50282+ ret = gr_apply_subject_to_task(task);
50283+ if (ret) {
50284+ read_unlock(&grsec_exec_file_lock);
50285+ read_unlock(&tasklist_lock);
50286+ rcu_read_unlock();
50287+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
50288+ return ret;
50289+ }
50290+ } else {
50291+ // it's a kernel process
50292+ task->role = kernel_role;
50293+ task->acl = kernel_role->root_label;
50294+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
50295+ task->acl->mode &= ~GR_PROCFIND;
50296+#endif
50297+ }
50298+ } while_each_thread(task2, task);
50299+ read_unlock(&grsec_exec_file_lock);
50300+ read_unlock(&tasklist_lock);
50301+ rcu_read_unlock();
50302+
50303+ return 0;
50304+}
50305+
50306+void
50307+gr_learn_resource(const struct task_struct *task,
50308+ const int res, const unsigned long wanted, const int gt)
50309+{
50310+ struct acl_subject_label *acl;
50311+ const struct cred *cred;
50312+
50313+ if (unlikely((gr_status & GR_READY) &&
50314+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
50315+ goto skip_reslog;
50316+
50317+#ifdef CONFIG_GRKERNSEC_RESLOG
50318+ gr_log_resource(task, res, wanted, gt);
50319+#endif
50320+ skip_reslog:
50321+
50322+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
50323+ return;
50324+
50325+ acl = task->acl;
50326+
50327+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
50328+ !(acl->resmask & (1 << (unsigned short) res))))
50329+ return;
50330+
50331+ if (wanted >= acl->res[res].rlim_cur) {
50332+ unsigned long res_add;
50333+
50334+ res_add = wanted;
50335+ switch (res) {
50336+ case RLIMIT_CPU:
50337+ res_add += GR_RLIM_CPU_BUMP;
50338+ break;
50339+ case RLIMIT_FSIZE:
50340+ res_add += GR_RLIM_FSIZE_BUMP;
50341+ break;
50342+ case RLIMIT_DATA:
50343+ res_add += GR_RLIM_DATA_BUMP;
50344+ break;
50345+ case RLIMIT_STACK:
50346+ res_add += GR_RLIM_STACK_BUMP;
50347+ break;
50348+ case RLIMIT_CORE:
50349+ res_add += GR_RLIM_CORE_BUMP;
50350+ break;
50351+ case RLIMIT_RSS:
50352+ res_add += GR_RLIM_RSS_BUMP;
50353+ break;
50354+ case RLIMIT_NPROC:
50355+ res_add += GR_RLIM_NPROC_BUMP;
50356+ break;
50357+ case RLIMIT_NOFILE:
50358+ res_add += GR_RLIM_NOFILE_BUMP;
50359+ break;
50360+ case RLIMIT_MEMLOCK:
50361+ res_add += GR_RLIM_MEMLOCK_BUMP;
50362+ break;
50363+ case RLIMIT_AS:
50364+ res_add += GR_RLIM_AS_BUMP;
50365+ break;
50366+ case RLIMIT_LOCKS:
50367+ res_add += GR_RLIM_LOCKS_BUMP;
50368+ break;
50369+ case RLIMIT_SIGPENDING:
50370+ res_add += GR_RLIM_SIGPENDING_BUMP;
50371+ break;
50372+ case RLIMIT_MSGQUEUE:
50373+ res_add += GR_RLIM_MSGQUEUE_BUMP;
50374+ break;
50375+ case RLIMIT_NICE:
50376+ res_add += GR_RLIM_NICE_BUMP;
50377+ break;
50378+ case RLIMIT_RTPRIO:
50379+ res_add += GR_RLIM_RTPRIO_BUMP;
50380+ break;
50381+ case RLIMIT_RTTIME:
50382+ res_add += GR_RLIM_RTTIME_BUMP;
50383+ break;
50384+ }
50385+
50386+ acl->res[res].rlim_cur = res_add;
50387+
50388+ if (wanted > acl->res[res].rlim_max)
50389+ acl->res[res].rlim_max = res_add;
50390+
50391+ /* only log the subject filename, since resource logging is supported for
50392+ single-subject learning only */
50393+ rcu_read_lock();
50394+ cred = __task_cred(task);
50395+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
50396+ task->role->roletype, cred->uid, cred->gid, acl->filename,
50397+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
50398+ "", (unsigned long) res, &task->signal->saved_ip);
50399+ rcu_read_unlock();
50400+ }
50401+
50402+ return;
50403+}
50404+
50405+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
50406+void
50407+pax_set_initial_flags(struct linux_binprm *bprm)
50408+{
50409+ struct task_struct *task = current;
50410+ struct acl_subject_label *proc;
50411+ unsigned long flags;
50412+
50413+ if (unlikely(!(gr_status & GR_READY)))
50414+ return;
50415+
50416+ flags = pax_get_flags(task);
50417+
50418+ proc = task->acl;
50419+
50420+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
50421+ flags &= ~MF_PAX_PAGEEXEC;
50422+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
50423+ flags &= ~MF_PAX_SEGMEXEC;
50424+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
50425+ flags &= ~MF_PAX_RANDMMAP;
50426+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
50427+ flags &= ~MF_PAX_EMUTRAMP;
50428+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
50429+ flags &= ~MF_PAX_MPROTECT;
50430+
50431+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
50432+ flags |= MF_PAX_PAGEEXEC;
50433+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
50434+ flags |= MF_PAX_SEGMEXEC;
50435+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
50436+ flags |= MF_PAX_RANDMMAP;
50437+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
50438+ flags |= MF_PAX_EMUTRAMP;
50439+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
50440+ flags |= MF_PAX_MPROTECT;
50441+
50442+ pax_set_flags(task, flags);
50443+
50444+ return;
50445+}
50446+#endif
50447+
50448+#ifdef CONFIG_SYSCTL
50449+/* Eric Biederman likes breaking userland ABI and every inode-based security
50450+ system to save 35kb of memory */
50451+
50452+/* we modify the passed in filename, but adjust it back before returning */
50453+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
50454+{
50455+ struct name_entry *nmatch;
50456+ char *p, *lastp = NULL;
50457+ struct acl_object_label *obj = NULL, *tmp;
50458+ struct acl_subject_label *tmpsubj;
50459+ char c = '\0';
50460+
50461+ read_lock(&gr_inode_lock);
50462+
50463+ p = name + len - 1;
50464+ do {
50465+ nmatch = lookup_name_entry(name);
50466+ if (lastp != NULL)
50467+ *lastp = c;
50468+
50469+ if (nmatch == NULL)
50470+ goto next_component;
50471+ tmpsubj = current->acl;
50472+ do {
50473+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
50474+ if (obj != NULL) {
50475+ tmp = obj->globbed;
50476+ while (tmp) {
50477+ if (!glob_match(tmp->filename, name)) {
50478+ obj = tmp;
50479+ goto found_obj;
50480+ }
50481+ tmp = tmp->next;
50482+ }
50483+ goto found_obj;
50484+ }
50485+ } while ((tmpsubj = tmpsubj->parent_subject));
50486+next_component:
50487+ /* end case */
50488+ if (p == name)
50489+ break;
50490+
50491+ while (*p != '/')
50492+ p--;
50493+ if (p == name)
50494+ lastp = p + 1;
50495+ else {
50496+ lastp = p;
50497+ p--;
50498+ }
50499+ c = *lastp;
50500+ *lastp = '\0';
50501+ } while (1);
50502+found_obj:
50503+ read_unlock(&gr_inode_lock);
50504+ /* obj returned will always be non-null */
50505+ return obj;
50506+}
50507+
50508+/* returns 0 when allowing, non-zero on error
50509+ op of 0 is used for readdir, so we don't log the names of hidden files
50510+*/
50511+__u32
50512+gr_handle_sysctl(const struct ctl_table *table, const int op)
50513+{
50514+ struct ctl_table *tmp;
50515+ const char *proc_sys = "/proc/sys";
50516+ char *path;
50517+ struct acl_object_label *obj;
50518+ unsigned short len = 0, pos = 0, depth = 0, i;
50519+ __u32 err = 0;
50520+ __u32 mode = 0;
50521+
50522+ if (unlikely(!(gr_status & GR_READY)))
50523+ return 0;
50524+
50525+ /* for now, ignore operations on non-sysctl entries if it's not a
50526+ readdir*/
50527+ if (table->child != NULL && op != 0)
50528+ return 0;
50529+
50530+ mode |= GR_FIND;
50531+ /* it's only a read if it's an entry, read on dirs is for readdir */
50532+ if (op & MAY_READ)
50533+ mode |= GR_READ;
50534+ if (op & MAY_WRITE)
50535+ mode |= GR_WRITE;
50536+
50537+ preempt_disable();
50538+
50539+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50540+
50541+ /* it's only a read/write if it's an actual entry, not a dir
50542+ (which are opened for readdir)
50543+ */
50544+
50545+ /* convert the requested sysctl entry into a pathname */
50546+
50547+ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50548+ len += strlen(tmp->procname);
50549+ len++;
50550+ depth++;
50551+ }
50552+
50553+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
50554+ /* deny */
50555+ goto out;
50556+ }
50557+
50558+ memset(path, 0, PAGE_SIZE);
50559+
50560+ memcpy(path, proc_sys, strlen(proc_sys));
50561+
50562+ pos += strlen(proc_sys);
50563+
50564+ for (; depth > 0; depth--) {
50565+ path[pos] = '/';
50566+ pos++;
50567+ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50568+ if (depth == i) {
50569+ memcpy(path + pos, tmp->procname,
50570+ strlen(tmp->procname));
50571+ pos += strlen(tmp->procname);
50572+ }
50573+ i++;
50574+ }
50575+ }
50576+
50577+ obj = gr_lookup_by_name(path, pos);
50578+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
50579+
50580+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
50581+ ((err & mode) != mode))) {
50582+ __u32 new_mode = mode;
50583+
50584+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50585+
50586+ err = 0;
50587+ gr_log_learn_sysctl(path, new_mode);
50588+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
50589+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
50590+ err = -ENOENT;
50591+ } else if (!(err & GR_FIND)) {
50592+ err = -ENOENT;
50593+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
50594+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
50595+ path, (mode & GR_READ) ? " reading" : "",
50596+ (mode & GR_WRITE) ? " writing" : "");
50597+ err = -EACCES;
50598+ } else if ((err & mode) != mode) {
50599+ err = -EACCES;
50600+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
50601+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
50602+ path, (mode & GR_READ) ? " reading" : "",
50603+ (mode & GR_WRITE) ? " writing" : "");
50604+ err = 0;
50605+ } else
50606+ err = 0;
50607+
50608+ out:
50609+ preempt_enable();
50610+
50611+ return err;
50612+}
50613+#endif
50614+
50615+int
50616+gr_handle_proc_ptrace(struct task_struct *task)
50617+{
50618+ struct file *filp;
50619+ struct task_struct *tmp = task;
50620+ struct task_struct *curtemp = current;
50621+ __u32 retmode;
50622+
50623+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
50624+ if (unlikely(!(gr_status & GR_READY)))
50625+ return 0;
50626+#endif
50627+
50628+ read_lock(&tasklist_lock);
50629+ read_lock(&grsec_exec_file_lock);
50630+ filp = task->exec_file;
50631+
50632+ while (tmp->pid > 0) {
50633+ if (tmp == curtemp)
50634+ break;
50635+ tmp = tmp->real_parent;
50636+ }
50637+
50638+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
50639+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
50640+ read_unlock(&grsec_exec_file_lock);
50641+ read_unlock(&tasklist_lock);
50642+ return 1;
50643+ }
50644+
50645+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50646+ if (!(gr_status & GR_READY)) {
50647+ read_unlock(&grsec_exec_file_lock);
50648+ read_unlock(&tasklist_lock);
50649+ return 0;
50650+ }
50651+#endif
50652+
50653+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
50654+ read_unlock(&grsec_exec_file_lock);
50655+ read_unlock(&tasklist_lock);
50656+
50657+ if (retmode & GR_NOPTRACE)
50658+ return 1;
50659+
50660+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
50661+ && (current->acl != task->acl || (current->acl != current->role->root_label
50662+ && current->pid != task->pid)))
50663+ return 1;
50664+
50665+ return 0;
50666+}
50667+
50668+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
50669+{
50670+ if (unlikely(!(gr_status & GR_READY)))
50671+ return;
50672+
50673+ if (!(current->role->roletype & GR_ROLE_GOD))
50674+ return;
50675+
50676+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
50677+ p->role->rolename, gr_task_roletype_to_char(p),
50678+ p->acl->filename);
50679+}
50680+
50681+int
50682+gr_handle_ptrace(struct task_struct *task, const long request)
50683+{
50684+ struct task_struct *tmp = task;
50685+ struct task_struct *curtemp = current;
50686+ __u32 retmode;
50687+
50688+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
50689+ if (unlikely(!(gr_status & GR_READY)))
50690+ return 0;
50691+#endif
50692+
50693+ read_lock(&tasklist_lock);
50694+ while (tmp->pid > 0) {
50695+ if (tmp == curtemp)
50696+ break;
50697+ tmp = tmp->real_parent;
50698+ }
50699+
50700+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
50701+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
50702+ read_unlock(&tasklist_lock);
50703+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50704+ return 1;
50705+ }
50706+ read_unlock(&tasklist_lock);
50707+
50708+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50709+ if (!(gr_status & GR_READY))
50710+ return 0;
50711+#endif
50712+
50713+ read_lock(&grsec_exec_file_lock);
50714+ if (unlikely(!task->exec_file)) {
50715+ read_unlock(&grsec_exec_file_lock);
50716+ return 0;
50717+ }
50718+
50719+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
50720+ read_unlock(&grsec_exec_file_lock);
50721+
50722+ if (retmode & GR_NOPTRACE) {
50723+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50724+ return 1;
50725+ }
50726+
50727+ if (retmode & GR_PTRACERD) {
50728+ switch (request) {
50729+ case PTRACE_POKETEXT:
50730+ case PTRACE_POKEDATA:
50731+ case PTRACE_POKEUSR:
50732+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
50733+ case PTRACE_SETREGS:
50734+ case PTRACE_SETFPREGS:
50735+#endif
50736+#ifdef CONFIG_X86
50737+ case PTRACE_SETFPXREGS:
50738+#endif
50739+#ifdef CONFIG_ALTIVEC
50740+ case PTRACE_SETVRREGS:
50741+#endif
50742+ return 1;
50743+ default:
50744+ return 0;
50745+ }
50746+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
50747+ !(current->role->roletype & GR_ROLE_GOD) &&
50748+ (current->acl != task->acl)) {
50749+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50750+ return 1;
50751+ }
50752+
50753+ return 0;
50754+}
50755+
50756+static int is_writable_mmap(const struct file *filp)
50757+{
50758+ struct task_struct *task = current;
50759+ struct acl_object_label *obj, *obj2;
50760+
50761+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
50762+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
50763+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50764+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
50765+ task->role->root_label);
50766+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
50767+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
50768+ return 1;
50769+ }
50770+ }
50771+ return 0;
50772+}
50773+
50774+int
50775+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
50776+{
50777+ __u32 mode;
50778+
50779+ if (unlikely(!file || !(prot & PROT_EXEC)))
50780+ return 1;
50781+
50782+ if (is_writable_mmap(file))
50783+ return 0;
50784+
50785+ mode =
50786+ gr_search_file(file->f_path.dentry,
50787+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
50788+ file->f_path.mnt);
50789+
50790+ if (!gr_tpe_allow(file))
50791+ return 0;
50792+
50793+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
50794+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50795+ return 0;
50796+ } else if (unlikely(!(mode & GR_EXEC))) {
50797+ return 0;
50798+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
50799+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50800+ return 1;
50801+ }
50802+
50803+ return 1;
50804+}
50805+
50806+int
50807+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
50808+{
50809+ __u32 mode;
50810+
50811+ if (unlikely(!file || !(prot & PROT_EXEC)))
50812+ return 1;
50813+
50814+ if (is_writable_mmap(file))
50815+ return 0;
50816+
50817+ mode =
50818+ gr_search_file(file->f_path.dentry,
50819+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
50820+ file->f_path.mnt);
50821+
50822+ if (!gr_tpe_allow(file))
50823+ return 0;
50824+
50825+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
50826+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50827+ return 0;
50828+ } else if (unlikely(!(mode & GR_EXEC))) {
50829+ return 0;
50830+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
50831+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50832+ return 1;
50833+ }
50834+
50835+ return 1;
50836+}
50837+
50838+void
50839+gr_acl_handle_psacct(struct task_struct *task, const long code)
50840+{
50841+ unsigned long runtime;
50842+ unsigned long cputime;
50843+ unsigned int wday, cday;
50844+ __u8 whr, chr;
50845+ __u8 wmin, cmin;
50846+ __u8 wsec, csec;
50847+ struct timespec timeval;
50848+
50849+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
50850+ !(task->acl->mode & GR_PROCACCT)))
50851+ return;
50852+
50853+ do_posix_clock_monotonic_gettime(&timeval);
50854+ runtime = timeval.tv_sec - task->start_time.tv_sec;
50855+ wday = runtime / (3600 * 24);
50856+ runtime -= wday * (3600 * 24);
50857+ whr = runtime / 3600;
50858+ runtime -= whr * 3600;
50859+ wmin = runtime / 60;
50860+ runtime -= wmin * 60;
50861+ wsec = runtime;
50862+
50863+ cputime = (task->utime + task->stime) / HZ;
50864+ cday = cputime / (3600 * 24);
50865+ cputime -= cday * (3600 * 24);
50866+ chr = cputime / 3600;
50867+ cputime -= chr * 3600;
50868+ cmin = cputime / 60;
50869+ cputime -= cmin * 60;
50870+ csec = cputime;
50871+
50872+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
50873+
50874+ return;
50875+}
50876+
50877+void gr_set_kernel_label(struct task_struct *task)
50878+{
50879+ if (gr_status & GR_READY) {
50880+ task->role = kernel_role;
50881+ task->acl = kernel_role->root_label;
50882+ }
50883+ return;
50884+}
50885+
50886+#ifdef CONFIG_TASKSTATS
50887+int gr_is_taskstats_denied(int pid)
50888+{
50889+ struct task_struct *task;
50890+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50891+ const struct cred *cred;
50892+#endif
50893+ int ret = 0;
50894+
50895+ /* restrict taskstats viewing to un-chrooted root users
50896+ who have the 'view' subject flag if the RBAC system is enabled
50897+ */
50898+
50899+ rcu_read_lock();
50900+ read_lock(&tasklist_lock);
50901+ task = find_task_by_vpid(pid);
50902+ if (task) {
50903+#ifdef CONFIG_GRKERNSEC_CHROOT
50904+ if (proc_is_chrooted(task))
50905+ ret = -EACCES;
50906+#endif
50907+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50908+ cred = __task_cred(task);
50909+#ifdef CONFIG_GRKERNSEC_PROC_USER
50910+ if (cred->uid != 0)
50911+ ret = -EACCES;
50912+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50913+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
50914+ ret = -EACCES;
50915+#endif
50916+#endif
50917+ if (gr_status & GR_READY) {
50918+ if (!(task->acl->mode & GR_VIEW))
50919+ ret = -EACCES;
50920+ }
50921+ } else
50922+ ret = -ENOENT;
50923+
50924+ read_unlock(&tasklist_lock);
50925+ rcu_read_unlock();
50926+
50927+ return ret;
50928+}
50929+#endif
50930+
50931+/* AUXV entries are filled via a descendant of search_binary_handler
50932+ after we've already applied the subject for the target
50933+*/
50934+int gr_acl_enable_at_secure(void)
50935+{
50936+ if (unlikely(!(gr_status & GR_READY)))
50937+ return 0;
50938+
50939+ if (current->acl->mode & GR_ATSECURE)
50940+ return 1;
50941+
50942+ return 0;
50943+}
50944+
50945+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
50946+{
50947+ struct task_struct *task = current;
50948+ struct dentry *dentry = file->f_path.dentry;
50949+ struct vfsmount *mnt = file->f_path.mnt;
50950+ struct acl_object_label *obj, *tmp;
50951+ struct acl_subject_label *subj;
50952+ unsigned int bufsize;
50953+ int is_not_root;
50954+ char *path;
50955+ dev_t dev = __get_dev(dentry);
50956+
50957+ if (unlikely(!(gr_status & GR_READY)))
50958+ return 1;
50959+
50960+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50961+ return 1;
50962+
50963+ /* ignore Eric Biederman */
50964+ if (IS_PRIVATE(dentry->d_inode))
50965+ return 1;
50966+
50967+ subj = task->acl;
50968+ do {
50969+ obj = lookup_acl_obj_label(ino, dev, subj);
50970+ if (obj != NULL)
50971+ return (obj->mode & GR_FIND) ? 1 : 0;
50972+ } while ((subj = subj->parent_subject));
50973+
50974+ /* this is purely an optimization since we're looking for an object
50975+ for the directory we're doing a readdir on
50976+ if it's possible for any globbed object to match the entry we're
50977+ filling into the directory, then the object we find here will be
50978+ an anchor point with attached globbed objects
50979+ */
50980+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
50981+ if (obj->globbed == NULL)
50982+ return (obj->mode & GR_FIND) ? 1 : 0;
50983+
50984+ is_not_root = ((obj->filename[0] == '/') &&
50985+ (obj->filename[1] == '\0')) ? 0 : 1;
50986+ bufsize = PAGE_SIZE - namelen - is_not_root;
50987+
50988+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
50989+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
50990+ return 1;
50991+
50992+ preempt_disable();
50993+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
50994+ bufsize);
50995+
50996+ bufsize = strlen(path);
50997+
50998+ /* if base is "/", don't append an additional slash */
50999+ if (is_not_root)
51000+ *(path + bufsize) = '/';
51001+ memcpy(path + bufsize + is_not_root, name, namelen);
51002+ *(path + bufsize + namelen + is_not_root) = '\0';
51003+
51004+ tmp = obj->globbed;
51005+ while (tmp) {
51006+ if (!glob_match(tmp->filename, path)) {
51007+ preempt_enable();
51008+ return (tmp->mode & GR_FIND) ? 1 : 0;
51009+ }
51010+ tmp = tmp->next;
51011+ }
51012+ preempt_enable();
51013+ return (obj->mode & GR_FIND) ? 1 : 0;
51014+}
51015+
51016+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
51017+EXPORT_SYMBOL(gr_acl_is_enabled);
51018+#endif
51019+EXPORT_SYMBOL(gr_learn_resource);
51020+EXPORT_SYMBOL(gr_set_kernel_label);
51021+#ifdef CONFIG_SECURITY
51022+EXPORT_SYMBOL(gr_check_user_change);
51023+EXPORT_SYMBOL(gr_check_group_change);
51024+#endif
51025+
51026diff -urNp linux-3.0.7/grsecurity/gracl_cap.c linux-3.0.7/grsecurity/gracl_cap.c
51027--- linux-3.0.7/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
51028+++ linux-3.0.7/grsecurity/gracl_cap.c 2011-09-14 09:21:24.000000000 -0400
51029@@ -0,0 +1,101 @@
51030+#include <linux/kernel.h>
51031+#include <linux/module.h>
51032+#include <linux/sched.h>
51033+#include <linux/gracl.h>
51034+#include <linux/grsecurity.h>
51035+#include <linux/grinternal.h>
51036+
51037+extern const char *captab_log[];
51038+extern int captab_log_entries;
51039+
51040+int
51041+gr_acl_is_capable(const int cap)
51042+{
51043+ struct task_struct *task = current;
51044+ const struct cred *cred = current_cred();
51045+ struct acl_subject_label *curracl;
51046+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
51047+ kernel_cap_t cap_audit = __cap_empty_set;
51048+
51049+ if (!gr_acl_is_enabled())
51050+ return 1;
51051+
51052+ curracl = task->acl;
51053+
51054+ cap_drop = curracl->cap_lower;
51055+ cap_mask = curracl->cap_mask;
51056+ cap_audit = curracl->cap_invert_audit;
51057+
51058+ while ((curracl = curracl->parent_subject)) {
51059+ /* if the cap isn't specified in the current computed mask but is specified in the
51060+ current level subject, and is lowered in the current level subject, then add
51061+ it to the set of dropped capabilities
51062+ otherwise, add the current level subject's mask to the current computed mask
51063+ */
51064+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
51065+ cap_raise(cap_mask, cap);
51066+ if (cap_raised(curracl->cap_lower, cap))
51067+ cap_raise(cap_drop, cap);
51068+ if (cap_raised(curracl->cap_invert_audit, cap))
51069+ cap_raise(cap_audit, cap);
51070+ }
51071+ }
51072+
51073+ if (!cap_raised(cap_drop, cap)) {
51074+ if (cap_raised(cap_audit, cap))
51075+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
51076+ return 1;
51077+ }
51078+
51079+ curracl = task->acl;
51080+
51081+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
51082+ && cap_raised(cred->cap_effective, cap)) {
51083+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
51084+ task->role->roletype, cred->uid,
51085+ cred->gid, task->exec_file ?
51086+ gr_to_filename(task->exec_file->f_path.dentry,
51087+ task->exec_file->f_path.mnt) : curracl->filename,
51088+ curracl->filename, 0UL,
51089+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
51090+ return 1;
51091+ }
51092+
51093+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
51094+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
51095+ return 0;
51096+}
51097+
51098+int
51099+gr_acl_is_capable_nolog(const int cap)
51100+{
51101+ struct acl_subject_label *curracl;
51102+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
51103+
51104+ if (!gr_acl_is_enabled())
51105+ return 1;
51106+
51107+ curracl = current->acl;
51108+
51109+ cap_drop = curracl->cap_lower;
51110+ cap_mask = curracl->cap_mask;
51111+
51112+ while ((curracl = curracl->parent_subject)) {
51113+ /* if the cap isn't specified in the current computed mask but is specified in the
51114+ current level subject, and is lowered in the current level subject, then add
51115+ it to the set of dropped capabilities
51116+ otherwise, add the current level subject's mask to the current computed mask
51117+ */
51118+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
51119+ cap_raise(cap_mask, cap);
51120+ if (cap_raised(curracl->cap_lower, cap))
51121+ cap_raise(cap_drop, cap);
51122+ }
51123+ }
51124+
51125+ if (!cap_raised(cap_drop, cap))
51126+ return 1;
51127+
51128+ return 0;
51129+}
51130+
51131diff -urNp linux-3.0.7/grsecurity/gracl_fs.c linux-3.0.7/grsecurity/gracl_fs.c
51132--- linux-3.0.7/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
51133+++ linux-3.0.7/grsecurity/gracl_fs.c 2011-10-17 01:22:26.000000000 -0400
51134@@ -0,0 +1,431 @@
51135+#include <linux/kernel.h>
51136+#include <linux/sched.h>
51137+#include <linux/types.h>
51138+#include <linux/fs.h>
51139+#include <linux/file.h>
51140+#include <linux/stat.h>
51141+#include <linux/grsecurity.h>
51142+#include <linux/grinternal.h>
51143+#include <linux/gracl.h>
51144+
51145+__u32
51146+gr_acl_handle_hidden_file(const struct dentry * dentry,
51147+ const struct vfsmount * mnt)
51148+{
51149+ __u32 mode;
51150+
51151+ if (unlikely(!dentry->d_inode))
51152+ return GR_FIND;
51153+
51154+ mode =
51155+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
51156+
51157+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
51158+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
51159+ return mode;
51160+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
51161+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
51162+ return 0;
51163+ } else if (unlikely(!(mode & GR_FIND)))
51164+ return 0;
51165+
51166+ return GR_FIND;
51167+}
51168+
51169+__u32
51170+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
51171+ const int fmode)
51172+{
51173+ __u32 reqmode = GR_FIND;
51174+ __u32 mode;
51175+
51176+ if (unlikely(!dentry->d_inode))
51177+ return reqmode;
51178+
51179+ if (unlikely(fmode & O_APPEND))
51180+ reqmode |= GR_APPEND;
51181+ else if (unlikely(fmode & FMODE_WRITE))
51182+ reqmode |= GR_WRITE;
51183+ if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
51184+ reqmode |= GR_READ;
51185+ if ((fmode & FMODE_GREXEC) && (fmode & __FMODE_EXEC))
51186+ reqmode &= ~GR_READ;
51187+ mode =
51188+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51189+ mnt);
51190+
51191+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51192+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
51193+ reqmode & GR_READ ? " reading" : "",
51194+ reqmode & GR_WRITE ? " writing" : reqmode &
51195+ GR_APPEND ? " appending" : "");
51196+ return reqmode;
51197+ } else
51198+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51199+ {
51200+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
51201+ reqmode & GR_READ ? " reading" : "",
51202+ reqmode & GR_WRITE ? " writing" : reqmode &
51203+ GR_APPEND ? " appending" : "");
51204+ return 0;
51205+ } else if (unlikely((mode & reqmode) != reqmode))
51206+ return 0;
51207+
51208+ return reqmode;
51209+}
51210+
51211+__u32
51212+gr_acl_handle_creat(const struct dentry * dentry,
51213+ const struct dentry * p_dentry,
51214+ const struct vfsmount * p_mnt, const int fmode,
51215+ const int imode)
51216+{
51217+ __u32 reqmode = GR_WRITE | GR_CREATE;
51218+ __u32 mode;
51219+
51220+ if (unlikely(fmode & O_APPEND))
51221+ reqmode |= GR_APPEND;
51222+ if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
51223+ reqmode |= GR_READ;
51224+ if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
51225+ reqmode |= GR_SETID;
51226+
51227+ mode =
51228+ gr_check_create(dentry, p_dentry, p_mnt,
51229+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51230+
51231+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51232+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51233+ reqmode & GR_READ ? " reading" : "",
51234+ reqmode & GR_WRITE ? " writing" : reqmode &
51235+ GR_APPEND ? " appending" : "");
51236+ return reqmode;
51237+ } else
51238+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51239+ {
51240+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51241+ reqmode & GR_READ ? " reading" : "",
51242+ reqmode & GR_WRITE ? " writing" : reqmode &
51243+ GR_APPEND ? " appending" : "");
51244+ return 0;
51245+ } else if (unlikely((mode & reqmode) != reqmode))
51246+ return 0;
51247+
51248+ return reqmode;
51249+}
51250+
51251+__u32
51252+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
51253+ const int fmode)
51254+{
51255+ __u32 mode, reqmode = GR_FIND;
51256+
51257+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
51258+ reqmode |= GR_EXEC;
51259+ if (fmode & S_IWOTH)
51260+ reqmode |= GR_WRITE;
51261+ if (fmode & S_IROTH)
51262+ reqmode |= GR_READ;
51263+
51264+ mode =
51265+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51266+ mnt);
51267+
51268+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51269+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51270+ reqmode & GR_READ ? " reading" : "",
51271+ reqmode & GR_WRITE ? " writing" : "",
51272+ reqmode & GR_EXEC ? " executing" : "");
51273+ return reqmode;
51274+ } else
51275+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51276+ {
51277+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51278+ reqmode & GR_READ ? " reading" : "",
51279+ reqmode & GR_WRITE ? " writing" : "",
51280+ reqmode & GR_EXEC ? " executing" : "");
51281+ return 0;
51282+ } else if (unlikely((mode & reqmode) != reqmode))
51283+ return 0;
51284+
51285+ return reqmode;
51286+}
51287+
51288+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
51289+{
51290+ __u32 mode;
51291+
51292+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
51293+
51294+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51295+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
51296+ return mode;
51297+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51298+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
51299+ return 0;
51300+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
51301+ return 0;
51302+
51303+ return (reqmode);
51304+}
51305+
51306+__u32
51307+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
51308+{
51309+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
51310+}
51311+
51312+__u32
51313+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
51314+{
51315+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
51316+}
51317+
51318+__u32
51319+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
51320+{
51321+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
51322+}
51323+
51324+__u32
51325+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
51326+{
51327+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
51328+}
51329+
51330+__u32
51331+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
51332+ mode_t mode)
51333+{
51334+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
51335+ return 1;
51336+
51337+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51338+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51339+ GR_FCHMOD_ACL_MSG);
51340+ } else {
51341+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
51342+ }
51343+}
51344+
51345+__u32
51346+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
51347+ mode_t mode)
51348+{
51349+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51350+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51351+ GR_CHMOD_ACL_MSG);
51352+ } else {
51353+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
51354+ }
51355+}
51356+
51357+__u32
51358+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
51359+{
51360+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
51361+}
51362+
51363+__u32
51364+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
51365+{
51366+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
51367+}
51368+
51369+__u32
51370+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
51371+{
51372+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
51373+}
51374+
51375+__u32
51376+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
51377+{
51378+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
51379+ GR_UNIXCONNECT_ACL_MSG);
51380+}
51381+
51382+/* hardlinks require at minimum create and link permission,
51383+ any additional privilege required is based on the
51384+ privilege of the file being linked to
51385+*/
51386+__u32
51387+gr_acl_handle_link(const struct dentry * new_dentry,
51388+ const struct dentry * parent_dentry,
51389+ const struct vfsmount * parent_mnt,
51390+ const struct dentry * old_dentry,
51391+ const struct vfsmount * old_mnt, const char *to)
51392+{
51393+ __u32 mode;
51394+ __u32 needmode = GR_CREATE | GR_LINK;
51395+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
51396+
51397+ mode =
51398+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
51399+ old_mnt);
51400+
51401+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
51402+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51403+ return mode;
51404+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51405+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51406+ return 0;
51407+ } else if (unlikely((mode & needmode) != needmode))
51408+ return 0;
51409+
51410+ return 1;
51411+}
51412+
51413+__u32
51414+gr_acl_handle_symlink(const struct dentry * new_dentry,
51415+ const struct dentry * parent_dentry,
51416+ const struct vfsmount * parent_mnt, const char *from)
51417+{
51418+ __u32 needmode = GR_WRITE | GR_CREATE;
51419+ __u32 mode;
51420+
51421+ mode =
51422+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
51423+ GR_CREATE | GR_AUDIT_CREATE |
51424+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
51425+
51426+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
51427+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51428+ return mode;
51429+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51430+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51431+ return 0;
51432+ } else if (unlikely((mode & needmode) != needmode))
51433+ return 0;
51434+
51435+ return (GR_WRITE | GR_CREATE);
51436+}
51437+
51438+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
51439+{
51440+ __u32 mode;
51441+
51442+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51443+
51444+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51445+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
51446+ return mode;
51447+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51448+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
51449+ return 0;
51450+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
51451+ return 0;
51452+
51453+ return (reqmode);
51454+}
51455+
51456+__u32
51457+gr_acl_handle_mknod(const struct dentry * new_dentry,
51458+ const struct dentry * parent_dentry,
51459+ const struct vfsmount * parent_mnt,
51460+ const int mode)
51461+{
51462+ __u32 reqmode = GR_WRITE | GR_CREATE;
51463+ if (unlikely(mode & (S_ISUID | S_ISGID)))
51464+ reqmode |= GR_SETID;
51465+
51466+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51467+ reqmode, GR_MKNOD_ACL_MSG);
51468+}
51469+
51470+__u32
51471+gr_acl_handle_mkdir(const struct dentry *new_dentry,
51472+ const struct dentry *parent_dentry,
51473+ const struct vfsmount *parent_mnt)
51474+{
51475+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51476+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
51477+}
51478+
51479+#define RENAME_CHECK_SUCCESS(old, new) \
51480+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
51481+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
51482+
51483+int
51484+gr_acl_handle_rename(struct dentry *new_dentry,
51485+ struct dentry *parent_dentry,
51486+ const struct vfsmount *parent_mnt,
51487+ struct dentry *old_dentry,
51488+ struct inode *old_parent_inode,
51489+ struct vfsmount *old_mnt, const char *newname)
51490+{
51491+ __u32 comp1, comp2;
51492+ int error = 0;
51493+
51494+ if (unlikely(!gr_acl_is_enabled()))
51495+ return 0;
51496+
51497+ if (!new_dentry->d_inode) {
51498+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
51499+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
51500+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
51501+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
51502+ GR_DELETE | GR_AUDIT_DELETE |
51503+ GR_AUDIT_READ | GR_AUDIT_WRITE |
51504+ GR_SUPPRESS, old_mnt);
51505+ } else {
51506+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
51507+ GR_CREATE | GR_DELETE |
51508+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
51509+ GR_AUDIT_READ | GR_AUDIT_WRITE |
51510+ GR_SUPPRESS, parent_mnt);
51511+ comp2 =
51512+ gr_search_file(old_dentry,
51513+ GR_READ | GR_WRITE | GR_AUDIT_READ |
51514+ GR_DELETE | GR_AUDIT_DELETE |
51515+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
51516+ }
51517+
51518+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
51519+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
51520+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51521+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
51522+ && !(comp2 & GR_SUPPRESS)) {
51523+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51524+ error = -EACCES;
51525+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
51526+ error = -EACCES;
51527+
51528+ return error;
51529+}
51530+
51531+void
51532+gr_acl_handle_exit(void)
51533+{
51534+ u16 id;
51535+ char *rolename;
51536+ struct file *exec_file;
51537+
51538+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
51539+ !(current->role->roletype & GR_ROLE_PERSIST))) {
51540+ id = current->acl_role_id;
51541+ rolename = current->role->rolename;
51542+ gr_set_acls(1);
51543+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
51544+ }
51545+
51546+ write_lock(&grsec_exec_file_lock);
51547+ exec_file = current->exec_file;
51548+ current->exec_file = NULL;
51549+ write_unlock(&grsec_exec_file_lock);
51550+
51551+ if (exec_file)
51552+ fput(exec_file);
51553+}
51554+
51555+int
51556+gr_acl_handle_procpidmem(const struct task_struct *task)
51557+{
51558+ if (unlikely(!gr_acl_is_enabled()))
51559+ return 0;
51560+
51561+ if (task != current && task->acl->mode & GR_PROTPROCFD)
51562+ return -EACCES;
51563+
51564+ return 0;
51565+}
51566diff -urNp linux-3.0.7/grsecurity/gracl_ip.c linux-3.0.7/grsecurity/gracl_ip.c
51567--- linux-3.0.7/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
51568+++ linux-3.0.7/grsecurity/gracl_ip.c 2011-08-23 21:48:14.000000000 -0400
51569@@ -0,0 +1,381 @@
51570+#include <linux/kernel.h>
51571+#include <asm/uaccess.h>
51572+#include <asm/errno.h>
51573+#include <net/sock.h>
51574+#include <linux/file.h>
51575+#include <linux/fs.h>
51576+#include <linux/net.h>
51577+#include <linux/in.h>
51578+#include <linux/skbuff.h>
51579+#include <linux/ip.h>
51580+#include <linux/udp.h>
51581+#include <linux/types.h>
51582+#include <linux/sched.h>
51583+#include <linux/netdevice.h>
51584+#include <linux/inetdevice.h>
51585+#include <linux/gracl.h>
51586+#include <linux/grsecurity.h>
51587+#include <linux/grinternal.h>
51588+
51589+#define GR_BIND 0x01
51590+#define GR_CONNECT 0x02
51591+#define GR_INVERT 0x04
51592+#define GR_BINDOVERRIDE 0x08
51593+#define GR_CONNECTOVERRIDE 0x10
51594+#define GR_SOCK_FAMILY 0x20
51595+
51596+static const char * gr_protocols[IPPROTO_MAX] = {
51597+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
51598+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
51599+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
51600+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
51601+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
51602+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
51603+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
51604+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
51605+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
51606+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
51607+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
51608+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
51609+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
51610+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
51611+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
51612+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
51613+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
51614+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
51615+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
51616+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
51617+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
51618+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
51619+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
51620+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
51621+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
51622+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
51623+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
51624+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
51625+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
51626+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
51627+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
51628+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
51629+ };
51630+
51631+static const char * gr_socktypes[SOCK_MAX] = {
51632+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
51633+ "unknown:7", "unknown:8", "unknown:9", "packet"
51634+ };
51635+
51636+static const char * gr_sockfamilies[AF_MAX+1] = {
51637+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
51638+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
51639+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
51640+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
51641+ };
51642+
51643+const char *
51644+gr_proto_to_name(unsigned char proto)
51645+{
51646+ return gr_protocols[proto];
51647+}
51648+
51649+const char *
51650+gr_socktype_to_name(unsigned char type)
51651+{
51652+ return gr_socktypes[type];
51653+}
51654+
51655+const char *
51656+gr_sockfamily_to_name(unsigned char family)
51657+{
51658+ return gr_sockfamilies[family];
51659+}
51660+
51661+int
51662+gr_search_socket(const int domain, const int type, const int protocol)
51663+{
51664+ struct acl_subject_label *curr;
51665+ const struct cred *cred = current_cred();
51666+
51667+ if (unlikely(!gr_acl_is_enabled()))
51668+ goto exit;
51669+
51670+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
51671+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
51672+ goto exit; // let the kernel handle it
51673+
51674+ curr = current->acl;
51675+
51676+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
51677+ /* the family is allowed, if this is PF_INET allow it only if
51678+ the extra sock type/protocol checks pass */
51679+ if (domain == PF_INET)
51680+ goto inet_check;
51681+ goto exit;
51682+ } else {
51683+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51684+ __u32 fakeip = 0;
51685+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51686+ current->role->roletype, cred->uid,
51687+ cred->gid, current->exec_file ?
51688+ gr_to_filename(current->exec_file->f_path.dentry,
51689+ current->exec_file->f_path.mnt) :
51690+ curr->filename, curr->filename,
51691+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
51692+ &current->signal->saved_ip);
51693+ goto exit;
51694+ }
51695+ goto exit_fail;
51696+ }
51697+
51698+inet_check:
51699+ /* the rest of this checking is for IPv4 only */
51700+ if (!curr->ips)
51701+ goto exit;
51702+
51703+ if ((curr->ip_type & (1 << type)) &&
51704+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
51705+ goto exit;
51706+
51707+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51708+ /* we don't place acls on raw sockets , and sometimes
51709+ dgram/ip sockets are opened for ioctl and not
51710+ bind/connect, so we'll fake a bind learn log */
51711+ if (type == SOCK_RAW || type == SOCK_PACKET) {
51712+ __u32 fakeip = 0;
51713+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51714+ current->role->roletype, cred->uid,
51715+ cred->gid, current->exec_file ?
51716+ gr_to_filename(current->exec_file->f_path.dentry,
51717+ current->exec_file->f_path.mnt) :
51718+ curr->filename, curr->filename,
51719+ &fakeip, 0, type,
51720+ protocol, GR_CONNECT, &current->signal->saved_ip);
51721+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
51722+ __u32 fakeip = 0;
51723+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51724+ current->role->roletype, cred->uid,
51725+ cred->gid, current->exec_file ?
51726+ gr_to_filename(current->exec_file->f_path.dentry,
51727+ current->exec_file->f_path.mnt) :
51728+ curr->filename, curr->filename,
51729+ &fakeip, 0, type,
51730+ protocol, GR_BIND, &current->signal->saved_ip);
51731+ }
51732+ /* we'll log when they use connect or bind */
51733+ goto exit;
51734+ }
51735+
51736+exit_fail:
51737+ if (domain == PF_INET)
51738+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
51739+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
51740+ else
51741+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
51742+ gr_socktype_to_name(type), protocol);
51743+
51744+ return 0;
51745+exit:
51746+ return 1;
51747+}
51748+
51749+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
51750+{
51751+ if ((ip->mode & mode) &&
51752+ (ip_port >= ip->low) &&
51753+ (ip_port <= ip->high) &&
51754+ ((ntohl(ip_addr) & our_netmask) ==
51755+ (ntohl(our_addr) & our_netmask))
51756+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
51757+ && (ip->type & (1 << type))) {
51758+ if (ip->mode & GR_INVERT)
51759+ return 2; // specifically denied
51760+ else
51761+ return 1; // allowed
51762+ }
51763+
51764+ return 0; // not specifically allowed, may continue parsing
51765+}
51766+
51767+static int
51768+gr_search_connectbind(const int full_mode, struct sock *sk,
51769+ struct sockaddr_in *addr, const int type)
51770+{
51771+ char iface[IFNAMSIZ] = {0};
51772+ struct acl_subject_label *curr;
51773+ struct acl_ip_label *ip;
51774+ struct inet_sock *isk;
51775+ struct net_device *dev;
51776+ struct in_device *idev;
51777+ unsigned long i;
51778+ int ret;
51779+ int mode = full_mode & (GR_BIND | GR_CONNECT);
51780+ __u32 ip_addr = 0;
51781+ __u32 our_addr;
51782+ __u32 our_netmask;
51783+ char *p;
51784+ __u16 ip_port = 0;
51785+ const struct cred *cred = current_cred();
51786+
51787+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
51788+ return 0;
51789+
51790+ curr = current->acl;
51791+ isk = inet_sk(sk);
51792+
51793+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
51794+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
51795+ addr->sin_addr.s_addr = curr->inaddr_any_override;
51796+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
51797+ struct sockaddr_in saddr;
51798+ int err;
51799+
51800+ saddr.sin_family = AF_INET;
51801+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
51802+ saddr.sin_port = isk->inet_sport;
51803+
51804+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
51805+ if (err)
51806+ return err;
51807+
51808+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
51809+ if (err)
51810+ return err;
51811+ }
51812+
51813+ if (!curr->ips)
51814+ return 0;
51815+
51816+ ip_addr = addr->sin_addr.s_addr;
51817+ ip_port = ntohs(addr->sin_port);
51818+
51819+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51820+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51821+ current->role->roletype, cred->uid,
51822+ cred->gid, current->exec_file ?
51823+ gr_to_filename(current->exec_file->f_path.dentry,
51824+ current->exec_file->f_path.mnt) :
51825+ curr->filename, curr->filename,
51826+ &ip_addr, ip_port, type,
51827+ sk->sk_protocol, mode, &current->signal->saved_ip);
51828+ return 0;
51829+ }
51830+
51831+ for (i = 0; i < curr->ip_num; i++) {
51832+ ip = *(curr->ips + i);
51833+ if (ip->iface != NULL) {
51834+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
51835+ p = strchr(iface, ':');
51836+ if (p != NULL)
51837+ *p = '\0';
51838+ dev = dev_get_by_name(sock_net(sk), iface);
51839+ if (dev == NULL)
51840+ continue;
51841+ idev = in_dev_get(dev);
51842+ if (idev == NULL) {
51843+ dev_put(dev);
51844+ continue;
51845+ }
51846+ rcu_read_lock();
51847+ for_ifa(idev) {
51848+ if (!strcmp(ip->iface, ifa->ifa_label)) {
51849+ our_addr = ifa->ifa_address;
51850+ our_netmask = 0xffffffff;
51851+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
51852+ if (ret == 1) {
51853+ rcu_read_unlock();
51854+ in_dev_put(idev);
51855+ dev_put(dev);
51856+ return 0;
51857+ } else if (ret == 2) {
51858+ rcu_read_unlock();
51859+ in_dev_put(idev);
51860+ dev_put(dev);
51861+ goto denied;
51862+ }
51863+ }
51864+ } endfor_ifa(idev);
51865+ rcu_read_unlock();
51866+ in_dev_put(idev);
51867+ dev_put(dev);
51868+ } else {
51869+ our_addr = ip->addr;
51870+ our_netmask = ip->netmask;
51871+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
51872+ if (ret == 1)
51873+ return 0;
51874+ else if (ret == 2)
51875+ goto denied;
51876+ }
51877+ }
51878+
51879+denied:
51880+ if (mode == GR_BIND)
51881+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
51882+ else if (mode == GR_CONNECT)
51883+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
51884+
51885+ return -EACCES;
51886+}
51887+
51888+int
51889+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
51890+{
51891+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
51892+}
51893+
51894+int
51895+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
51896+{
51897+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
51898+}
51899+
51900+int gr_search_listen(struct socket *sock)
51901+{
51902+ struct sock *sk = sock->sk;
51903+ struct sockaddr_in addr;
51904+
51905+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
51906+ addr.sin_port = inet_sk(sk)->inet_sport;
51907+
51908+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
51909+}
51910+
51911+int gr_search_accept(struct socket *sock)
51912+{
51913+ struct sock *sk = sock->sk;
51914+ struct sockaddr_in addr;
51915+
51916+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
51917+ addr.sin_port = inet_sk(sk)->inet_sport;
51918+
51919+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
51920+}
51921+
51922+int
51923+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
51924+{
51925+ if (addr)
51926+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
51927+ else {
51928+ struct sockaddr_in sin;
51929+ const struct inet_sock *inet = inet_sk(sk);
51930+
51931+ sin.sin_addr.s_addr = inet->inet_daddr;
51932+ sin.sin_port = inet->inet_dport;
51933+
51934+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
51935+ }
51936+}
51937+
51938+int
51939+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
51940+{
51941+ struct sockaddr_in sin;
51942+
51943+ if (unlikely(skb->len < sizeof (struct udphdr)))
51944+ return 0; // skip this packet
51945+
51946+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
51947+ sin.sin_port = udp_hdr(skb)->source;
51948+
51949+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
51950+}
51951diff -urNp linux-3.0.7/grsecurity/gracl_learn.c linux-3.0.7/grsecurity/gracl_learn.c
51952--- linux-3.0.7/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
51953+++ linux-3.0.7/grsecurity/gracl_learn.c 2011-08-23 21:48:14.000000000 -0400
51954@@ -0,0 +1,207 @@
51955+#include <linux/kernel.h>
51956+#include <linux/mm.h>
51957+#include <linux/sched.h>
51958+#include <linux/poll.h>
51959+#include <linux/string.h>
51960+#include <linux/file.h>
51961+#include <linux/types.h>
51962+#include <linux/vmalloc.h>
51963+#include <linux/grinternal.h>
51964+
51965+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
51966+ size_t count, loff_t *ppos);
51967+extern int gr_acl_is_enabled(void);
51968+
51969+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
51970+static int gr_learn_attached;
51971+
51972+/* use a 512k buffer */
51973+#define LEARN_BUFFER_SIZE (512 * 1024)
51974+
51975+static DEFINE_SPINLOCK(gr_learn_lock);
51976+static DEFINE_MUTEX(gr_learn_user_mutex);
51977+
51978+/* we need to maintain two buffers, so that the kernel context of grlearn
51979+ uses a semaphore around the userspace copying, and the other kernel contexts
51980+ use a spinlock when copying into the buffer, since they cannot sleep
51981+*/
51982+static char *learn_buffer;
51983+static char *learn_buffer_user;
51984+static int learn_buffer_len;
51985+static int learn_buffer_user_len;
51986+
51987+static ssize_t
51988+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
51989+{
51990+ DECLARE_WAITQUEUE(wait, current);
51991+ ssize_t retval = 0;
51992+
51993+ add_wait_queue(&learn_wait, &wait);
51994+ set_current_state(TASK_INTERRUPTIBLE);
51995+ do {
51996+ mutex_lock(&gr_learn_user_mutex);
51997+ spin_lock(&gr_learn_lock);
51998+ if (learn_buffer_len)
51999+ break;
52000+ spin_unlock(&gr_learn_lock);
52001+ mutex_unlock(&gr_learn_user_mutex);
52002+ if (file->f_flags & O_NONBLOCK) {
52003+ retval = -EAGAIN;
52004+ goto out;
52005+ }
52006+ if (signal_pending(current)) {
52007+ retval = -ERESTARTSYS;
52008+ goto out;
52009+ }
52010+
52011+ schedule();
52012+ } while (1);
52013+
52014+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
52015+ learn_buffer_user_len = learn_buffer_len;
52016+ retval = learn_buffer_len;
52017+ learn_buffer_len = 0;
52018+
52019+ spin_unlock(&gr_learn_lock);
52020+
52021+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
52022+ retval = -EFAULT;
52023+
52024+ mutex_unlock(&gr_learn_user_mutex);
52025+out:
52026+ set_current_state(TASK_RUNNING);
52027+ remove_wait_queue(&learn_wait, &wait);
52028+ return retval;
52029+}
52030+
52031+static unsigned int
52032+poll_learn(struct file * file, poll_table * wait)
52033+{
52034+ poll_wait(file, &learn_wait, wait);
52035+
52036+ if (learn_buffer_len)
52037+ return (POLLIN | POLLRDNORM);
52038+
52039+ return 0;
52040+}
52041+
52042+void
52043+gr_clear_learn_entries(void)
52044+{
52045+ char *tmp;
52046+
52047+ mutex_lock(&gr_learn_user_mutex);
52048+ spin_lock(&gr_learn_lock);
52049+ tmp = learn_buffer;
52050+ learn_buffer = NULL;
52051+ spin_unlock(&gr_learn_lock);
52052+ if (tmp)
52053+ vfree(tmp);
52054+ if (learn_buffer_user != NULL) {
52055+ vfree(learn_buffer_user);
52056+ learn_buffer_user = NULL;
52057+ }
52058+ learn_buffer_len = 0;
52059+ mutex_unlock(&gr_learn_user_mutex);
52060+
52061+ return;
52062+}
52063+
52064+void
52065+gr_add_learn_entry(const char *fmt, ...)
52066+{
52067+ va_list args;
52068+ unsigned int len;
52069+
52070+ if (!gr_learn_attached)
52071+ return;
52072+
52073+ spin_lock(&gr_learn_lock);
52074+
52075+ /* leave a gap at the end so we know when it's "full" but don't have to
52076+ compute the exact length of the string we're trying to append
52077+ */
52078+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
52079+ spin_unlock(&gr_learn_lock);
52080+ wake_up_interruptible(&learn_wait);
52081+ return;
52082+ }
52083+ if (learn_buffer == NULL) {
52084+ spin_unlock(&gr_learn_lock);
52085+ return;
52086+ }
52087+
52088+ va_start(args, fmt);
52089+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
52090+ va_end(args);
52091+
52092+ learn_buffer_len += len + 1;
52093+
52094+ spin_unlock(&gr_learn_lock);
52095+ wake_up_interruptible(&learn_wait);
52096+
52097+ return;
52098+}
52099+
52100+static int
52101+open_learn(struct inode *inode, struct file *file)
52102+{
52103+ if (file->f_mode & FMODE_READ && gr_learn_attached)
52104+ return -EBUSY;
52105+ if (file->f_mode & FMODE_READ) {
52106+ int retval = 0;
52107+ mutex_lock(&gr_learn_user_mutex);
52108+ if (learn_buffer == NULL)
52109+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
52110+ if (learn_buffer_user == NULL)
52111+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
52112+ if (learn_buffer == NULL) {
52113+ retval = -ENOMEM;
52114+ goto out_error;
52115+ }
52116+ if (learn_buffer_user == NULL) {
52117+ retval = -ENOMEM;
52118+ goto out_error;
52119+ }
52120+ learn_buffer_len = 0;
52121+ learn_buffer_user_len = 0;
52122+ gr_learn_attached = 1;
52123+out_error:
52124+ mutex_unlock(&gr_learn_user_mutex);
52125+ return retval;
52126+ }
52127+ return 0;
52128+}
52129+
52130+static int
52131+close_learn(struct inode *inode, struct file *file)
52132+{
52133+ if (file->f_mode & FMODE_READ) {
52134+ char *tmp = NULL;
52135+ mutex_lock(&gr_learn_user_mutex);
52136+ spin_lock(&gr_learn_lock);
52137+ tmp = learn_buffer;
52138+ learn_buffer = NULL;
52139+ spin_unlock(&gr_learn_lock);
52140+ if (tmp)
52141+ vfree(tmp);
52142+ if (learn_buffer_user != NULL) {
52143+ vfree(learn_buffer_user);
52144+ learn_buffer_user = NULL;
52145+ }
52146+ learn_buffer_len = 0;
52147+ learn_buffer_user_len = 0;
52148+ gr_learn_attached = 0;
52149+ mutex_unlock(&gr_learn_user_mutex);
52150+ }
52151+
52152+ return 0;
52153+}
52154+
52155+const struct file_operations grsec_fops = {
52156+ .read = read_learn,
52157+ .write = write_grsec_handler,
52158+ .open = open_learn,
52159+ .release = close_learn,
52160+ .poll = poll_learn,
52161+};
52162diff -urNp linux-3.0.7/grsecurity/gracl_res.c linux-3.0.7/grsecurity/gracl_res.c
52163--- linux-3.0.7/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
52164+++ linux-3.0.7/grsecurity/gracl_res.c 2011-08-23 21:48:14.000000000 -0400
52165@@ -0,0 +1,68 @@
52166+#include <linux/kernel.h>
52167+#include <linux/sched.h>
52168+#include <linux/gracl.h>
52169+#include <linux/grinternal.h>
52170+
52171+static const char *restab_log[] = {
52172+ [RLIMIT_CPU] = "RLIMIT_CPU",
52173+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
52174+ [RLIMIT_DATA] = "RLIMIT_DATA",
52175+ [RLIMIT_STACK] = "RLIMIT_STACK",
52176+ [RLIMIT_CORE] = "RLIMIT_CORE",
52177+ [RLIMIT_RSS] = "RLIMIT_RSS",
52178+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
52179+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
52180+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
52181+ [RLIMIT_AS] = "RLIMIT_AS",
52182+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
52183+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
52184+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
52185+ [RLIMIT_NICE] = "RLIMIT_NICE",
52186+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
52187+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
52188+ [GR_CRASH_RES] = "RLIMIT_CRASH"
52189+};
52190+
52191+void
52192+gr_log_resource(const struct task_struct *task,
52193+ const int res, const unsigned long wanted, const int gt)
52194+{
52195+ const struct cred *cred;
52196+ unsigned long rlim;
52197+
52198+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
52199+ return;
52200+
52201+ // not yet supported resource
52202+ if (unlikely(!restab_log[res]))
52203+ return;
52204+
52205+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
52206+ rlim = task_rlimit_max(task, res);
52207+ else
52208+ rlim = task_rlimit(task, res);
52209+
52210+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
52211+ return;
52212+
52213+ rcu_read_lock();
52214+ cred = __task_cred(task);
52215+
52216+ if (res == RLIMIT_NPROC &&
52217+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
52218+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
52219+ goto out_rcu_unlock;
52220+ else if (res == RLIMIT_MEMLOCK &&
52221+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
52222+ goto out_rcu_unlock;
52223+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
52224+ goto out_rcu_unlock;
52225+ rcu_read_unlock();
52226+
52227+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
52228+
52229+ return;
52230+out_rcu_unlock:
52231+ rcu_read_unlock();
52232+ return;
52233+}
52234diff -urNp linux-3.0.7/grsecurity/gracl_segv.c linux-3.0.7/grsecurity/gracl_segv.c
52235--- linux-3.0.7/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
52236+++ linux-3.0.7/grsecurity/gracl_segv.c 2011-08-23 21:48:14.000000000 -0400
52237@@ -0,0 +1,299 @@
52238+#include <linux/kernel.h>
52239+#include <linux/mm.h>
52240+#include <asm/uaccess.h>
52241+#include <asm/errno.h>
52242+#include <asm/mman.h>
52243+#include <net/sock.h>
52244+#include <linux/file.h>
52245+#include <linux/fs.h>
52246+#include <linux/net.h>
52247+#include <linux/in.h>
52248+#include <linux/slab.h>
52249+#include <linux/types.h>
52250+#include <linux/sched.h>
52251+#include <linux/timer.h>
52252+#include <linux/gracl.h>
52253+#include <linux/grsecurity.h>
52254+#include <linux/grinternal.h>
52255+
52256+static struct crash_uid *uid_set;
52257+static unsigned short uid_used;
52258+static DEFINE_SPINLOCK(gr_uid_lock);
52259+extern rwlock_t gr_inode_lock;
52260+extern struct acl_subject_label *
52261+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
52262+ struct acl_role_label *role);
52263+
52264+#ifdef CONFIG_BTRFS_FS
52265+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
52266+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
52267+#endif
52268+
52269+static inline dev_t __get_dev(const struct dentry *dentry)
52270+{
52271+#ifdef CONFIG_BTRFS_FS
52272+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
52273+ return get_btrfs_dev_from_inode(dentry->d_inode);
52274+ else
52275+#endif
52276+ return dentry->d_inode->i_sb->s_dev;
52277+}
52278+
52279+int
52280+gr_init_uidset(void)
52281+{
52282+ uid_set =
52283+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
52284+ uid_used = 0;
52285+
52286+ return uid_set ? 1 : 0;
52287+}
52288+
52289+void
52290+gr_free_uidset(void)
52291+{
52292+ if (uid_set)
52293+ kfree(uid_set);
52294+
52295+ return;
52296+}
52297+
52298+int
52299+gr_find_uid(const uid_t uid)
52300+{
52301+ struct crash_uid *tmp = uid_set;
52302+ uid_t buid;
52303+ int low = 0, high = uid_used - 1, mid;
52304+
52305+ while (high >= low) {
52306+ mid = (low + high) >> 1;
52307+ buid = tmp[mid].uid;
52308+ if (buid == uid)
52309+ return mid;
52310+ if (buid > uid)
52311+ high = mid - 1;
52312+ if (buid < uid)
52313+ low = mid + 1;
52314+ }
52315+
52316+ return -1;
52317+}
52318+
52319+static __inline__ void
52320+gr_insertsort(void)
52321+{
52322+ unsigned short i, j;
52323+ struct crash_uid index;
52324+
52325+ for (i = 1; i < uid_used; i++) {
52326+ index = uid_set[i];
52327+ j = i;
52328+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
52329+ uid_set[j] = uid_set[j - 1];
52330+ j--;
52331+ }
52332+ uid_set[j] = index;
52333+ }
52334+
52335+ return;
52336+}
52337+
52338+static __inline__ void
52339+gr_insert_uid(const uid_t uid, const unsigned long expires)
52340+{
52341+ int loc;
52342+
52343+ if (uid_used == GR_UIDTABLE_MAX)
52344+ return;
52345+
52346+ loc = gr_find_uid(uid);
52347+
52348+ if (loc >= 0) {
52349+ uid_set[loc].expires = expires;
52350+ return;
52351+ }
52352+
52353+ uid_set[uid_used].uid = uid;
52354+ uid_set[uid_used].expires = expires;
52355+ uid_used++;
52356+
52357+ gr_insertsort();
52358+
52359+ return;
52360+}
52361+
52362+void
52363+gr_remove_uid(const unsigned short loc)
52364+{
52365+ unsigned short i;
52366+
52367+ for (i = loc + 1; i < uid_used; i++)
52368+ uid_set[i - 1] = uid_set[i];
52369+
52370+ uid_used--;
52371+
52372+ return;
52373+}
52374+
52375+int
52376+gr_check_crash_uid(const uid_t uid)
52377+{
52378+ int loc;
52379+ int ret = 0;
52380+
52381+ if (unlikely(!gr_acl_is_enabled()))
52382+ return 0;
52383+
52384+ spin_lock(&gr_uid_lock);
52385+ loc = gr_find_uid(uid);
52386+
52387+ if (loc < 0)
52388+ goto out_unlock;
52389+
52390+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
52391+ gr_remove_uid(loc);
52392+ else
52393+ ret = 1;
52394+
52395+out_unlock:
52396+ spin_unlock(&gr_uid_lock);
52397+ return ret;
52398+}
52399+
52400+static __inline__ int
52401+proc_is_setxid(const struct cred *cred)
52402+{
52403+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
52404+ cred->uid != cred->fsuid)
52405+ return 1;
52406+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
52407+ cred->gid != cred->fsgid)
52408+ return 1;
52409+
52410+ return 0;
52411+}
52412+
52413+extern int gr_fake_force_sig(int sig, struct task_struct *t);
52414+
52415+void
52416+gr_handle_crash(struct task_struct *task, const int sig)
52417+{
52418+ struct acl_subject_label *curr;
52419+ struct acl_subject_label *curr2;
52420+ struct task_struct *tsk, *tsk2;
52421+ const struct cred *cred;
52422+ const struct cred *cred2;
52423+
52424+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
52425+ return;
52426+
52427+ if (unlikely(!gr_acl_is_enabled()))
52428+ return;
52429+
52430+ curr = task->acl;
52431+
52432+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
52433+ return;
52434+
52435+ if (time_before_eq(curr->expires, get_seconds())) {
52436+ curr->expires = 0;
52437+ curr->crashes = 0;
52438+ }
52439+
52440+ curr->crashes++;
52441+
52442+ if (!curr->expires)
52443+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
52444+
52445+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52446+ time_after(curr->expires, get_seconds())) {
52447+ rcu_read_lock();
52448+ cred = __task_cred(task);
52449+ if (cred->uid && proc_is_setxid(cred)) {
52450+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52451+ spin_lock(&gr_uid_lock);
52452+ gr_insert_uid(cred->uid, curr->expires);
52453+ spin_unlock(&gr_uid_lock);
52454+ curr->expires = 0;
52455+ curr->crashes = 0;
52456+ read_lock(&tasklist_lock);
52457+ do_each_thread(tsk2, tsk) {
52458+ cred2 = __task_cred(tsk);
52459+ if (tsk != task && cred2->uid == cred->uid)
52460+ gr_fake_force_sig(SIGKILL, tsk);
52461+ } while_each_thread(tsk2, tsk);
52462+ read_unlock(&tasklist_lock);
52463+ } else {
52464+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52465+ read_lock(&tasklist_lock);
52466+ do_each_thread(tsk2, tsk) {
52467+ if (likely(tsk != task)) {
52468+ curr2 = tsk->acl;
52469+
52470+ if (curr2->device == curr->device &&
52471+ curr2->inode == curr->inode)
52472+ gr_fake_force_sig(SIGKILL, tsk);
52473+ }
52474+ } while_each_thread(tsk2, tsk);
52475+ read_unlock(&tasklist_lock);
52476+ }
52477+ rcu_read_unlock();
52478+ }
52479+
52480+ return;
52481+}
52482+
52483+int
52484+gr_check_crash_exec(const struct file *filp)
52485+{
52486+ struct acl_subject_label *curr;
52487+
52488+ if (unlikely(!gr_acl_is_enabled()))
52489+ return 0;
52490+
52491+ read_lock(&gr_inode_lock);
52492+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
52493+ __get_dev(filp->f_path.dentry),
52494+ current->role);
52495+ read_unlock(&gr_inode_lock);
52496+
52497+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
52498+ (!curr->crashes && !curr->expires))
52499+ return 0;
52500+
52501+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52502+ time_after(curr->expires, get_seconds()))
52503+ return 1;
52504+ else if (time_before_eq(curr->expires, get_seconds())) {
52505+ curr->crashes = 0;
52506+ curr->expires = 0;
52507+ }
52508+
52509+ return 0;
52510+}
52511+
52512+void
52513+gr_handle_alertkill(struct task_struct *task)
52514+{
52515+ struct acl_subject_label *curracl;
52516+ __u32 curr_ip;
52517+ struct task_struct *p, *p2;
52518+
52519+ if (unlikely(!gr_acl_is_enabled()))
52520+ return;
52521+
52522+ curracl = task->acl;
52523+ curr_ip = task->signal->curr_ip;
52524+
52525+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
52526+ read_lock(&tasklist_lock);
52527+ do_each_thread(p2, p) {
52528+ if (p->signal->curr_ip == curr_ip)
52529+ gr_fake_force_sig(SIGKILL, p);
52530+ } while_each_thread(p2, p);
52531+ read_unlock(&tasklist_lock);
52532+ } else if (curracl->mode & GR_KILLPROC)
52533+ gr_fake_force_sig(SIGKILL, task);
52534+
52535+ return;
52536+}
52537diff -urNp linux-3.0.7/grsecurity/gracl_shm.c linux-3.0.7/grsecurity/gracl_shm.c
52538--- linux-3.0.7/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
52539+++ linux-3.0.7/grsecurity/gracl_shm.c 2011-08-23 21:48:14.000000000 -0400
52540@@ -0,0 +1,40 @@
52541+#include <linux/kernel.h>
52542+#include <linux/mm.h>
52543+#include <linux/sched.h>
52544+#include <linux/file.h>
52545+#include <linux/ipc.h>
52546+#include <linux/gracl.h>
52547+#include <linux/grsecurity.h>
52548+#include <linux/grinternal.h>
52549+
52550+int
52551+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
52552+ const time_t shm_createtime, const uid_t cuid, const int shmid)
52553+{
52554+ struct task_struct *task;
52555+
52556+ if (!gr_acl_is_enabled())
52557+ return 1;
52558+
52559+ rcu_read_lock();
52560+ read_lock(&tasklist_lock);
52561+
52562+ task = find_task_by_vpid(shm_cprid);
52563+
52564+ if (unlikely(!task))
52565+ task = find_task_by_vpid(shm_lapid);
52566+
52567+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
52568+ (task->pid == shm_lapid)) &&
52569+ (task->acl->mode & GR_PROTSHM) &&
52570+ (task->acl != current->acl))) {
52571+ read_unlock(&tasklist_lock);
52572+ rcu_read_unlock();
52573+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
52574+ return 0;
52575+ }
52576+ read_unlock(&tasklist_lock);
52577+ rcu_read_unlock();
52578+
52579+ return 1;
52580+}
52581diff -urNp linux-3.0.7/grsecurity/grsec_chdir.c linux-3.0.7/grsecurity/grsec_chdir.c
52582--- linux-3.0.7/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
52583+++ linux-3.0.7/grsecurity/grsec_chdir.c 2011-08-23 21:48:14.000000000 -0400
52584@@ -0,0 +1,19 @@
52585+#include <linux/kernel.h>
52586+#include <linux/sched.h>
52587+#include <linux/fs.h>
52588+#include <linux/file.h>
52589+#include <linux/grsecurity.h>
52590+#include <linux/grinternal.h>
52591+
52592+void
52593+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
52594+{
52595+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52596+ if ((grsec_enable_chdir && grsec_enable_group &&
52597+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
52598+ !grsec_enable_group)) {
52599+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
52600+ }
52601+#endif
52602+ return;
52603+}
52604diff -urNp linux-3.0.7/grsecurity/grsec_chroot.c linux-3.0.7/grsecurity/grsec_chroot.c
52605--- linux-3.0.7/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
52606+++ linux-3.0.7/grsecurity/grsec_chroot.c 2011-09-15 06:47:48.000000000 -0400
52607@@ -0,0 +1,351 @@
52608+#include <linux/kernel.h>
52609+#include <linux/module.h>
52610+#include <linux/sched.h>
52611+#include <linux/file.h>
52612+#include <linux/fs.h>
52613+#include <linux/mount.h>
52614+#include <linux/types.h>
52615+#include <linux/pid_namespace.h>
52616+#include <linux/grsecurity.h>
52617+#include <linux/grinternal.h>
52618+
52619+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
52620+{
52621+#ifdef CONFIG_GRKERNSEC
52622+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
52623+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
52624+ task->gr_is_chrooted = 1;
52625+ else
52626+ task->gr_is_chrooted = 0;
52627+
52628+ task->gr_chroot_dentry = path->dentry;
52629+#endif
52630+ return;
52631+}
52632+
52633+void gr_clear_chroot_entries(struct task_struct *task)
52634+{
52635+#ifdef CONFIG_GRKERNSEC
52636+ task->gr_is_chrooted = 0;
52637+ task->gr_chroot_dentry = NULL;
52638+#endif
52639+ return;
52640+}
52641+
52642+int
52643+gr_handle_chroot_unix(const pid_t pid)
52644+{
52645+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
52646+ struct task_struct *p;
52647+
52648+ if (unlikely(!grsec_enable_chroot_unix))
52649+ return 1;
52650+
52651+ if (likely(!proc_is_chrooted(current)))
52652+ return 1;
52653+
52654+ rcu_read_lock();
52655+ read_lock(&tasklist_lock);
52656+ p = find_task_by_vpid_unrestricted(pid);
52657+ if (unlikely(p && !have_same_root(current, p))) {
52658+ read_unlock(&tasklist_lock);
52659+ rcu_read_unlock();
52660+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
52661+ return 0;
52662+ }
52663+ read_unlock(&tasklist_lock);
52664+ rcu_read_unlock();
52665+#endif
52666+ return 1;
52667+}
52668+
52669+int
52670+gr_handle_chroot_nice(void)
52671+{
52672+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52673+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
52674+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
52675+ return -EPERM;
52676+ }
52677+#endif
52678+ return 0;
52679+}
52680+
52681+int
52682+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
52683+{
52684+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52685+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
52686+ && proc_is_chrooted(current)) {
52687+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
52688+ return -EACCES;
52689+ }
52690+#endif
52691+ return 0;
52692+}
52693+
52694+int
52695+gr_handle_chroot_rawio(const struct inode *inode)
52696+{
52697+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52698+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
52699+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
52700+ return 1;
52701+#endif
52702+ return 0;
52703+}
52704+
52705+int
52706+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
52707+{
52708+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52709+ struct task_struct *p;
52710+ int ret = 0;
52711+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
52712+ return ret;
52713+
52714+ read_lock(&tasklist_lock);
52715+ do_each_pid_task(pid, type, p) {
52716+ if (!have_same_root(current, p)) {
52717+ ret = 1;
52718+ goto out;
52719+ }
52720+ } while_each_pid_task(pid, type, p);
52721+out:
52722+ read_unlock(&tasklist_lock);
52723+ return ret;
52724+#endif
52725+ return 0;
52726+}
52727+
52728+int
52729+gr_pid_is_chrooted(struct task_struct *p)
52730+{
52731+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52732+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
52733+ return 0;
52734+
52735+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
52736+ !have_same_root(current, p)) {
52737+ return 1;
52738+ }
52739+#endif
52740+ return 0;
52741+}
52742+
52743+EXPORT_SYMBOL(gr_pid_is_chrooted);
52744+
52745+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
52746+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
52747+{
52748+ struct path path, currentroot;
52749+ int ret = 0;
52750+
52751+ path.dentry = (struct dentry *)u_dentry;
52752+ path.mnt = (struct vfsmount *)u_mnt;
52753+ get_fs_root(current->fs, &currentroot);
52754+ if (path_is_under(&path, &currentroot))
52755+ ret = 1;
52756+ path_put(&currentroot);
52757+
52758+ return ret;
52759+}
52760+#endif
52761+
52762+int
52763+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
52764+{
52765+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
52766+ if (!grsec_enable_chroot_fchdir)
52767+ return 1;
52768+
52769+ if (!proc_is_chrooted(current))
52770+ return 1;
52771+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
52772+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
52773+ return 0;
52774+ }
52775+#endif
52776+ return 1;
52777+}
52778+
52779+int
52780+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
52781+ const time_t shm_createtime)
52782+{
52783+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
52784+ struct task_struct *p;
52785+ time_t starttime;
52786+
52787+ if (unlikely(!grsec_enable_chroot_shmat))
52788+ return 1;
52789+
52790+ if (likely(!proc_is_chrooted(current)))
52791+ return 1;
52792+
52793+ rcu_read_lock();
52794+ read_lock(&tasklist_lock);
52795+
52796+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
52797+ starttime = p->start_time.tv_sec;
52798+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
52799+ if (have_same_root(current, p)) {
52800+ goto allow;
52801+ } else {
52802+ read_unlock(&tasklist_lock);
52803+ rcu_read_unlock();
52804+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
52805+ return 0;
52806+ }
52807+ }
52808+ /* creator exited, pid reuse, fall through to next check */
52809+ }
52810+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
52811+ if (unlikely(!have_same_root(current, p))) {
52812+ read_unlock(&tasklist_lock);
52813+ rcu_read_unlock();
52814+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
52815+ return 0;
52816+ }
52817+ }
52818+
52819+allow:
52820+ read_unlock(&tasklist_lock);
52821+ rcu_read_unlock();
52822+#endif
52823+ return 1;
52824+}
52825+
52826+void
52827+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
52828+{
52829+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
52830+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
52831+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
52832+#endif
52833+ return;
52834+}
52835+
52836+int
52837+gr_handle_chroot_mknod(const struct dentry *dentry,
52838+ const struct vfsmount *mnt, const int mode)
52839+{
52840+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
52841+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
52842+ proc_is_chrooted(current)) {
52843+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
52844+ return -EPERM;
52845+ }
52846+#endif
52847+ return 0;
52848+}
52849+
52850+int
52851+gr_handle_chroot_mount(const struct dentry *dentry,
52852+ const struct vfsmount *mnt, const char *dev_name)
52853+{
52854+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
52855+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
52856+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
52857+ return -EPERM;
52858+ }
52859+#endif
52860+ return 0;
52861+}
52862+
52863+int
52864+gr_handle_chroot_pivot(void)
52865+{
52866+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
52867+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
52868+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
52869+ return -EPERM;
52870+ }
52871+#endif
52872+ return 0;
52873+}
52874+
52875+int
52876+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
52877+{
52878+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
52879+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
52880+ !gr_is_outside_chroot(dentry, mnt)) {
52881+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
52882+ return -EPERM;
52883+ }
52884+#endif
52885+ return 0;
52886+}
52887+
52888+extern const char *captab_log[];
52889+extern int captab_log_entries;
52890+
52891+int
52892+gr_chroot_is_capable(const int cap)
52893+{
52894+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52895+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
52896+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
52897+ if (cap_raised(chroot_caps, cap)) {
52898+ const struct cred *creds = current_cred();
52899+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
52900+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
52901+ }
52902+ return 0;
52903+ }
52904+ }
52905+#endif
52906+ return 1;
52907+}
52908+
52909+int
52910+gr_chroot_is_capable_nolog(const int cap)
52911+{
52912+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52913+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
52914+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
52915+ if (cap_raised(chroot_caps, cap)) {
52916+ return 0;
52917+ }
52918+ }
52919+#endif
52920+ return 1;
52921+}
52922+
52923+int
52924+gr_handle_chroot_sysctl(const int op)
52925+{
52926+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
52927+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
52928+ proc_is_chrooted(current))
52929+ return -EACCES;
52930+#endif
52931+ return 0;
52932+}
52933+
52934+void
52935+gr_handle_chroot_chdir(struct path *path)
52936+{
52937+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
52938+ if (grsec_enable_chroot_chdir)
52939+ set_fs_pwd(current->fs, path);
52940+#endif
52941+ return;
52942+}
52943+
52944+int
52945+gr_handle_chroot_chmod(const struct dentry *dentry,
52946+ const struct vfsmount *mnt, const int mode)
52947+{
52948+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
52949+ /* allow chmod +s on directories, but not files */
52950+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
52951+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
52952+ proc_is_chrooted(current)) {
52953+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
52954+ return -EPERM;
52955+ }
52956+#endif
52957+ return 0;
52958+}
52959diff -urNp linux-3.0.7/grsecurity/grsec_disabled.c linux-3.0.7/grsecurity/grsec_disabled.c
52960--- linux-3.0.7/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
52961+++ linux-3.0.7/grsecurity/grsec_disabled.c 2011-09-24 08:13:01.000000000 -0400
52962@@ -0,0 +1,433 @@
52963+#include <linux/kernel.h>
52964+#include <linux/module.h>
52965+#include <linux/sched.h>
52966+#include <linux/file.h>
52967+#include <linux/fs.h>
52968+#include <linux/kdev_t.h>
52969+#include <linux/net.h>
52970+#include <linux/in.h>
52971+#include <linux/ip.h>
52972+#include <linux/skbuff.h>
52973+#include <linux/sysctl.h>
52974+
52975+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
52976+void
52977+pax_set_initial_flags(struct linux_binprm *bprm)
52978+{
52979+ return;
52980+}
52981+#endif
52982+
52983+#ifdef CONFIG_SYSCTL
52984+__u32
52985+gr_handle_sysctl(const struct ctl_table * table, const int op)
52986+{
52987+ return 0;
52988+}
52989+#endif
52990+
52991+#ifdef CONFIG_TASKSTATS
52992+int gr_is_taskstats_denied(int pid)
52993+{
52994+ return 0;
52995+}
52996+#endif
52997+
52998+int
52999+gr_acl_is_enabled(void)
53000+{
53001+ return 0;
53002+}
53003+
53004+int
53005+gr_handle_rawio(const struct inode *inode)
53006+{
53007+ return 0;
53008+}
53009+
53010+void
53011+gr_acl_handle_psacct(struct task_struct *task, const long code)
53012+{
53013+ return;
53014+}
53015+
53016+int
53017+gr_handle_ptrace(struct task_struct *task, const long request)
53018+{
53019+ return 0;
53020+}
53021+
53022+int
53023+gr_handle_proc_ptrace(struct task_struct *task)
53024+{
53025+ return 0;
53026+}
53027+
53028+void
53029+gr_learn_resource(const struct task_struct *task,
53030+ const int res, const unsigned long wanted, const int gt)
53031+{
53032+ return;
53033+}
53034+
53035+int
53036+gr_set_acls(const int type)
53037+{
53038+ return 0;
53039+}
53040+
53041+int
53042+gr_check_hidden_task(const struct task_struct *tsk)
53043+{
53044+ return 0;
53045+}
53046+
53047+int
53048+gr_check_protected_task(const struct task_struct *task)
53049+{
53050+ return 0;
53051+}
53052+
53053+int
53054+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
53055+{
53056+ return 0;
53057+}
53058+
53059+void
53060+gr_copy_label(struct task_struct *tsk)
53061+{
53062+ return;
53063+}
53064+
53065+void
53066+gr_set_pax_flags(struct task_struct *task)
53067+{
53068+ return;
53069+}
53070+
53071+int
53072+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
53073+ const int unsafe_share)
53074+{
53075+ return 0;
53076+}
53077+
53078+void
53079+gr_handle_delete(const ino_t ino, const dev_t dev)
53080+{
53081+ return;
53082+}
53083+
53084+void
53085+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
53086+{
53087+ return;
53088+}
53089+
53090+void
53091+gr_handle_crash(struct task_struct *task, const int sig)
53092+{
53093+ return;
53094+}
53095+
53096+int
53097+gr_check_crash_exec(const struct file *filp)
53098+{
53099+ return 0;
53100+}
53101+
53102+int
53103+gr_check_crash_uid(const uid_t uid)
53104+{
53105+ return 0;
53106+}
53107+
53108+void
53109+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53110+ struct dentry *old_dentry,
53111+ struct dentry *new_dentry,
53112+ struct vfsmount *mnt, const __u8 replace)
53113+{
53114+ return;
53115+}
53116+
53117+int
53118+gr_search_socket(const int family, const int type, const int protocol)
53119+{
53120+ return 1;
53121+}
53122+
53123+int
53124+gr_search_connectbind(const int mode, const struct socket *sock,
53125+ const struct sockaddr_in *addr)
53126+{
53127+ return 0;
53128+}
53129+
53130+void
53131+gr_handle_alertkill(struct task_struct *task)
53132+{
53133+ return;
53134+}
53135+
53136+__u32
53137+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
53138+{
53139+ return 1;
53140+}
53141+
53142+__u32
53143+gr_acl_handle_hidden_file(const struct dentry * dentry,
53144+ const struct vfsmount * mnt)
53145+{
53146+ return 1;
53147+}
53148+
53149+__u32
53150+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
53151+ const int fmode)
53152+{
53153+ return 1;
53154+}
53155+
53156+__u32
53157+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
53158+{
53159+ return 1;
53160+}
53161+
53162+__u32
53163+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
53164+{
53165+ return 1;
53166+}
53167+
53168+int
53169+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
53170+ unsigned int *vm_flags)
53171+{
53172+ return 1;
53173+}
53174+
53175+__u32
53176+gr_acl_handle_truncate(const struct dentry * dentry,
53177+ const struct vfsmount * mnt)
53178+{
53179+ return 1;
53180+}
53181+
53182+__u32
53183+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
53184+{
53185+ return 1;
53186+}
53187+
53188+__u32
53189+gr_acl_handle_access(const struct dentry * dentry,
53190+ const struct vfsmount * mnt, const int fmode)
53191+{
53192+ return 1;
53193+}
53194+
53195+__u32
53196+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
53197+ mode_t mode)
53198+{
53199+ return 1;
53200+}
53201+
53202+__u32
53203+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
53204+ mode_t mode)
53205+{
53206+ return 1;
53207+}
53208+
53209+__u32
53210+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
53211+{
53212+ return 1;
53213+}
53214+
53215+__u32
53216+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
53217+{
53218+ return 1;
53219+}
53220+
53221+void
53222+grsecurity_init(void)
53223+{
53224+ return;
53225+}
53226+
53227+__u32
53228+gr_acl_handle_mknod(const struct dentry * new_dentry,
53229+ const struct dentry * parent_dentry,
53230+ const struct vfsmount * parent_mnt,
53231+ const int mode)
53232+{
53233+ return 1;
53234+}
53235+
53236+__u32
53237+gr_acl_handle_mkdir(const struct dentry * new_dentry,
53238+ const struct dentry * parent_dentry,
53239+ const struct vfsmount * parent_mnt)
53240+{
53241+ return 1;
53242+}
53243+
53244+__u32
53245+gr_acl_handle_symlink(const struct dentry * new_dentry,
53246+ const struct dentry * parent_dentry,
53247+ const struct vfsmount * parent_mnt, const char *from)
53248+{
53249+ return 1;
53250+}
53251+
53252+__u32
53253+gr_acl_handle_link(const struct dentry * new_dentry,
53254+ const struct dentry * parent_dentry,
53255+ const struct vfsmount * parent_mnt,
53256+ const struct dentry * old_dentry,
53257+ const struct vfsmount * old_mnt, const char *to)
53258+{
53259+ return 1;
53260+}
53261+
53262+int
53263+gr_acl_handle_rename(const struct dentry *new_dentry,
53264+ const struct dentry *parent_dentry,
53265+ const struct vfsmount *parent_mnt,
53266+ const struct dentry *old_dentry,
53267+ const struct inode *old_parent_inode,
53268+ const struct vfsmount *old_mnt, const char *newname)
53269+{
53270+ return 0;
53271+}
53272+
53273+int
53274+gr_acl_handle_filldir(const struct file *file, const char *name,
53275+ const int namelen, const ino_t ino)
53276+{
53277+ return 1;
53278+}
53279+
53280+int
53281+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53282+ const time_t shm_createtime, const uid_t cuid, const int shmid)
53283+{
53284+ return 1;
53285+}
53286+
53287+int
53288+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
53289+{
53290+ return 0;
53291+}
53292+
53293+int
53294+gr_search_accept(const struct socket *sock)
53295+{
53296+ return 0;
53297+}
53298+
53299+int
53300+gr_search_listen(const struct socket *sock)
53301+{
53302+ return 0;
53303+}
53304+
53305+int
53306+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
53307+{
53308+ return 0;
53309+}
53310+
53311+__u32
53312+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
53313+{
53314+ return 1;
53315+}
53316+
53317+__u32
53318+gr_acl_handle_creat(const struct dentry * dentry,
53319+ const struct dentry * p_dentry,
53320+ const struct vfsmount * p_mnt, const int fmode,
53321+ const int imode)
53322+{
53323+ return 1;
53324+}
53325+
53326+void
53327+gr_acl_handle_exit(void)
53328+{
53329+ return;
53330+}
53331+
53332+int
53333+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
53334+{
53335+ return 1;
53336+}
53337+
53338+void
53339+gr_set_role_label(const uid_t uid, const gid_t gid)
53340+{
53341+ return;
53342+}
53343+
53344+int
53345+gr_acl_handle_procpidmem(const struct task_struct *task)
53346+{
53347+ return 0;
53348+}
53349+
53350+int
53351+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
53352+{
53353+ return 0;
53354+}
53355+
53356+int
53357+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
53358+{
53359+ return 0;
53360+}
53361+
53362+void
53363+gr_set_kernel_label(struct task_struct *task)
53364+{
53365+ return;
53366+}
53367+
53368+int
53369+gr_check_user_change(int real, int effective, int fs)
53370+{
53371+ return 0;
53372+}
53373+
53374+int
53375+gr_check_group_change(int real, int effective, int fs)
53376+{
53377+ return 0;
53378+}
53379+
53380+int gr_acl_enable_at_secure(void)
53381+{
53382+ return 0;
53383+}
53384+
53385+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
53386+{
53387+ return dentry->d_inode->i_sb->s_dev;
53388+}
53389+
53390+EXPORT_SYMBOL(gr_learn_resource);
53391+EXPORT_SYMBOL(gr_set_kernel_label);
53392+#ifdef CONFIG_SECURITY
53393+EXPORT_SYMBOL(gr_check_user_change);
53394+EXPORT_SYMBOL(gr_check_group_change);
53395+#endif
53396diff -urNp linux-3.0.7/grsecurity/grsec_exec.c linux-3.0.7/grsecurity/grsec_exec.c
53397--- linux-3.0.7/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
53398+++ linux-3.0.7/grsecurity/grsec_exec.c 2011-09-14 09:20:28.000000000 -0400
53399@@ -0,0 +1,145 @@
53400+#include <linux/kernel.h>
53401+#include <linux/sched.h>
53402+#include <linux/file.h>
53403+#include <linux/binfmts.h>
53404+#include <linux/fs.h>
53405+#include <linux/types.h>
53406+#include <linux/grdefs.h>
53407+#include <linux/grsecurity.h>
53408+#include <linux/grinternal.h>
53409+#include <linux/capability.h>
53410+#include <linux/module.h>
53411+
53412+#include <asm/uaccess.h>
53413+
53414+#ifdef CONFIG_GRKERNSEC_EXECLOG
53415+static char gr_exec_arg_buf[132];
53416+static DEFINE_MUTEX(gr_exec_arg_mutex);
53417+#endif
53418+
53419+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
53420+
53421+void
53422+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
53423+{
53424+#ifdef CONFIG_GRKERNSEC_EXECLOG
53425+ char *grarg = gr_exec_arg_buf;
53426+ unsigned int i, x, execlen = 0;
53427+ char c;
53428+
53429+ if (!((grsec_enable_execlog && grsec_enable_group &&
53430+ in_group_p(grsec_audit_gid))
53431+ || (grsec_enable_execlog && !grsec_enable_group)))
53432+ return;
53433+
53434+ mutex_lock(&gr_exec_arg_mutex);
53435+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
53436+
53437+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
53438+ const char __user *p;
53439+ unsigned int len;
53440+
53441+ p = get_user_arg_ptr(argv, i);
53442+ if (IS_ERR(p))
53443+ goto log;
53444+
53445+ len = strnlen_user(p, 128 - execlen);
53446+ if (len > 128 - execlen)
53447+ len = 128 - execlen;
53448+ else if (len > 0)
53449+ len--;
53450+ if (copy_from_user(grarg + execlen, p, len))
53451+ goto log;
53452+
53453+ /* rewrite unprintable characters */
53454+ for (x = 0; x < len; x++) {
53455+ c = *(grarg + execlen + x);
53456+ if (c < 32 || c > 126)
53457+ *(grarg + execlen + x) = ' ';
53458+ }
53459+
53460+ execlen += len;
53461+ *(grarg + execlen) = ' ';
53462+ *(grarg + execlen + 1) = '\0';
53463+ execlen++;
53464+ }
53465+
53466+ log:
53467+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
53468+ bprm->file->f_path.mnt, grarg);
53469+ mutex_unlock(&gr_exec_arg_mutex);
53470+#endif
53471+ return;
53472+}
53473+
53474+#ifdef CONFIG_GRKERNSEC
53475+extern int gr_acl_is_capable(const int cap);
53476+extern int gr_acl_is_capable_nolog(const int cap);
53477+extern int gr_chroot_is_capable(const int cap);
53478+extern int gr_chroot_is_capable_nolog(const int cap);
53479+#endif
53480+
53481+const char *captab_log[] = {
53482+ "CAP_CHOWN",
53483+ "CAP_DAC_OVERRIDE",
53484+ "CAP_DAC_READ_SEARCH",
53485+ "CAP_FOWNER",
53486+ "CAP_FSETID",
53487+ "CAP_KILL",
53488+ "CAP_SETGID",
53489+ "CAP_SETUID",
53490+ "CAP_SETPCAP",
53491+ "CAP_LINUX_IMMUTABLE",
53492+ "CAP_NET_BIND_SERVICE",
53493+ "CAP_NET_BROADCAST",
53494+ "CAP_NET_ADMIN",
53495+ "CAP_NET_RAW",
53496+ "CAP_IPC_LOCK",
53497+ "CAP_IPC_OWNER",
53498+ "CAP_SYS_MODULE",
53499+ "CAP_SYS_RAWIO",
53500+ "CAP_SYS_CHROOT",
53501+ "CAP_SYS_PTRACE",
53502+ "CAP_SYS_PACCT",
53503+ "CAP_SYS_ADMIN",
53504+ "CAP_SYS_BOOT",
53505+ "CAP_SYS_NICE",
53506+ "CAP_SYS_RESOURCE",
53507+ "CAP_SYS_TIME",
53508+ "CAP_SYS_TTY_CONFIG",
53509+ "CAP_MKNOD",
53510+ "CAP_LEASE",
53511+ "CAP_AUDIT_WRITE",
53512+ "CAP_AUDIT_CONTROL",
53513+ "CAP_SETFCAP",
53514+ "CAP_MAC_OVERRIDE",
53515+ "CAP_MAC_ADMIN",
53516+ "CAP_SYSLOG"
53517+};
53518+
53519+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
53520+
53521+int gr_is_capable(const int cap)
53522+{
53523+#ifdef CONFIG_GRKERNSEC
53524+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
53525+ return 1;
53526+ return 0;
53527+#else
53528+ return 1;
53529+#endif
53530+}
53531+
53532+int gr_is_capable_nolog(const int cap)
53533+{
53534+#ifdef CONFIG_GRKERNSEC
53535+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
53536+ return 1;
53537+ return 0;
53538+#else
53539+ return 1;
53540+#endif
53541+}
53542+
53543+EXPORT_SYMBOL(gr_is_capable);
53544+EXPORT_SYMBOL(gr_is_capable_nolog);
53545diff -urNp linux-3.0.7/grsecurity/grsec_fifo.c linux-3.0.7/grsecurity/grsec_fifo.c
53546--- linux-3.0.7/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
53547+++ linux-3.0.7/grsecurity/grsec_fifo.c 2011-08-23 21:48:14.000000000 -0400
53548@@ -0,0 +1,24 @@
53549+#include <linux/kernel.h>
53550+#include <linux/sched.h>
53551+#include <linux/fs.h>
53552+#include <linux/file.h>
53553+#include <linux/grinternal.h>
53554+
53555+int
53556+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
53557+ const struct dentry *dir, const int flag, const int acc_mode)
53558+{
53559+#ifdef CONFIG_GRKERNSEC_FIFO
53560+ const struct cred *cred = current_cred();
53561+
53562+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
53563+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
53564+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
53565+ (cred->fsuid != dentry->d_inode->i_uid)) {
53566+ if (!inode_permission(dentry->d_inode, acc_mode))
53567+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
53568+ return -EACCES;
53569+ }
53570+#endif
53571+ return 0;
53572+}
53573diff -urNp linux-3.0.7/grsecurity/grsec_fork.c linux-3.0.7/grsecurity/grsec_fork.c
53574--- linux-3.0.7/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
53575+++ linux-3.0.7/grsecurity/grsec_fork.c 2011-08-23 21:48:14.000000000 -0400
53576@@ -0,0 +1,23 @@
53577+#include <linux/kernel.h>
53578+#include <linux/sched.h>
53579+#include <linux/grsecurity.h>
53580+#include <linux/grinternal.h>
53581+#include <linux/errno.h>
53582+
53583+void
53584+gr_log_forkfail(const int retval)
53585+{
53586+#ifdef CONFIG_GRKERNSEC_FORKFAIL
53587+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
53588+ switch (retval) {
53589+ case -EAGAIN:
53590+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
53591+ break;
53592+ case -ENOMEM:
53593+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
53594+ break;
53595+ }
53596+ }
53597+#endif
53598+ return;
53599+}
53600diff -urNp linux-3.0.7/grsecurity/grsec_init.c linux-3.0.7/grsecurity/grsec_init.c
53601--- linux-3.0.7/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
53602+++ linux-3.0.7/grsecurity/grsec_init.c 2011-08-25 17:25:12.000000000 -0400
53603@@ -0,0 +1,269 @@
53604+#include <linux/kernel.h>
53605+#include <linux/sched.h>
53606+#include <linux/mm.h>
53607+#include <linux/gracl.h>
53608+#include <linux/slab.h>
53609+#include <linux/vmalloc.h>
53610+#include <linux/percpu.h>
53611+#include <linux/module.h>
53612+
53613+int grsec_enable_brute;
53614+int grsec_enable_link;
53615+int grsec_enable_dmesg;
53616+int grsec_enable_harden_ptrace;
53617+int grsec_enable_fifo;
53618+int grsec_enable_execlog;
53619+int grsec_enable_signal;
53620+int grsec_enable_forkfail;
53621+int grsec_enable_audit_ptrace;
53622+int grsec_enable_time;
53623+int grsec_enable_audit_textrel;
53624+int grsec_enable_group;
53625+int grsec_audit_gid;
53626+int grsec_enable_chdir;
53627+int grsec_enable_mount;
53628+int grsec_enable_rofs;
53629+int grsec_enable_chroot_findtask;
53630+int grsec_enable_chroot_mount;
53631+int grsec_enable_chroot_shmat;
53632+int grsec_enable_chroot_fchdir;
53633+int grsec_enable_chroot_double;
53634+int grsec_enable_chroot_pivot;
53635+int grsec_enable_chroot_chdir;
53636+int grsec_enable_chroot_chmod;
53637+int grsec_enable_chroot_mknod;
53638+int grsec_enable_chroot_nice;
53639+int grsec_enable_chroot_execlog;
53640+int grsec_enable_chroot_caps;
53641+int grsec_enable_chroot_sysctl;
53642+int grsec_enable_chroot_unix;
53643+int grsec_enable_tpe;
53644+int grsec_tpe_gid;
53645+int grsec_enable_blackhole;
53646+#ifdef CONFIG_IPV6_MODULE
53647+EXPORT_SYMBOL(grsec_enable_blackhole);
53648+#endif
53649+int grsec_lastack_retries;
53650+int grsec_enable_tpe_all;
53651+int grsec_enable_tpe_invert;
53652+int grsec_enable_socket_all;
53653+int grsec_socket_all_gid;
53654+int grsec_enable_socket_client;
53655+int grsec_socket_client_gid;
53656+int grsec_enable_socket_server;
53657+int grsec_socket_server_gid;
53658+int grsec_resource_logging;
53659+int grsec_disable_privio;
53660+int grsec_enable_log_rwxmaps;
53661+int grsec_lock;
53662+
53663+DEFINE_SPINLOCK(grsec_alert_lock);
53664+unsigned long grsec_alert_wtime = 0;
53665+unsigned long grsec_alert_fyet = 0;
53666+
53667+DEFINE_SPINLOCK(grsec_audit_lock);
53668+
53669+DEFINE_RWLOCK(grsec_exec_file_lock);
53670+
53671+char *gr_shared_page[4];
53672+
53673+char *gr_alert_log_fmt;
53674+char *gr_audit_log_fmt;
53675+char *gr_alert_log_buf;
53676+char *gr_audit_log_buf;
53677+
53678+extern struct gr_arg *gr_usermode;
53679+extern unsigned char *gr_system_salt;
53680+extern unsigned char *gr_system_sum;
53681+
53682+void __init
53683+grsecurity_init(void)
53684+{
53685+ int j;
53686+ /* create the per-cpu shared pages */
53687+
53688+#ifdef CONFIG_X86
53689+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
53690+#endif
53691+
53692+ for (j = 0; j < 4; j++) {
53693+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
53694+ if (gr_shared_page[j] == NULL) {
53695+ panic("Unable to allocate grsecurity shared page");
53696+ return;
53697+ }
53698+ }
53699+
53700+ /* allocate log buffers */
53701+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
53702+ if (!gr_alert_log_fmt) {
53703+ panic("Unable to allocate grsecurity alert log format buffer");
53704+ return;
53705+ }
53706+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
53707+ if (!gr_audit_log_fmt) {
53708+ panic("Unable to allocate grsecurity audit log format buffer");
53709+ return;
53710+ }
53711+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
53712+ if (!gr_alert_log_buf) {
53713+ panic("Unable to allocate grsecurity alert log buffer");
53714+ return;
53715+ }
53716+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
53717+ if (!gr_audit_log_buf) {
53718+ panic("Unable to allocate grsecurity audit log buffer");
53719+ return;
53720+ }
53721+
53722+ /* allocate memory for authentication structure */
53723+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
53724+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
53725+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
53726+
53727+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
53728+ panic("Unable to allocate grsecurity authentication structure");
53729+ return;
53730+ }
53731+
53732+
53733+#ifdef CONFIG_GRKERNSEC_IO
53734+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
53735+ grsec_disable_privio = 1;
53736+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
53737+ grsec_disable_privio = 1;
53738+#else
53739+ grsec_disable_privio = 0;
53740+#endif
53741+#endif
53742+
53743+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
53744+ /* for backward compatibility, tpe_invert always defaults to on if
53745+ enabled in the kernel
53746+ */
53747+ grsec_enable_tpe_invert = 1;
53748+#endif
53749+
53750+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
53751+#ifndef CONFIG_GRKERNSEC_SYSCTL
53752+ grsec_lock = 1;
53753+#endif
53754+
53755+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
53756+ grsec_enable_audit_textrel = 1;
53757+#endif
53758+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
53759+ grsec_enable_log_rwxmaps = 1;
53760+#endif
53761+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
53762+ grsec_enable_group = 1;
53763+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
53764+#endif
53765+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
53766+ grsec_enable_chdir = 1;
53767+#endif
53768+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53769+ grsec_enable_harden_ptrace = 1;
53770+#endif
53771+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
53772+ grsec_enable_mount = 1;
53773+#endif
53774+#ifdef CONFIG_GRKERNSEC_LINK
53775+ grsec_enable_link = 1;
53776+#endif
53777+#ifdef CONFIG_GRKERNSEC_BRUTE
53778+ grsec_enable_brute = 1;
53779+#endif
53780+#ifdef CONFIG_GRKERNSEC_DMESG
53781+ grsec_enable_dmesg = 1;
53782+#endif
53783+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
53784+ grsec_enable_blackhole = 1;
53785+ grsec_lastack_retries = 4;
53786+#endif
53787+#ifdef CONFIG_GRKERNSEC_FIFO
53788+ grsec_enable_fifo = 1;
53789+#endif
53790+#ifdef CONFIG_GRKERNSEC_EXECLOG
53791+ grsec_enable_execlog = 1;
53792+#endif
53793+#ifdef CONFIG_GRKERNSEC_SIGNAL
53794+ grsec_enable_signal = 1;
53795+#endif
53796+#ifdef CONFIG_GRKERNSEC_FORKFAIL
53797+ grsec_enable_forkfail = 1;
53798+#endif
53799+#ifdef CONFIG_GRKERNSEC_TIME
53800+ grsec_enable_time = 1;
53801+#endif
53802+#ifdef CONFIG_GRKERNSEC_RESLOG
53803+ grsec_resource_logging = 1;
53804+#endif
53805+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53806+ grsec_enable_chroot_findtask = 1;
53807+#endif
53808+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
53809+ grsec_enable_chroot_unix = 1;
53810+#endif
53811+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
53812+ grsec_enable_chroot_mount = 1;
53813+#endif
53814+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
53815+ grsec_enable_chroot_fchdir = 1;
53816+#endif
53817+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
53818+ grsec_enable_chroot_shmat = 1;
53819+#endif
53820+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
53821+ grsec_enable_audit_ptrace = 1;
53822+#endif
53823+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
53824+ grsec_enable_chroot_double = 1;
53825+#endif
53826+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
53827+ grsec_enable_chroot_pivot = 1;
53828+#endif
53829+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
53830+ grsec_enable_chroot_chdir = 1;
53831+#endif
53832+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
53833+ grsec_enable_chroot_chmod = 1;
53834+#endif
53835+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
53836+ grsec_enable_chroot_mknod = 1;
53837+#endif
53838+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53839+ grsec_enable_chroot_nice = 1;
53840+#endif
53841+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
53842+ grsec_enable_chroot_execlog = 1;
53843+#endif
53844+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53845+ grsec_enable_chroot_caps = 1;
53846+#endif
53847+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
53848+ grsec_enable_chroot_sysctl = 1;
53849+#endif
53850+#ifdef CONFIG_GRKERNSEC_TPE
53851+ grsec_enable_tpe = 1;
53852+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
53853+#ifdef CONFIG_GRKERNSEC_TPE_ALL
53854+ grsec_enable_tpe_all = 1;
53855+#endif
53856+#endif
53857+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
53858+ grsec_enable_socket_all = 1;
53859+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
53860+#endif
53861+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
53862+ grsec_enable_socket_client = 1;
53863+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
53864+#endif
53865+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
53866+ grsec_enable_socket_server = 1;
53867+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
53868+#endif
53869+#endif
53870+
53871+ return;
53872+}
53873diff -urNp linux-3.0.7/grsecurity/grsec_link.c linux-3.0.7/grsecurity/grsec_link.c
53874--- linux-3.0.7/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
53875+++ linux-3.0.7/grsecurity/grsec_link.c 2011-08-23 21:48:14.000000000 -0400
53876@@ -0,0 +1,43 @@
53877+#include <linux/kernel.h>
53878+#include <linux/sched.h>
53879+#include <linux/fs.h>
53880+#include <linux/file.h>
53881+#include <linux/grinternal.h>
53882+
53883+int
53884+gr_handle_follow_link(const struct inode *parent,
53885+ const struct inode *inode,
53886+ const struct dentry *dentry, const struct vfsmount *mnt)
53887+{
53888+#ifdef CONFIG_GRKERNSEC_LINK
53889+ const struct cred *cred = current_cred();
53890+
53891+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
53892+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
53893+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
53894+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
53895+ return -EACCES;
53896+ }
53897+#endif
53898+ return 0;
53899+}
53900+
53901+int
53902+gr_handle_hardlink(const struct dentry *dentry,
53903+ const struct vfsmount *mnt,
53904+ struct inode *inode, const int mode, const char *to)
53905+{
53906+#ifdef CONFIG_GRKERNSEC_LINK
53907+ const struct cred *cred = current_cred();
53908+
53909+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
53910+ (!S_ISREG(mode) || (mode & S_ISUID) ||
53911+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
53912+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
53913+ !capable(CAP_FOWNER) && cred->uid) {
53914+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
53915+ return -EPERM;
53916+ }
53917+#endif
53918+ return 0;
53919+}
53920diff -urNp linux-3.0.7/grsecurity/grsec_log.c linux-3.0.7/grsecurity/grsec_log.c
53921--- linux-3.0.7/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
53922+++ linux-3.0.7/grsecurity/grsec_log.c 2011-09-26 10:46:21.000000000 -0400
53923@@ -0,0 +1,315 @@
53924+#include <linux/kernel.h>
53925+#include <linux/sched.h>
53926+#include <linux/file.h>
53927+#include <linux/tty.h>
53928+#include <linux/fs.h>
53929+#include <linux/grinternal.h>
53930+
53931+#ifdef CONFIG_TREE_PREEMPT_RCU
53932+#define DISABLE_PREEMPT() preempt_disable()
53933+#define ENABLE_PREEMPT() preempt_enable()
53934+#else
53935+#define DISABLE_PREEMPT()
53936+#define ENABLE_PREEMPT()
53937+#endif
53938+
53939+#define BEGIN_LOCKS(x) \
53940+ DISABLE_PREEMPT(); \
53941+ rcu_read_lock(); \
53942+ read_lock(&tasklist_lock); \
53943+ read_lock(&grsec_exec_file_lock); \
53944+ if (x != GR_DO_AUDIT) \
53945+ spin_lock(&grsec_alert_lock); \
53946+ else \
53947+ spin_lock(&grsec_audit_lock)
53948+
53949+#define END_LOCKS(x) \
53950+ if (x != GR_DO_AUDIT) \
53951+ spin_unlock(&grsec_alert_lock); \
53952+ else \
53953+ spin_unlock(&grsec_audit_lock); \
53954+ read_unlock(&grsec_exec_file_lock); \
53955+ read_unlock(&tasklist_lock); \
53956+ rcu_read_unlock(); \
53957+ ENABLE_PREEMPT(); \
53958+ if (x == GR_DONT_AUDIT) \
53959+ gr_handle_alertkill(current)
53960+
53961+enum {
53962+ FLOODING,
53963+ NO_FLOODING
53964+};
53965+
53966+extern char *gr_alert_log_fmt;
53967+extern char *gr_audit_log_fmt;
53968+extern char *gr_alert_log_buf;
53969+extern char *gr_audit_log_buf;
53970+
53971+static int gr_log_start(int audit)
53972+{
53973+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
53974+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
53975+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
53976+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
53977+ unsigned long curr_secs = get_seconds();
53978+
53979+ if (audit == GR_DO_AUDIT)
53980+ goto set_fmt;
53981+
53982+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
53983+ grsec_alert_wtime = curr_secs;
53984+ grsec_alert_fyet = 0;
53985+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
53986+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
53987+ grsec_alert_fyet++;
53988+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
53989+ grsec_alert_wtime = curr_secs;
53990+ grsec_alert_fyet++;
53991+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
53992+ return FLOODING;
53993+ }
53994+ else return FLOODING;
53995+
53996+set_fmt:
53997+#endif
53998+ memset(buf, 0, PAGE_SIZE);
53999+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
54000+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
54001+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
54002+ } else if (current->signal->curr_ip) {
54003+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
54004+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
54005+ } else if (gr_acl_is_enabled()) {
54006+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
54007+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
54008+ } else {
54009+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
54010+ strcpy(buf, fmt);
54011+ }
54012+
54013+ return NO_FLOODING;
54014+}
54015+
54016+static void gr_log_middle(int audit, const char *msg, va_list ap)
54017+ __attribute__ ((format (printf, 2, 0)));
54018+
54019+static void gr_log_middle(int audit, const char *msg, va_list ap)
54020+{
54021+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54022+ unsigned int len = strlen(buf);
54023+
54024+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
54025+
54026+ return;
54027+}
54028+
54029+static void gr_log_middle_varargs(int audit, const char *msg, ...)
54030+ __attribute__ ((format (printf, 2, 3)));
54031+
54032+static void gr_log_middle_varargs(int audit, const char *msg, ...)
54033+{
54034+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54035+ unsigned int len = strlen(buf);
54036+ va_list ap;
54037+
54038+ va_start(ap, msg);
54039+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
54040+ va_end(ap);
54041+
54042+ return;
54043+}
54044+
54045+static void gr_log_end(int audit)
54046+{
54047+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54048+ unsigned int len = strlen(buf);
54049+
54050+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
54051+ printk("%s\n", buf);
54052+
54053+ return;
54054+}
54055+
54056+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
54057+{
54058+ int logtype;
54059+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
54060+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
54061+ void *voidptr = NULL;
54062+ int num1 = 0, num2 = 0;
54063+ unsigned long ulong1 = 0, ulong2 = 0;
54064+ struct dentry *dentry = NULL;
54065+ struct vfsmount *mnt = NULL;
54066+ struct file *file = NULL;
54067+ struct task_struct *task = NULL;
54068+ const struct cred *cred, *pcred;
54069+ va_list ap;
54070+
54071+ BEGIN_LOCKS(audit);
54072+ logtype = gr_log_start(audit);
54073+ if (logtype == FLOODING) {
54074+ END_LOCKS(audit);
54075+ return;
54076+ }
54077+ va_start(ap, argtypes);
54078+ switch (argtypes) {
54079+ case GR_TTYSNIFF:
54080+ task = va_arg(ap, struct task_struct *);
54081+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
54082+ break;
54083+ case GR_SYSCTL_HIDDEN:
54084+ str1 = va_arg(ap, char *);
54085+ gr_log_middle_varargs(audit, msg, result, str1);
54086+ break;
54087+ case GR_RBAC:
54088+ dentry = va_arg(ap, struct dentry *);
54089+ mnt = va_arg(ap, struct vfsmount *);
54090+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
54091+ break;
54092+ case GR_RBAC_STR:
54093+ dentry = va_arg(ap, struct dentry *);
54094+ mnt = va_arg(ap, struct vfsmount *);
54095+ str1 = va_arg(ap, char *);
54096+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
54097+ break;
54098+ case GR_STR_RBAC:
54099+ str1 = va_arg(ap, char *);
54100+ dentry = va_arg(ap, struct dentry *);
54101+ mnt = va_arg(ap, struct vfsmount *);
54102+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
54103+ break;
54104+ case GR_RBAC_MODE2:
54105+ dentry = va_arg(ap, struct dentry *);
54106+ mnt = va_arg(ap, struct vfsmount *);
54107+ str1 = va_arg(ap, char *);
54108+ str2 = va_arg(ap, char *);
54109+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
54110+ break;
54111+ case GR_RBAC_MODE3:
54112+ dentry = va_arg(ap, struct dentry *);
54113+ mnt = va_arg(ap, struct vfsmount *);
54114+ str1 = va_arg(ap, char *);
54115+ str2 = va_arg(ap, char *);
54116+ str3 = va_arg(ap, char *);
54117+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
54118+ break;
54119+ case GR_FILENAME:
54120+ dentry = va_arg(ap, struct dentry *);
54121+ mnt = va_arg(ap, struct vfsmount *);
54122+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
54123+ break;
54124+ case GR_STR_FILENAME:
54125+ str1 = va_arg(ap, char *);
54126+ dentry = va_arg(ap, struct dentry *);
54127+ mnt = va_arg(ap, struct vfsmount *);
54128+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
54129+ break;
54130+ case GR_FILENAME_STR:
54131+ dentry = va_arg(ap, struct dentry *);
54132+ mnt = va_arg(ap, struct vfsmount *);
54133+ str1 = va_arg(ap, char *);
54134+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
54135+ break;
54136+ case GR_FILENAME_TWO_INT:
54137+ dentry = va_arg(ap, struct dentry *);
54138+ mnt = va_arg(ap, struct vfsmount *);
54139+ num1 = va_arg(ap, int);
54140+ num2 = va_arg(ap, int);
54141+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
54142+ break;
54143+ case GR_FILENAME_TWO_INT_STR:
54144+ dentry = va_arg(ap, struct dentry *);
54145+ mnt = va_arg(ap, struct vfsmount *);
54146+ num1 = va_arg(ap, int);
54147+ num2 = va_arg(ap, int);
54148+ str1 = va_arg(ap, char *);
54149+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
54150+ break;
54151+ case GR_TEXTREL:
54152+ file = va_arg(ap, struct file *);
54153+ ulong1 = va_arg(ap, unsigned long);
54154+ ulong2 = va_arg(ap, unsigned long);
54155+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
54156+ break;
54157+ case GR_PTRACE:
54158+ task = va_arg(ap, struct task_struct *);
54159+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
54160+ break;
54161+ case GR_RESOURCE:
54162+ task = va_arg(ap, struct task_struct *);
54163+ cred = __task_cred(task);
54164+ pcred = __task_cred(task->real_parent);
54165+ ulong1 = va_arg(ap, unsigned long);
54166+ str1 = va_arg(ap, char *);
54167+ ulong2 = va_arg(ap, unsigned long);
54168+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54169+ break;
54170+ case GR_CAP:
54171+ task = va_arg(ap, struct task_struct *);
54172+ cred = __task_cred(task);
54173+ pcred = __task_cred(task->real_parent);
54174+ str1 = va_arg(ap, char *);
54175+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54176+ break;
54177+ case GR_SIG:
54178+ str1 = va_arg(ap, char *);
54179+ voidptr = va_arg(ap, void *);
54180+ gr_log_middle_varargs(audit, msg, str1, voidptr);
54181+ break;
54182+ case GR_SIG2:
54183+ task = va_arg(ap, struct task_struct *);
54184+ cred = __task_cred(task);
54185+ pcred = __task_cred(task->real_parent);
54186+ num1 = va_arg(ap, int);
54187+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54188+ break;
54189+ case GR_CRASH1:
54190+ task = va_arg(ap, struct task_struct *);
54191+ cred = __task_cred(task);
54192+ pcred = __task_cred(task->real_parent);
54193+ ulong1 = va_arg(ap, unsigned long);
54194+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
54195+ break;
54196+ case GR_CRASH2:
54197+ task = va_arg(ap, struct task_struct *);
54198+ cred = __task_cred(task);
54199+ pcred = __task_cred(task->real_parent);
54200+ ulong1 = va_arg(ap, unsigned long);
54201+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
54202+ break;
54203+ case GR_RWXMAP:
54204+ file = va_arg(ap, struct file *);
54205+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
54206+ break;
54207+ case GR_PSACCT:
54208+ {
54209+ unsigned int wday, cday;
54210+ __u8 whr, chr;
54211+ __u8 wmin, cmin;
54212+ __u8 wsec, csec;
54213+ char cur_tty[64] = { 0 };
54214+ char parent_tty[64] = { 0 };
54215+
54216+ task = va_arg(ap, struct task_struct *);
54217+ wday = va_arg(ap, unsigned int);
54218+ cday = va_arg(ap, unsigned int);
54219+ whr = va_arg(ap, int);
54220+ chr = va_arg(ap, int);
54221+ wmin = va_arg(ap, int);
54222+ cmin = va_arg(ap, int);
54223+ wsec = va_arg(ap, int);
54224+ csec = va_arg(ap, int);
54225+ ulong1 = va_arg(ap, unsigned long);
54226+ cred = __task_cred(task);
54227+ pcred = __task_cred(task->real_parent);
54228+
54229+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54230+ }
54231+ break;
54232+ default:
54233+ gr_log_middle(audit, msg, ap);
54234+ }
54235+ va_end(ap);
54236+ gr_log_end(audit);
54237+ END_LOCKS(audit);
54238+}
54239diff -urNp linux-3.0.7/grsecurity/grsec_mem.c linux-3.0.7/grsecurity/grsec_mem.c
54240--- linux-3.0.7/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
54241+++ linux-3.0.7/grsecurity/grsec_mem.c 2011-08-23 21:48:14.000000000 -0400
54242@@ -0,0 +1,33 @@
54243+#include <linux/kernel.h>
54244+#include <linux/sched.h>
54245+#include <linux/mm.h>
54246+#include <linux/mman.h>
54247+#include <linux/grinternal.h>
54248+
54249+void
54250+gr_handle_ioperm(void)
54251+{
54252+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
54253+ return;
54254+}
54255+
54256+void
54257+gr_handle_iopl(void)
54258+{
54259+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
54260+ return;
54261+}
54262+
54263+void
54264+gr_handle_mem_readwrite(u64 from, u64 to)
54265+{
54266+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
54267+ return;
54268+}
54269+
54270+void
54271+gr_handle_vm86(void)
54272+{
54273+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
54274+ return;
54275+}
54276diff -urNp linux-3.0.7/grsecurity/grsec_mount.c linux-3.0.7/grsecurity/grsec_mount.c
54277--- linux-3.0.7/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
54278+++ linux-3.0.7/grsecurity/grsec_mount.c 2011-08-23 21:48:14.000000000 -0400
54279@@ -0,0 +1,62 @@
54280+#include <linux/kernel.h>
54281+#include <linux/sched.h>
54282+#include <linux/mount.h>
54283+#include <linux/grsecurity.h>
54284+#include <linux/grinternal.h>
54285+
54286+void
54287+gr_log_remount(const char *devname, const int retval)
54288+{
54289+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54290+ if (grsec_enable_mount && (retval >= 0))
54291+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
54292+#endif
54293+ return;
54294+}
54295+
54296+void
54297+gr_log_unmount(const char *devname, const int retval)
54298+{
54299+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54300+ if (grsec_enable_mount && (retval >= 0))
54301+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
54302+#endif
54303+ return;
54304+}
54305+
54306+void
54307+gr_log_mount(const char *from, const char *to, const int retval)
54308+{
54309+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54310+ if (grsec_enable_mount && (retval >= 0))
54311+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
54312+#endif
54313+ return;
54314+}
54315+
54316+int
54317+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
54318+{
54319+#ifdef CONFIG_GRKERNSEC_ROFS
54320+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
54321+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
54322+ return -EPERM;
54323+ } else
54324+ return 0;
54325+#endif
54326+ return 0;
54327+}
54328+
54329+int
54330+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
54331+{
54332+#ifdef CONFIG_GRKERNSEC_ROFS
54333+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
54334+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
54335+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
54336+ return -EPERM;
54337+ } else
54338+ return 0;
54339+#endif
54340+ return 0;
54341+}
54342diff -urNp linux-3.0.7/grsecurity/grsec_pax.c linux-3.0.7/grsecurity/grsec_pax.c
54343--- linux-3.0.7/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
54344+++ linux-3.0.7/grsecurity/grsec_pax.c 2011-08-23 21:48:14.000000000 -0400
54345@@ -0,0 +1,36 @@
54346+#include <linux/kernel.h>
54347+#include <linux/sched.h>
54348+#include <linux/mm.h>
54349+#include <linux/file.h>
54350+#include <linux/grinternal.h>
54351+#include <linux/grsecurity.h>
54352+
54353+void
54354+gr_log_textrel(struct vm_area_struct * vma)
54355+{
54356+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
54357+ if (grsec_enable_audit_textrel)
54358+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
54359+#endif
54360+ return;
54361+}
54362+
54363+void
54364+gr_log_rwxmmap(struct file *file)
54365+{
54366+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54367+ if (grsec_enable_log_rwxmaps)
54368+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
54369+#endif
54370+ return;
54371+}
54372+
54373+void
54374+gr_log_rwxmprotect(struct file *file)
54375+{
54376+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54377+ if (grsec_enable_log_rwxmaps)
54378+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
54379+#endif
54380+ return;
54381+}
54382diff -urNp linux-3.0.7/grsecurity/grsec_ptrace.c linux-3.0.7/grsecurity/grsec_ptrace.c
54383--- linux-3.0.7/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
54384+++ linux-3.0.7/grsecurity/grsec_ptrace.c 2011-08-23 21:48:14.000000000 -0400
54385@@ -0,0 +1,14 @@
54386+#include <linux/kernel.h>
54387+#include <linux/sched.h>
54388+#include <linux/grinternal.h>
54389+#include <linux/grsecurity.h>
54390+
54391+void
54392+gr_audit_ptrace(struct task_struct *task)
54393+{
54394+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
54395+ if (grsec_enable_audit_ptrace)
54396+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
54397+#endif
54398+ return;
54399+}
54400diff -urNp linux-3.0.7/grsecurity/grsec_sig.c linux-3.0.7/grsecurity/grsec_sig.c
54401--- linux-3.0.7/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
54402+++ linux-3.0.7/grsecurity/grsec_sig.c 2011-08-23 21:48:14.000000000 -0400
54403@@ -0,0 +1,206 @@
54404+#include <linux/kernel.h>
54405+#include <linux/sched.h>
54406+#include <linux/delay.h>
54407+#include <linux/grsecurity.h>
54408+#include <linux/grinternal.h>
54409+#include <linux/hardirq.h>
54410+
54411+char *signames[] = {
54412+ [SIGSEGV] = "Segmentation fault",
54413+ [SIGILL] = "Illegal instruction",
54414+ [SIGABRT] = "Abort",
54415+ [SIGBUS] = "Invalid alignment/Bus error"
54416+};
54417+
54418+void
54419+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
54420+{
54421+#ifdef CONFIG_GRKERNSEC_SIGNAL
54422+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
54423+ (sig == SIGABRT) || (sig == SIGBUS))) {
54424+ if (t->pid == current->pid) {
54425+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
54426+ } else {
54427+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
54428+ }
54429+ }
54430+#endif
54431+ return;
54432+}
54433+
54434+int
54435+gr_handle_signal(const struct task_struct *p, const int sig)
54436+{
54437+#ifdef CONFIG_GRKERNSEC
54438+ if (current->pid > 1 && gr_check_protected_task(p)) {
54439+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
54440+ return -EPERM;
54441+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
54442+ return -EPERM;
54443+ }
54444+#endif
54445+ return 0;
54446+}
54447+
54448+#ifdef CONFIG_GRKERNSEC
54449+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
54450+
54451+int gr_fake_force_sig(int sig, struct task_struct *t)
54452+{
54453+ unsigned long int flags;
54454+ int ret, blocked, ignored;
54455+ struct k_sigaction *action;
54456+
54457+ spin_lock_irqsave(&t->sighand->siglock, flags);
54458+ action = &t->sighand->action[sig-1];
54459+ ignored = action->sa.sa_handler == SIG_IGN;
54460+ blocked = sigismember(&t->blocked, sig);
54461+ if (blocked || ignored) {
54462+ action->sa.sa_handler = SIG_DFL;
54463+ if (blocked) {
54464+ sigdelset(&t->blocked, sig);
54465+ recalc_sigpending_and_wake(t);
54466+ }
54467+ }
54468+ if (action->sa.sa_handler == SIG_DFL)
54469+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
54470+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
54471+
54472+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
54473+
54474+ return ret;
54475+}
54476+#endif
54477+
54478+#ifdef CONFIG_GRKERNSEC_BRUTE
54479+#define GR_USER_BAN_TIME (15 * 60)
54480+
54481+static int __get_dumpable(unsigned long mm_flags)
54482+{
54483+ int ret;
54484+
54485+ ret = mm_flags & MMF_DUMPABLE_MASK;
54486+ return (ret >= 2) ? 2 : ret;
54487+}
54488+#endif
54489+
54490+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
54491+{
54492+#ifdef CONFIG_GRKERNSEC_BRUTE
54493+ uid_t uid = 0;
54494+
54495+ if (!grsec_enable_brute)
54496+ return;
54497+
54498+ rcu_read_lock();
54499+ read_lock(&tasklist_lock);
54500+ read_lock(&grsec_exec_file_lock);
54501+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
54502+ p->real_parent->brute = 1;
54503+ else {
54504+ const struct cred *cred = __task_cred(p), *cred2;
54505+ struct task_struct *tsk, *tsk2;
54506+
54507+ if (!__get_dumpable(mm_flags) && cred->uid) {
54508+ struct user_struct *user;
54509+
54510+ uid = cred->uid;
54511+
54512+ /* this is put upon execution past expiration */
54513+ user = find_user(uid);
54514+ if (user == NULL)
54515+ goto unlock;
54516+ user->banned = 1;
54517+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
54518+ if (user->ban_expires == ~0UL)
54519+ user->ban_expires--;
54520+
54521+ do_each_thread(tsk2, tsk) {
54522+ cred2 = __task_cred(tsk);
54523+ if (tsk != p && cred2->uid == uid)
54524+ gr_fake_force_sig(SIGKILL, tsk);
54525+ } while_each_thread(tsk2, tsk);
54526+ }
54527+ }
54528+unlock:
54529+ read_unlock(&grsec_exec_file_lock);
54530+ read_unlock(&tasklist_lock);
54531+ rcu_read_unlock();
54532+
54533+ if (uid)
54534+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
54535+
54536+#endif
54537+ return;
54538+}
54539+
54540+void gr_handle_brute_check(void)
54541+{
54542+#ifdef CONFIG_GRKERNSEC_BRUTE
54543+ if (current->brute)
54544+ msleep(30 * 1000);
54545+#endif
54546+ return;
54547+}
54548+
54549+void gr_handle_kernel_exploit(void)
54550+{
54551+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
54552+ const struct cred *cred;
54553+ struct task_struct *tsk, *tsk2;
54554+ struct user_struct *user;
54555+ uid_t uid;
54556+
54557+ if (in_irq() || in_serving_softirq() || in_nmi())
54558+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
54559+
54560+ uid = current_uid();
54561+
54562+ if (uid == 0)
54563+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
54564+ else {
54565+ /* kill all the processes of this user, hold a reference
54566+ to their creds struct, and prevent them from creating
54567+ another process until system reset
54568+ */
54569+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
54570+ /* we intentionally leak this ref */
54571+ user = get_uid(current->cred->user);
54572+ if (user) {
54573+ user->banned = 1;
54574+ user->ban_expires = ~0UL;
54575+ }
54576+
54577+ read_lock(&tasklist_lock);
54578+ do_each_thread(tsk2, tsk) {
54579+ cred = __task_cred(tsk);
54580+ if (cred->uid == uid)
54581+ gr_fake_force_sig(SIGKILL, tsk);
54582+ } while_each_thread(tsk2, tsk);
54583+ read_unlock(&tasklist_lock);
54584+ }
54585+#endif
54586+}
54587+
54588+int __gr_process_user_ban(struct user_struct *user)
54589+{
54590+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54591+ if (unlikely(user->banned)) {
54592+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
54593+ user->banned = 0;
54594+ user->ban_expires = 0;
54595+ free_uid(user);
54596+ } else
54597+ return -EPERM;
54598+ }
54599+#endif
54600+ return 0;
54601+}
54602+
54603+int gr_process_user_ban(void)
54604+{
54605+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54606+ return __gr_process_user_ban(current->cred->user);
54607+#endif
54608+ return 0;
54609+}
54610diff -urNp linux-3.0.7/grsecurity/grsec_sock.c linux-3.0.7/grsecurity/grsec_sock.c
54611--- linux-3.0.7/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
54612+++ linux-3.0.7/grsecurity/grsec_sock.c 2011-08-23 21:48:14.000000000 -0400
54613@@ -0,0 +1,244 @@
54614+#include <linux/kernel.h>
54615+#include <linux/module.h>
54616+#include <linux/sched.h>
54617+#include <linux/file.h>
54618+#include <linux/net.h>
54619+#include <linux/in.h>
54620+#include <linux/ip.h>
54621+#include <net/sock.h>
54622+#include <net/inet_sock.h>
54623+#include <linux/grsecurity.h>
54624+#include <linux/grinternal.h>
54625+#include <linux/gracl.h>
54626+
54627+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
54628+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
54629+
54630+EXPORT_SYMBOL(gr_search_udp_recvmsg);
54631+EXPORT_SYMBOL(gr_search_udp_sendmsg);
54632+
54633+#ifdef CONFIG_UNIX_MODULE
54634+EXPORT_SYMBOL(gr_acl_handle_unix);
54635+EXPORT_SYMBOL(gr_acl_handle_mknod);
54636+EXPORT_SYMBOL(gr_handle_chroot_unix);
54637+EXPORT_SYMBOL(gr_handle_create);
54638+#endif
54639+
54640+#ifdef CONFIG_GRKERNSEC
54641+#define gr_conn_table_size 32749
54642+struct conn_table_entry {
54643+ struct conn_table_entry *next;
54644+ struct signal_struct *sig;
54645+};
54646+
54647+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
54648+DEFINE_SPINLOCK(gr_conn_table_lock);
54649+
54650+extern const char * gr_socktype_to_name(unsigned char type);
54651+extern const char * gr_proto_to_name(unsigned char proto);
54652+extern const char * gr_sockfamily_to_name(unsigned char family);
54653+
54654+static __inline__ int
54655+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
54656+{
54657+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
54658+}
54659+
54660+static __inline__ int
54661+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
54662+ __u16 sport, __u16 dport)
54663+{
54664+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
54665+ sig->gr_sport == sport && sig->gr_dport == dport))
54666+ return 1;
54667+ else
54668+ return 0;
54669+}
54670+
54671+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
54672+{
54673+ struct conn_table_entry **match;
54674+ unsigned int index;
54675+
54676+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
54677+ sig->gr_sport, sig->gr_dport,
54678+ gr_conn_table_size);
54679+
54680+ newent->sig = sig;
54681+
54682+ match = &gr_conn_table[index];
54683+ newent->next = *match;
54684+ *match = newent;
54685+
54686+ return;
54687+}
54688+
54689+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
54690+{
54691+ struct conn_table_entry *match, *last = NULL;
54692+ unsigned int index;
54693+
54694+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
54695+ sig->gr_sport, sig->gr_dport,
54696+ gr_conn_table_size);
54697+
54698+ match = gr_conn_table[index];
54699+ while (match && !conn_match(match->sig,
54700+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
54701+ sig->gr_dport)) {
54702+ last = match;
54703+ match = match->next;
54704+ }
54705+
54706+ if (match) {
54707+ if (last)
54708+ last->next = match->next;
54709+ else
54710+ gr_conn_table[index] = NULL;
54711+ kfree(match);
54712+ }
54713+
54714+ return;
54715+}
54716+
54717+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
54718+ __u16 sport, __u16 dport)
54719+{
54720+ struct conn_table_entry *match;
54721+ unsigned int index;
54722+
54723+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
54724+
54725+ match = gr_conn_table[index];
54726+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
54727+ match = match->next;
54728+
54729+ if (match)
54730+ return match->sig;
54731+ else
54732+ return NULL;
54733+}
54734+
54735+#endif
54736+
54737+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
54738+{
54739+#ifdef CONFIG_GRKERNSEC
54740+ struct signal_struct *sig = task->signal;
54741+ struct conn_table_entry *newent;
54742+
54743+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
54744+ if (newent == NULL)
54745+ return;
54746+ /* no bh lock needed since we are called with bh disabled */
54747+ spin_lock(&gr_conn_table_lock);
54748+ gr_del_task_from_ip_table_nolock(sig);
54749+ sig->gr_saddr = inet->inet_rcv_saddr;
54750+ sig->gr_daddr = inet->inet_daddr;
54751+ sig->gr_sport = inet->inet_sport;
54752+ sig->gr_dport = inet->inet_dport;
54753+ gr_add_to_task_ip_table_nolock(sig, newent);
54754+ spin_unlock(&gr_conn_table_lock);
54755+#endif
54756+ return;
54757+}
54758+
54759+void gr_del_task_from_ip_table(struct task_struct *task)
54760+{
54761+#ifdef CONFIG_GRKERNSEC
54762+ spin_lock_bh(&gr_conn_table_lock);
54763+ gr_del_task_from_ip_table_nolock(task->signal);
54764+ spin_unlock_bh(&gr_conn_table_lock);
54765+#endif
54766+ return;
54767+}
54768+
54769+void
54770+gr_attach_curr_ip(const struct sock *sk)
54771+{
54772+#ifdef CONFIG_GRKERNSEC
54773+ struct signal_struct *p, *set;
54774+ const struct inet_sock *inet = inet_sk(sk);
54775+
54776+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
54777+ return;
54778+
54779+ set = current->signal;
54780+
54781+ spin_lock_bh(&gr_conn_table_lock);
54782+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
54783+ inet->inet_dport, inet->inet_sport);
54784+ if (unlikely(p != NULL)) {
54785+ set->curr_ip = p->curr_ip;
54786+ set->used_accept = 1;
54787+ gr_del_task_from_ip_table_nolock(p);
54788+ spin_unlock_bh(&gr_conn_table_lock);
54789+ return;
54790+ }
54791+ spin_unlock_bh(&gr_conn_table_lock);
54792+
54793+ set->curr_ip = inet->inet_daddr;
54794+ set->used_accept = 1;
54795+#endif
54796+ return;
54797+}
54798+
54799+int
54800+gr_handle_sock_all(const int family, const int type, const int protocol)
54801+{
54802+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
54803+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
54804+ (family != AF_UNIX)) {
54805+ if (family == AF_INET)
54806+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
54807+ else
54808+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
54809+ return -EACCES;
54810+ }
54811+#endif
54812+ return 0;
54813+}
54814+
54815+int
54816+gr_handle_sock_server(const struct sockaddr *sck)
54817+{
54818+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54819+ if (grsec_enable_socket_server &&
54820+ in_group_p(grsec_socket_server_gid) &&
54821+ sck && (sck->sa_family != AF_UNIX) &&
54822+ (sck->sa_family != AF_LOCAL)) {
54823+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
54824+ return -EACCES;
54825+ }
54826+#endif
54827+ return 0;
54828+}
54829+
54830+int
54831+gr_handle_sock_server_other(const struct sock *sck)
54832+{
54833+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54834+ if (grsec_enable_socket_server &&
54835+ in_group_p(grsec_socket_server_gid) &&
54836+ sck && (sck->sk_family != AF_UNIX) &&
54837+ (sck->sk_family != AF_LOCAL)) {
54838+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
54839+ return -EACCES;
54840+ }
54841+#endif
54842+ return 0;
54843+}
54844+
54845+int
54846+gr_handle_sock_client(const struct sockaddr *sck)
54847+{
54848+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
54849+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
54850+ sck && (sck->sa_family != AF_UNIX) &&
54851+ (sck->sa_family != AF_LOCAL)) {
54852+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
54853+ return -EACCES;
54854+ }
54855+#endif
54856+ return 0;
54857+}
54858diff -urNp linux-3.0.7/grsecurity/grsec_sysctl.c linux-3.0.7/grsecurity/grsec_sysctl.c
54859--- linux-3.0.7/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
54860+++ linux-3.0.7/grsecurity/grsec_sysctl.c 2011-08-25 17:26:15.000000000 -0400
54861@@ -0,0 +1,433 @@
54862+#include <linux/kernel.h>
54863+#include <linux/sched.h>
54864+#include <linux/sysctl.h>
54865+#include <linux/grsecurity.h>
54866+#include <linux/grinternal.h>
54867+
54868+int
54869+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
54870+{
54871+#ifdef CONFIG_GRKERNSEC_SYSCTL
54872+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
54873+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
54874+ return -EACCES;
54875+ }
54876+#endif
54877+ return 0;
54878+}
54879+
54880+#ifdef CONFIG_GRKERNSEC_ROFS
54881+static int __maybe_unused one = 1;
54882+#endif
54883+
54884+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
54885+struct ctl_table grsecurity_table[] = {
54886+#ifdef CONFIG_GRKERNSEC_SYSCTL
54887+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
54888+#ifdef CONFIG_GRKERNSEC_IO
54889+ {
54890+ .procname = "disable_priv_io",
54891+ .data = &grsec_disable_privio,
54892+ .maxlen = sizeof(int),
54893+ .mode = 0600,
54894+ .proc_handler = &proc_dointvec,
54895+ },
54896+#endif
54897+#endif
54898+#ifdef CONFIG_GRKERNSEC_LINK
54899+ {
54900+ .procname = "linking_restrictions",
54901+ .data = &grsec_enable_link,
54902+ .maxlen = sizeof(int),
54903+ .mode = 0600,
54904+ .proc_handler = &proc_dointvec,
54905+ },
54906+#endif
54907+#ifdef CONFIG_GRKERNSEC_BRUTE
54908+ {
54909+ .procname = "deter_bruteforce",
54910+ .data = &grsec_enable_brute,
54911+ .maxlen = sizeof(int),
54912+ .mode = 0600,
54913+ .proc_handler = &proc_dointvec,
54914+ },
54915+#endif
54916+#ifdef CONFIG_GRKERNSEC_FIFO
54917+ {
54918+ .procname = "fifo_restrictions",
54919+ .data = &grsec_enable_fifo,
54920+ .maxlen = sizeof(int),
54921+ .mode = 0600,
54922+ .proc_handler = &proc_dointvec,
54923+ },
54924+#endif
54925+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54926+ {
54927+ .procname = "ip_blackhole",
54928+ .data = &grsec_enable_blackhole,
54929+ .maxlen = sizeof(int),
54930+ .mode = 0600,
54931+ .proc_handler = &proc_dointvec,
54932+ },
54933+ {
54934+ .procname = "lastack_retries",
54935+ .data = &grsec_lastack_retries,
54936+ .maxlen = sizeof(int),
54937+ .mode = 0600,
54938+ .proc_handler = &proc_dointvec,
54939+ },
54940+#endif
54941+#ifdef CONFIG_GRKERNSEC_EXECLOG
54942+ {
54943+ .procname = "exec_logging",
54944+ .data = &grsec_enable_execlog,
54945+ .maxlen = sizeof(int),
54946+ .mode = 0600,
54947+ .proc_handler = &proc_dointvec,
54948+ },
54949+#endif
54950+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54951+ {
54952+ .procname = "rwxmap_logging",
54953+ .data = &grsec_enable_log_rwxmaps,
54954+ .maxlen = sizeof(int),
54955+ .mode = 0600,
54956+ .proc_handler = &proc_dointvec,
54957+ },
54958+#endif
54959+#ifdef CONFIG_GRKERNSEC_SIGNAL
54960+ {
54961+ .procname = "signal_logging",
54962+ .data = &grsec_enable_signal,
54963+ .maxlen = sizeof(int),
54964+ .mode = 0600,
54965+ .proc_handler = &proc_dointvec,
54966+ },
54967+#endif
54968+#ifdef CONFIG_GRKERNSEC_FORKFAIL
54969+ {
54970+ .procname = "forkfail_logging",
54971+ .data = &grsec_enable_forkfail,
54972+ .maxlen = sizeof(int),
54973+ .mode = 0600,
54974+ .proc_handler = &proc_dointvec,
54975+ },
54976+#endif
54977+#ifdef CONFIG_GRKERNSEC_TIME
54978+ {
54979+ .procname = "timechange_logging",
54980+ .data = &grsec_enable_time,
54981+ .maxlen = sizeof(int),
54982+ .mode = 0600,
54983+ .proc_handler = &proc_dointvec,
54984+ },
54985+#endif
54986+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
54987+ {
54988+ .procname = "chroot_deny_shmat",
54989+ .data = &grsec_enable_chroot_shmat,
54990+ .maxlen = sizeof(int),
54991+ .mode = 0600,
54992+ .proc_handler = &proc_dointvec,
54993+ },
54994+#endif
54995+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
54996+ {
54997+ .procname = "chroot_deny_unix",
54998+ .data = &grsec_enable_chroot_unix,
54999+ .maxlen = sizeof(int),
55000+ .mode = 0600,
55001+ .proc_handler = &proc_dointvec,
55002+ },
55003+#endif
55004+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
55005+ {
55006+ .procname = "chroot_deny_mount",
55007+ .data = &grsec_enable_chroot_mount,
55008+ .maxlen = sizeof(int),
55009+ .mode = 0600,
55010+ .proc_handler = &proc_dointvec,
55011+ },
55012+#endif
55013+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55014+ {
55015+ .procname = "chroot_deny_fchdir",
55016+ .data = &grsec_enable_chroot_fchdir,
55017+ .maxlen = sizeof(int),
55018+ .mode = 0600,
55019+ .proc_handler = &proc_dointvec,
55020+ },
55021+#endif
55022+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
55023+ {
55024+ .procname = "chroot_deny_chroot",
55025+ .data = &grsec_enable_chroot_double,
55026+ .maxlen = sizeof(int),
55027+ .mode = 0600,
55028+ .proc_handler = &proc_dointvec,
55029+ },
55030+#endif
55031+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
55032+ {
55033+ .procname = "chroot_deny_pivot",
55034+ .data = &grsec_enable_chroot_pivot,
55035+ .maxlen = sizeof(int),
55036+ .mode = 0600,
55037+ .proc_handler = &proc_dointvec,
55038+ },
55039+#endif
55040+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
55041+ {
55042+ .procname = "chroot_enforce_chdir",
55043+ .data = &grsec_enable_chroot_chdir,
55044+ .maxlen = sizeof(int),
55045+ .mode = 0600,
55046+ .proc_handler = &proc_dointvec,
55047+ },
55048+#endif
55049+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
55050+ {
55051+ .procname = "chroot_deny_chmod",
55052+ .data = &grsec_enable_chroot_chmod,
55053+ .maxlen = sizeof(int),
55054+ .mode = 0600,
55055+ .proc_handler = &proc_dointvec,
55056+ },
55057+#endif
55058+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55059+ {
55060+ .procname = "chroot_deny_mknod",
55061+ .data = &grsec_enable_chroot_mknod,
55062+ .maxlen = sizeof(int),
55063+ .mode = 0600,
55064+ .proc_handler = &proc_dointvec,
55065+ },
55066+#endif
55067+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55068+ {
55069+ .procname = "chroot_restrict_nice",
55070+ .data = &grsec_enable_chroot_nice,
55071+ .maxlen = sizeof(int),
55072+ .mode = 0600,
55073+ .proc_handler = &proc_dointvec,
55074+ },
55075+#endif
55076+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55077+ {
55078+ .procname = "chroot_execlog",
55079+ .data = &grsec_enable_chroot_execlog,
55080+ .maxlen = sizeof(int),
55081+ .mode = 0600,
55082+ .proc_handler = &proc_dointvec,
55083+ },
55084+#endif
55085+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55086+ {
55087+ .procname = "chroot_caps",
55088+ .data = &grsec_enable_chroot_caps,
55089+ .maxlen = sizeof(int),
55090+ .mode = 0600,
55091+ .proc_handler = &proc_dointvec,
55092+ },
55093+#endif
55094+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55095+ {
55096+ .procname = "chroot_deny_sysctl",
55097+ .data = &grsec_enable_chroot_sysctl,
55098+ .maxlen = sizeof(int),
55099+ .mode = 0600,
55100+ .proc_handler = &proc_dointvec,
55101+ },
55102+#endif
55103+#ifdef CONFIG_GRKERNSEC_TPE
55104+ {
55105+ .procname = "tpe",
55106+ .data = &grsec_enable_tpe,
55107+ .maxlen = sizeof(int),
55108+ .mode = 0600,
55109+ .proc_handler = &proc_dointvec,
55110+ },
55111+ {
55112+ .procname = "tpe_gid",
55113+ .data = &grsec_tpe_gid,
55114+ .maxlen = sizeof(int),
55115+ .mode = 0600,
55116+ .proc_handler = &proc_dointvec,
55117+ },
55118+#endif
55119+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55120+ {
55121+ .procname = "tpe_invert",
55122+ .data = &grsec_enable_tpe_invert,
55123+ .maxlen = sizeof(int),
55124+ .mode = 0600,
55125+ .proc_handler = &proc_dointvec,
55126+ },
55127+#endif
55128+#ifdef CONFIG_GRKERNSEC_TPE_ALL
55129+ {
55130+ .procname = "tpe_restrict_all",
55131+ .data = &grsec_enable_tpe_all,
55132+ .maxlen = sizeof(int),
55133+ .mode = 0600,
55134+ .proc_handler = &proc_dointvec,
55135+ },
55136+#endif
55137+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55138+ {
55139+ .procname = "socket_all",
55140+ .data = &grsec_enable_socket_all,
55141+ .maxlen = sizeof(int),
55142+ .mode = 0600,
55143+ .proc_handler = &proc_dointvec,
55144+ },
55145+ {
55146+ .procname = "socket_all_gid",
55147+ .data = &grsec_socket_all_gid,
55148+ .maxlen = sizeof(int),
55149+ .mode = 0600,
55150+ .proc_handler = &proc_dointvec,
55151+ },
55152+#endif
55153+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55154+ {
55155+ .procname = "socket_client",
55156+ .data = &grsec_enable_socket_client,
55157+ .maxlen = sizeof(int),
55158+ .mode = 0600,
55159+ .proc_handler = &proc_dointvec,
55160+ },
55161+ {
55162+ .procname = "socket_client_gid",
55163+ .data = &grsec_socket_client_gid,
55164+ .maxlen = sizeof(int),
55165+ .mode = 0600,
55166+ .proc_handler = &proc_dointvec,
55167+ },
55168+#endif
55169+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55170+ {
55171+ .procname = "socket_server",
55172+ .data = &grsec_enable_socket_server,
55173+ .maxlen = sizeof(int),
55174+ .mode = 0600,
55175+ .proc_handler = &proc_dointvec,
55176+ },
55177+ {
55178+ .procname = "socket_server_gid",
55179+ .data = &grsec_socket_server_gid,
55180+ .maxlen = sizeof(int),
55181+ .mode = 0600,
55182+ .proc_handler = &proc_dointvec,
55183+ },
55184+#endif
55185+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55186+ {
55187+ .procname = "audit_group",
55188+ .data = &grsec_enable_group,
55189+ .maxlen = sizeof(int),
55190+ .mode = 0600,
55191+ .proc_handler = &proc_dointvec,
55192+ },
55193+ {
55194+ .procname = "audit_gid",
55195+ .data = &grsec_audit_gid,
55196+ .maxlen = sizeof(int),
55197+ .mode = 0600,
55198+ .proc_handler = &proc_dointvec,
55199+ },
55200+#endif
55201+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55202+ {
55203+ .procname = "audit_chdir",
55204+ .data = &grsec_enable_chdir,
55205+ .maxlen = sizeof(int),
55206+ .mode = 0600,
55207+ .proc_handler = &proc_dointvec,
55208+ },
55209+#endif
55210+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55211+ {
55212+ .procname = "audit_mount",
55213+ .data = &grsec_enable_mount,
55214+ .maxlen = sizeof(int),
55215+ .mode = 0600,
55216+ .proc_handler = &proc_dointvec,
55217+ },
55218+#endif
55219+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55220+ {
55221+ .procname = "audit_textrel",
55222+ .data = &grsec_enable_audit_textrel,
55223+ .maxlen = sizeof(int),
55224+ .mode = 0600,
55225+ .proc_handler = &proc_dointvec,
55226+ },
55227+#endif
55228+#ifdef CONFIG_GRKERNSEC_DMESG
55229+ {
55230+ .procname = "dmesg",
55231+ .data = &grsec_enable_dmesg,
55232+ .maxlen = sizeof(int),
55233+ .mode = 0600,
55234+ .proc_handler = &proc_dointvec,
55235+ },
55236+#endif
55237+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55238+ {
55239+ .procname = "chroot_findtask",
55240+ .data = &grsec_enable_chroot_findtask,
55241+ .maxlen = sizeof(int),
55242+ .mode = 0600,
55243+ .proc_handler = &proc_dointvec,
55244+ },
55245+#endif
55246+#ifdef CONFIG_GRKERNSEC_RESLOG
55247+ {
55248+ .procname = "resource_logging",
55249+ .data = &grsec_resource_logging,
55250+ .maxlen = sizeof(int),
55251+ .mode = 0600,
55252+ .proc_handler = &proc_dointvec,
55253+ },
55254+#endif
55255+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55256+ {
55257+ .procname = "audit_ptrace",
55258+ .data = &grsec_enable_audit_ptrace,
55259+ .maxlen = sizeof(int),
55260+ .mode = 0600,
55261+ .proc_handler = &proc_dointvec,
55262+ },
55263+#endif
55264+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55265+ {
55266+ .procname = "harden_ptrace",
55267+ .data = &grsec_enable_harden_ptrace,
55268+ .maxlen = sizeof(int),
55269+ .mode = 0600,
55270+ .proc_handler = &proc_dointvec,
55271+ },
55272+#endif
55273+ {
55274+ .procname = "grsec_lock",
55275+ .data = &grsec_lock,
55276+ .maxlen = sizeof(int),
55277+ .mode = 0600,
55278+ .proc_handler = &proc_dointvec,
55279+ },
55280+#endif
55281+#ifdef CONFIG_GRKERNSEC_ROFS
55282+ {
55283+ .procname = "romount_protect",
55284+ .data = &grsec_enable_rofs,
55285+ .maxlen = sizeof(int),
55286+ .mode = 0600,
55287+ .proc_handler = &proc_dointvec_minmax,
55288+ .extra1 = &one,
55289+ .extra2 = &one,
55290+ },
55291+#endif
55292+ { }
55293+};
55294+#endif
55295diff -urNp linux-3.0.7/grsecurity/grsec_time.c linux-3.0.7/grsecurity/grsec_time.c
55296--- linux-3.0.7/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
55297+++ linux-3.0.7/grsecurity/grsec_time.c 2011-08-23 21:48:14.000000000 -0400
55298@@ -0,0 +1,16 @@
55299+#include <linux/kernel.h>
55300+#include <linux/sched.h>
55301+#include <linux/grinternal.h>
55302+#include <linux/module.h>
55303+
55304+void
55305+gr_log_timechange(void)
55306+{
55307+#ifdef CONFIG_GRKERNSEC_TIME
55308+ if (grsec_enable_time)
55309+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
55310+#endif
55311+ return;
55312+}
55313+
55314+EXPORT_SYMBOL(gr_log_timechange);
55315diff -urNp linux-3.0.7/grsecurity/grsec_tpe.c linux-3.0.7/grsecurity/grsec_tpe.c
55316--- linux-3.0.7/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
55317+++ linux-3.0.7/grsecurity/grsec_tpe.c 2011-08-23 21:48:14.000000000 -0400
55318@@ -0,0 +1,39 @@
55319+#include <linux/kernel.h>
55320+#include <linux/sched.h>
55321+#include <linux/file.h>
55322+#include <linux/fs.h>
55323+#include <linux/grinternal.h>
55324+
55325+extern int gr_acl_tpe_check(void);
55326+
55327+int
55328+gr_tpe_allow(const struct file *file)
55329+{
55330+#ifdef CONFIG_GRKERNSEC
55331+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
55332+ const struct cred *cred = current_cred();
55333+
55334+ if (cred->uid && ((grsec_enable_tpe &&
55335+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55336+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
55337+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
55338+#else
55339+ in_group_p(grsec_tpe_gid)
55340+#endif
55341+ ) || gr_acl_tpe_check()) &&
55342+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
55343+ (inode->i_mode & S_IWOTH))))) {
55344+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55345+ return 0;
55346+ }
55347+#ifdef CONFIG_GRKERNSEC_TPE_ALL
55348+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
55349+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
55350+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
55351+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55352+ return 0;
55353+ }
55354+#endif
55355+#endif
55356+ return 1;
55357+}
55358diff -urNp linux-3.0.7/grsecurity/grsum.c linux-3.0.7/grsecurity/grsum.c
55359--- linux-3.0.7/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
55360+++ linux-3.0.7/grsecurity/grsum.c 2011-08-23 21:48:14.000000000 -0400
55361@@ -0,0 +1,61 @@
55362+#include <linux/err.h>
55363+#include <linux/kernel.h>
55364+#include <linux/sched.h>
55365+#include <linux/mm.h>
55366+#include <linux/scatterlist.h>
55367+#include <linux/crypto.h>
55368+#include <linux/gracl.h>
55369+
55370+
55371+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
55372+#error "crypto and sha256 must be built into the kernel"
55373+#endif
55374+
55375+int
55376+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
55377+{
55378+ char *p;
55379+ struct crypto_hash *tfm;
55380+ struct hash_desc desc;
55381+ struct scatterlist sg;
55382+ unsigned char temp_sum[GR_SHA_LEN];
55383+ volatile int retval = 0;
55384+ volatile int dummy = 0;
55385+ unsigned int i;
55386+
55387+ sg_init_table(&sg, 1);
55388+
55389+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
55390+ if (IS_ERR(tfm)) {
55391+ /* should never happen, since sha256 should be built in */
55392+ return 1;
55393+ }
55394+
55395+ desc.tfm = tfm;
55396+ desc.flags = 0;
55397+
55398+ crypto_hash_init(&desc);
55399+
55400+ p = salt;
55401+ sg_set_buf(&sg, p, GR_SALT_LEN);
55402+ crypto_hash_update(&desc, &sg, sg.length);
55403+
55404+ p = entry->pw;
55405+ sg_set_buf(&sg, p, strlen(p));
55406+
55407+ crypto_hash_update(&desc, &sg, sg.length);
55408+
55409+ crypto_hash_final(&desc, temp_sum);
55410+
55411+ memset(entry->pw, 0, GR_PW_LEN);
55412+
55413+ for (i = 0; i < GR_SHA_LEN; i++)
55414+ if (sum[i] != temp_sum[i])
55415+ retval = 1;
55416+ else
55417+ dummy = 1; // waste a cycle
55418+
55419+ crypto_free_hash(tfm);
55420+
55421+ return retval;
55422+}
55423diff -urNp linux-3.0.7/grsecurity/Kconfig linux-3.0.7/grsecurity/Kconfig
55424--- linux-3.0.7/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
55425+++ linux-3.0.7/grsecurity/Kconfig 2011-09-15 00:00:57.000000000 -0400
55426@@ -0,0 +1,1038 @@
55427+#
55428+# grecurity configuration
55429+#
55430+
55431+menu "Grsecurity"
55432+
55433+config GRKERNSEC
55434+ bool "Grsecurity"
55435+ select CRYPTO
55436+ select CRYPTO_SHA256
55437+ help
55438+ If you say Y here, you will be able to configure many features
55439+ that will enhance the security of your system. It is highly
55440+ recommended that you say Y here and read through the help
55441+ for each option so that you fully understand the features and
55442+ can evaluate their usefulness for your machine.
55443+
55444+choice
55445+ prompt "Security Level"
55446+ depends on GRKERNSEC
55447+ default GRKERNSEC_CUSTOM
55448+
55449+config GRKERNSEC_LOW
55450+ bool "Low"
55451+ select GRKERNSEC_LINK
55452+ select GRKERNSEC_FIFO
55453+ select GRKERNSEC_RANDNET
55454+ select GRKERNSEC_DMESG
55455+ select GRKERNSEC_CHROOT
55456+ select GRKERNSEC_CHROOT_CHDIR
55457+
55458+ help
55459+ If you choose this option, several of the grsecurity options will
55460+ be enabled that will give you greater protection against a number
55461+ of attacks, while assuring that none of your software will have any
55462+ conflicts with the additional security measures. If you run a lot
55463+ of unusual software, or you are having problems with the higher
55464+ security levels, you should say Y here. With this option, the
55465+ following features are enabled:
55466+
55467+ - Linking restrictions
55468+ - FIFO restrictions
55469+ - Restricted dmesg
55470+ - Enforced chdir("/") on chroot
55471+ - Runtime module disabling
55472+
55473+config GRKERNSEC_MEDIUM
55474+ bool "Medium"
55475+ select PAX
55476+ select PAX_EI_PAX
55477+ select PAX_PT_PAX_FLAGS
55478+ select PAX_HAVE_ACL_FLAGS
55479+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55480+ select GRKERNSEC_CHROOT
55481+ select GRKERNSEC_CHROOT_SYSCTL
55482+ select GRKERNSEC_LINK
55483+ select GRKERNSEC_FIFO
55484+ select GRKERNSEC_DMESG
55485+ select GRKERNSEC_RANDNET
55486+ select GRKERNSEC_FORKFAIL
55487+ select GRKERNSEC_TIME
55488+ select GRKERNSEC_SIGNAL
55489+ select GRKERNSEC_CHROOT
55490+ select GRKERNSEC_CHROOT_UNIX
55491+ select GRKERNSEC_CHROOT_MOUNT
55492+ select GRKERNSEC_CHROOT_PIVOT
55493+ select GRKERNSEC_CHROOT_DOUBLE
55494+ select GRKERNSEC_CHROOT_CHDIR
55495+ select GRKERNSEC_CHROOT_MKNOD
55496+ select GRKERNSEC_PROC
55497+ select GRKERNSEC_PROC_USERGROUP
55498+ select PAX_RANDUSTACK
55499+ select PAX_ASLR
55500+ select PAX_RANDMMAP
55501+ select PAX_REFCOUNT if (X86 || SPARC64)
55502+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55503+
55504+ help
55505+ If you say Y here, several features in addition to those included
55506+ in the low additional security level will be enabled. These
55507+ features provide even more security to your system, though in rare
55508+ cases they may be incompatible with very old or poorly written
55509+ software. If you enable this option, make sure that your auth
55510+ service (identd) is running as gid 1001. With this option,
55511+ the following features (in addition to those provided in the
55512+ low additional security level) will be enabled:
55513+
55514+ - Failed fork logging
55515+ - Time change logging
55516+ - Signal logging
55517+ - Deny mounts in chroot
55518+ - Deny double chrooting
55519+ - Deny sysctl writes in chroot
55520+ - Deny mknod in chroot
55521+ - Deny access to abstract AF_UNIX sockets out of chroot
55522+ - Deny pivot_root in chroot
55523+ - Denied writes of /dev/kmem, /dev/mem, and /dev/port
55524+ - /proc restrictions with special GID set to 10 (usually wheel)
55525+ - Address Space Layout Randomization (ASLR)
55526+ - Prevent exploitation of most refcount overflows
55527+ - Bounds checking of copying between the kernel and userland
55528+
55529+config GRKERNSEC_HIGH
55530+ bool "High"
55531+ select GRKERNSEC_LINK
55532+ select GRKERNSEC_FIFO
55533+ select GRKERNSEC_DMESG
55534+ select GRKERNSEC_FORKFAIL
55535+ select GRKERNSEC_TIME
55536+ select GRKERNSEC_SIGNAL
55537+ select GRKERNSEC_CHROOT
55538+ select GRKERNSEC_CHROOT_SHMAT
55539+ select GRKERNSEC_CHROOT_UNIX
55540+ select GRKERNSEC_CHROOT_MOUNT
55541+ select GRKERNSEC_CHROOT_FCHDIR
55542+ select GRKERNSEC_CHROOT_PIVOT
55543+ select GRKERNSEC_CHROOT_DOUBLE
55544+ select GRKERNSEC_CHROOT_CHDIR
55545+ select GRKERNSEC_CHROOT_MKNOD
55546+ select GRKERNSEC_CHROOT_CAPS
55547+ select GRKERNSEC_CHROOT_SYSCTL
55548+ select GRKERNSEC_CHROOT_FINDTASK
55549+ select GRKERNSEC_SYSFS_RESTRICT
55550+ select GRKERNSEC_PROC
55551+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55552+ select GRKERNSEC_HIDESYM
55553+ select GRKERNSEC_BRUTE
55554+ select GRKERNSEC_PROC_USERGROUP
55555+ select GRKERNSEC_KMEM
55556+ select GRKERNSEC_RESLOG
55557+ select GRKERNSEC_RANDNET
55558+ select GRKERNSEC_PROC_ADD
55559+ select GRKERNSEC_CHROOT_CHMOD
55560+ select GRKERNSEC_CHROOT_NICE
55561+ select GRKERNSEC_AUDIT_MOUNT
55562+ select GRKERNSEC_MODHARDEN if (MODULES)
55563+ select GRKERNSEC_HARDEN_PTRACE
55564+ select GRKERNSEC_VM86 if (X86_32)
55565+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
55566+ select PAX
55567+ select PAX_RANDUSTACK
55568+ select PAX_ASLR
55569+ select PAX_RANDMMAP
55570+ select PAX_NOEXEC
55571+ select PAX_MPROTECT
55572+ select PAX_EI_PAX
55573+ select PAX_PT_PAX_FLAGS
55574+ select PAX_HAVE_ACL_FLAGS
55575+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
55576+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
55577+ select PAX_RANDKSTACK if (X86_TSC && X86)
55578+ select PAX_SEGMEXEC if (X86_32)
55579+ select PAX_PAGEEXEC
55580+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
55581+ select PAX_EMUTRAMP if (PARISC)
55582+ select PAX_EMUSIGRT if (PARISC)
55583+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
55584+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
55585+ select PAX_REFCOUNT if (X86 || SPARC64)
55586+ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
55587+ help
55588+ If you say Y here, many of the features of grsecurity will be
55589+ enabled, which will protect you against many kinds of attacks
55590+ against your system. The heightened security comes at a cost
55591+ of an increased chance of incompatibilities with rare software
55592+ on your machine. Since this security level enables PaX, you should
55593+ view <http://pax.grsecurity.net> and read about the PaX
55594+ project. While you are there, download chpax and run it on
55595+ binaries that cause problems with PaX. Also remember that
55596+ since the /proc restrictions are enabled, you must run your
55597+ identd as gid 1001. This security level enables the following
55598+ features in addition to those listed in the low and medium
55599+ security levels:
55600+
55601+ - Additional /proc restrictions
55602+ - Chmod restrictions in chroot
55603+ - No signals, ptrace, or viewing of processes outside of chroot
55604+ - Capability restrictions in chroot
55605+ - Deny fchdir out of chroot
55606+ - Priority restrictions in chroot
55607+ - Segmentation-based implementation of PaX
55608+ - Mprotect restrictions
55609+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
55610+ - Kernel stack randomization
55611+ - Mount/unmount/remount logging
55612+ - Kernel symbol hiding
55613+ - Prevention of memory exhaustion-based exploits
55614+ - Hardening of module auto-loading
55615+ - Ptrace restrictions
55616+ - Restricted vm86 mode
55617+ - Restricted sysfs/debugfs
55618+ - Active kernel exploit response
55619+
55620+config GRKERNSEC_CUSTOM
55621+ bool "Custom"
55622+ help
55623+ If you say Y here, you will be able to configure every grsecurity
55624+ option, which allows you to enable many more features that aren't
55625+ covered in the basic security levels. These additional features
55626+ include TPE, socket restrictions, and the sysctl system for
55627+ grsecurity. It is advised that you read through the help for
55628+ each option to determine its usefulness in your situation.
55629+
55630+endchoice
55631+
55632+menu "Address Space Protection"
55633+depends on GRKERNSEC
55634+
55635+config GRKERNSEC_KMEM
55636+ bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
55637+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
55638+ help
55639+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
55640+ be written to via mmap or otherwise to modify the running kernel.
55641+ /dev/port will also not be allowed to be opened. If you have module
55642+ support disabled, enabling this will close up four ways that are
55643+ currently used to insert malicious code into the running kernel.
55644+ Even with all these features enabled, we still highly recommend that
55645+ you use the RBAC system, as it is still possible for an attacker to
55646+ modify the running kernel through privileged I/O granted by ioperm/iopl.
55647+ If you are not using XFree86, you may be able to stop this additional
55648+ case by enabling the 'Disable privileged I/O' option. Though nothing
55649+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
55650+ but only to video memory, which is the only writing we allow in this
55651+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
55652+ not be allowed to mprotect it with PROT_WRITE later.
55653+ It is highly recommended that you say Y here if you meet all the
55654+ conditions above.
55655+
55656+config GRKERNSEC_VM86
55657+ bool "Restrict VM86 mode"
55658+ depends on X86_32
55659+
55660+ help
55661+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
55662+ make use of a special execution mode on 32bit x86 processors called
55663+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
55664+ video cards and will still work with this option enabled. The purpose
55665+ of the option is to prevent exploitation of emulation errors in
55666+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
55667+ Nearly all users should be able to enable this option.
55668+
55669+config GRKERNSEC_IO
55670+ bool "Disable privileged I/O"
55671+ depends on X86
55672+ select RTC_CLASS
55673+ select RTC_INTF_DEV
55674+ select RTC_DRV_CMOS
55675+
55676+ help
55677+ If you say Y here, all ioperm and iopl calls will return an error.
55678+ Ioperm and iopl can be used to modify the running kernel.
55679+ Unfortunately, some programs need this access to operate properly,
55680+ the most notable of which are XFree86 and hwclock. hwclock can be
55681+ remedied by having RTC support in the kernel, so real-time
55682+ clock support is enabled if this option is enabled, to ensure
55683+ that hwclock operates correctly. XFree86 still will not
55684+ operate correctly with this option enabled, so DO NOT CHOOSE Y
55685+ IF YOU USE XFree86. If you use XFree86 and you still want to
55686+ protect your kernel against modification, use the RBAC system.
55687+
55688+config GRKERNSEC_PROC_MEMMAP
55689+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
55690+ default y if (PAX_NOEXEC || PAX_ASLR)
55691+ depends on PAX_NOEXEC || PAX_ASLR
55692+ help
55693+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
55694+ give no information about the addresses of its mappings if
55695+ PaX features that rely on random addresses are enabled on the task.
55696+ If you use PaX it is greatly recommended that you say Y here as it
55697+ closes up a hole that makes the full ASLR useless for suid
55698+ binaries.
55699+
55700+config GRKERNSEC_BRUTE
55701+ bool "Deter exploit bruteforcing"
55702+ help
55703+ If you say Y here, attempts to bruteforce exploits against forking
55704+ daemons such as apache or sshd, as well as against suid/sgid binaries
55705+ will be deterred. When a child of a forking daemon is killed by PaX
55706+ or crashes due to an illegal instruction or other suspicious signal,
55707+ the parent process will be delayed 30 seconds upon every subsequent
55708+ fork until the administrator is able to assess the situation and
55709+ restart the daemon.
55710+ In the suid/sgid case, the attempt is logged, the user has all their
55711+ processes terminated, and they are prevented from executing any further
55712+ processes for 15 minutes.
55713+ It is recommended that you also enable signal logging in the auditing
55714+ section so that logs are generated when a process triggers a suspicious
55715+ signal.
55716+ If the sysctl option is enabled, a sysctl option with name
55717+ "deter_bruteforce" is created.
55718+
55719+
55720+config GRKERNSEC_MODHARDEN
55721+ bool "Harden module auto-loading"
55722+ depends on MODULES
55723+ help
55724+ If you say Y here, module auto-loading in response to use of some
55725+ feature implemented by an unloaded module will be restricted to
55726+ root users. Enabling this option helps defend against attacks
55727+ by unprivileged users who abuse the auto-loading behavior to
55728+ cause a vulnerable module to load that is then exploited.
55729+
55730+ If this option prevents a legitimate use of auto-loading for a
55731+ non-root user, the administrator can execute modprobe manually
55732+ with the exact name of the module mentioned in the alert log.
55733+ Alternatively, the administrator can add the module to the list
55734+ of modules loaded at boot by modifying init scripts.
55735+
55736+ Modification of init scripts will most likely be needed on
55737+ Ubuntu servers with encrypted home directory support enabled,
55738+ as the first non-root user logging in will cause the ecb(aes),
55739+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
55740+
55741+config GRKERNSEC_HIDESYM
55742+ bool "Hide kernel symbols"
55743+ help
55744+ If you say Y here, getting information on loaded modules, and
55745+ displaying all kernel symbols through a syscall will be restricted
55746+ to users with CAP_SYS_MODULE. For software compatibility reasons,
55747+ /proc/kallsyms will be restricted to the root user. The RBAC
55748+ system can hide that entry even from root.
55749+
55750+ This option also prevents leaking of kernel addresses through
55751+ several /proc entries.
55752+
55753+ Note that this option is only effective provided the following
55754+ conditions are met:
55755+ 1) The kernel using grsecurity is not precompiled by some distribution
55756+ 2) You have also enabled GRKERNSEC_DMESG
55757+ 3) You are using the RBAC system and hiding other files such as your
55758+ kernel image and System.map. Alternatively, enabling this option
55759+ causes the permissions on /boot, /lib/modules, and the kernel
55760+ source directory to change at compile time to prevent
55761+ reading by non-root users.
55762+ If the above conditions are met, this option will aid in providing a
55763+ useful protection against local kernel exploitation of overflows
55764+ and arbitrary read/write vulnerabilities.
55765+
55766+config GRKERNSEC_KERN_LOCKOUT
55767+ bool "Active kernel exploit response"
55768+ depends on X86 || ARM || PPC || SPARC
55769+ help
55770+ If you say Y here, when a PaX alert is triggered due to suspicious
55771+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
55772+ or an OOPs occurs due to bad memory accesses, instead of just
55773+ terminating the offending process (and potentially allowing
55774+ a subsequent exploit from the same user), we will take one of two
55775+ actions:
55776+ If the user was root, we will panic the system
55777+ If the user was non-root, we will log the attempt, terminate
55778+ all processes owned by the user, then prevent them from creating
55779+ any new processes until the system is restarted
55780+ This deters repeated kernel exploitation/bruteforcing attempts
55781+ and is useful for later forensics.
55782+
55783+endmenu
55784+menu "Role Based Access Control Options"
55785+depends on GRKERNSEC
55786+
55787+config GRKERNSEC_RBAC_DEBUG
55788+ bool
55789+
55790+config GRKERNSEC_NO_RBAC
55791+ bool "Disable RBAC system"
55792+ help
55793+ If you say Y here, the /dev/grsec device will be removed from the kernel,
55794+ preventing the RBAC system from being enabled. You should only say Y
55795+ here if you have no intention of using the RBAC system, so as to prevent
55796+ an attacker with root access from misusing the RBAC system to hide files
55797+ and processes when loadable module support and /dev/[k]mem have been
55798+ locked down.
55799+
55800+config GRKERNSEC_ACL_HIDEKERN
55801+ bool "Hide kernel processes"
55802+ help
55803+ If you say Y here, all kernel threads will be hidden to all
55804+ processes but those whose subject has the "view hidden processes"
55805+ flag.
55806+
55807+config GRKERNSEC_ACL_MAXTRIES
55808+ int "Maximum tries before password lockout"
55809+ default 3
55810+ help
55811+ This option enforces the maximum number of times a user can attempt
55812+ to authorize themselves with the grsecurity RBAC system before being
55813+ denied the ability to attempt authorization again for a specified time.
55814+ The lower the number, the harder it will be to brute-force a password.
55815+
55816+config GRKERNSEC_ACL_TIMEOUT
55817+ int "Time to wait after max password tries, in seconds"
55818+ default 30
55819+ help
55820+ This option specifies the time the user must wait after attempting to
55821+ authorize to the RBAC system with the maximum number of invalid
55822+ passwords. The higher the number, the harder it will be to brute-force
55823+ a password.
55824+
55825+endmenu
55826+menu "Filesystem Protections"
55827+depends on GRKERNSEC
55828+
55829+config GRKERNSEC_PROC
55830+ bool "Proc restrictions"
55831+ help
55832+ If you say Y here, the permissions of the /proc filesystem
55833+ will be altered to enhance system security and privacy. You MUST
55834+ choose either a user only restriction or a user and group restriction.
55835+ Depending upon the option you choose, you can either restrict users to
55836+ see only the processes they themselves run, or choose a group that can
55837+ view all processes and files normally restricted to root if you choose
55838+ the "restrict to user only" option. NOTE: If you're running identd as
55839+ a non-root user, you will have to run it as the group you specify here.
55840+
55841+config GRKERNSEC_PROC_USER
55842+ bool "Restrict /proc to user only"
55843+ depends on GRKERNSEC_PROC
55844+ help
55845+ If you say Y here, non-root users will only be able to view their own
55846+ processes, and restricts them from viewing network-related information,
55847+ and viewing kernel symbol and module information.
55848+
55849+config GRKERNSEC_PROC_USERGROUP
55850+ bool "Allow special group"
55851+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
55852+ help
55853+ If you say Y here, you will be able to select a group that will be
55854+ able to view all processes and network-related information. If you've
55855+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
55856+ remain hidden. This option is useful if you want to run identd as
55857+ a non-root user.
55858+
55859+config GRKERNSEC_PROC_GID
55860+ int "GID for special group"
55861+ depends on GRKERNSEC_PROC_USERGROUP
55862+ default 1001
55863+
55864+config GRKERNSEC_PROC_ADD
55865+ bool "Additional restrictions"
55866+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
55867+ help
55868+ If you say Y here, additional restrictions will be placed on
55869+ /proc that keep normal users from viewing device information and
55870+ slabinfo information that could be useful for exploits.
55871+
55872+config GRKERNSEC_LINK
55873+ bool "Linking restrictions"
55874+ help
55875+ If you say Y here, /tmp race exploits will be prevented, since users
55876+ will no longer be able to follow symlinks owned by other users in
55877+ world-writable +t directories (e.g. /tmp), unless the owner of the
55878+ symlink is the owner of the directory. users will also not be
55879+ able to hardlink to files they do not own. If the sysctl option is
55880+ enabled, a sysctl option with name "linking_restrictions" is created.
55881+
55882+config GRKERNSEC_FIFO
55883+ bool "FIFO restrictions"
55884+ help
55885+ If you say Y here, users will not be able to write to FIFOs they don't
55886+ own in world-writable +t directories (e.g. /tmp), unless the owner of
55887+ the FIFO is the same owner of the directory it's held in. If the sysctl
55888+ option is enabled, a sysctl option with name "fifo_restrictions" is
55889+ created.
55890+
55891+config GRKERNSEC_SYSFS_RESTRICT
55892+ bool "Sysfs/debugfs restriction"
55893+ depends on SYSFS
55894+ help
55895+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
55896+ any filesystem normally mounted under it (e.g. debugfs) will only
55897+ be accessible by root. These filesystems generally provide access
55898+ to hardware and debug information that isn't appropriate for unprivileged
55899+ users of the system. Sysfs and debugfs have also become a large source
55900+ of new vulnerabilities, ranging from infoleaks to local compromise.
55901+ There has been very little oversight with an eye toward security involved
55902+ in adding new exporters of information to these filesystems, so their
55903+ use is discouraged.
55904+ This option is equivalent to a chmod 0700 of the mount paths.
55905+
55906+config GRKERNSEC_ROFS
55907+ bool "Runtime read-only mount protection"
55908+ help
55909+ If you say Y here, a sysctl option with name "romount_protect" will
55910+ be created. By setting this option to 1 at runtime, filesystems
55911+ will be protected in the following ways:
55912+ * No new writable mounts will be allowed
55913+ * Existing read-only mounts won't be able to be remounted read/write
55914+ * Write operations will be denied on all block devices
55915+ This option acts independently of grsec_lock: once it is set to 1,
55916+ it cannot be turned off. Therefore, please be mindful of the resulting
55917+ behavior if this option is enabled in an init script on a read-only
55918+ filesystem. This feature is mainly intended for secure embedded systems.
55919+
55920+config GRKERNSEC_CHROOT
55921+ bool "Chroot jail restrictions"
55922+ help
55923+ If you say Y here, you will be able to choose several options that will
55924+ make breaking out of a chrooted jail much more difficult. If you
55925+ encounter no software incompatibilities with the following options, it
55926+ is recommended that you enable each one.
55927+
55928+config GRKERNSEC_CHROOT_MOUNT
55929+ bool "Deny mounts"
55930+ depends on GRKERNSEC_CHROOT
55931+ help
55932+ If you say Y here, processes inside a chroot will not be able to
55933+ mount or remount filesystems. If the sysctl option is enabled, a
55934+ sysctl option with name "chroot_deny_mount" is created.
55935+
55936+config GRKERNSEC_CHROOT_DOUBLE
55937+ bool "Deny double-chroots"
55938+ depends on GRKERNSEC_CHROOT
55939+ help
55940+ If you say Y here, processes inside a chroot will not be able to chroot
55941+ again outside the chroot. This is a widely used method of breaking
55942+ out of a chroot jail and should not be allowed. If the sysctl
55943+ option is enabled, a sysctl option with name
55944+ "chroot_deny_chroot" is created.
55945+
55946+config GRKERNSEC_CHROOT_PIVOT
55947+ bool "Deny pivot_root in chroot"
55948+ depends on GRKERNSEC_CHROOT
55949+ help
55950+ If you say Y here, processes inside a chroot will not be able to use
55951+ a function called pivot_root() that was introduced in Linux 2.3.41. It
55952+ works similar to chroot in that it changes the root filesystem. This
55953+ function could be misused in a chrooted process to attempt to break out
55954+ of the chroot, and therefore should not be allowed. If the sysctl
55955+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
55956+ created.
55957+
55958+config GRKERNSEC_CHROOT_CHDIR
55959+ bool "Enforce chdir(\"/\") on all chroots"
55960+ depends on GRKERNSEC_CHROOT
55961+ help
55962+ If you say Y here, the current working directory of all newly-chrooted
55963+ applications will be set to the the root directory of the chroot.
55964+ The man page on chroot(2) states:
55965+ Note that this call does not change the current working
55966+ directory, so that `.' can be outside the tree rooted at
55967+ `/'. In particular, the super-user can escape from a
55968+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
55969+
55970+ It is recommended that you say Y here, since it's not known to break
55971+ any software. If the sysctl option is enabled, a sysctl option with
55972+ name "chroot_enforce_chdir" is created.
55973+
55974+config GRKERNSEC_CHROOT_CHMOD
55975+ bool "Deny (f)chmod +s"
55976+ depends on GRKERNSEC_CHROOT
55977+ help
55978+ If you say Y here, processes inside a chroot will not be able to chmod
55979+ or fchmod files to make them have suid or sgid bits. This protects
55980+ against another published method of breaking a chroot. If the sysctl
55981+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
55982+ created.
55983+
55984+config GRKERNSEC_CHROOT_FCHDIR
55985+ bool "Deny fchdir out of chroot"
55986+ depends on GRKERNSEC_CHROOT
55987+ help
55988+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
55989+ to a file descriptor of the chrooting process that points to a directory
55990+ outside the filesystem will be stopped. If the sysctl option
55991+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
55992+
55993+config GRKERNSEC_CHROOT_MKNOD
55994+ bool "Deny mknod"
55995+ depends on GRKERNSEC_CHROOT
55996+ help
55997+ If you say Y here, processes inside a chroot will not be allowed to
55998+ mknod. The problem with using mknod inside a chroot is that it
55999+ would allow an attacker to create a device entry that is the same
56000+ as one on the physical root of your system, which could range from
56001+ anything from the console device to a device for your harddrive (which
56002+ they could then use to wipe the drive or steal data). It is recommended
56003+ that you say Y here, unless you run into software incompatibilities.
56004+ If the sysctl option is enabled, a sysctl option with name
56005+ "chroot_deny_mknod" is created.
56006+
56007+config GRKERNSEC_CHROOT_SHMAT
56008+ bool "Deny shmat() out of chroot"
56009+ depends on GRKERNSEC_CHROOT
56010+ help
56011+ If you say Y here, processes inside a chroot will not be able to attach
56012+ to shared memory segments that were created outside of the chroot jail.
56013+ It is recommended that you say Y here. If the sysctl option is enabled,
56014+ a sysctl option with name "chroot_deny_shmat" is created.
56015+
56016+config GRKERNSEC_CHROOT_UNIX
56017+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
56018+ depends on GRKERNSEC_CHROOT
56019+ help
56020+ If you say Y here, processes inside a chroot will not be able to
56021+ connect to abstract (meaning not belonging to a filesystem) Unix
56022+ domain sockets that were bound outside of a chroot. It is recommended
56023+ that you say Y here. If the sysctl option is enabled, a sysctl option
56024+ with name "chroot_deny_unix" is created.
56025+
56026+config GRKERNSEC_CHROOT_FINDTASK
56027+ bool "Protect outside processes"
56028+ depends on GRKERNSEC_CHROOT
56029+ help
56030+ If you say Y here, processes inside a chroot will not be able to
56031+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
56032+ getsid, or view any process outside of the chroot. If the sysctl
56033+ option is enabled, a sysctl option with name "chroot_findtask" is
56034+ created.
56035+
56036+config GRKERNSEC_CHROOT_NICE
56037+ bool "Restrict priority changes"
56038+ depends on GRKERNSEC_CHROOT
56039+ help
56040+ If you say Y here, processes inside a chroot will not be able to raise
56041+ the priority of processes in the chroot, or alter the priority of
56042+ processes outside the chroot. This provides more security than simply
56043+ removing CAP_SYS_NICE from the process' capability set. If the
56044+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
56045+ is created.
56046+
56047+config GRKERNSEC_CHROOT_SYSCTL
56048+ bool "Deny sysctl writes"
56049+ depends on GRKERNSEC_CHROOT
56050+ help
56051+ If you say Y here, an attacker in a chroot will not be able to
56052+ write to sysctl entries, either by sysctl(2) or through a /proc
56053+ interface. It is strongly recommended that you say Y here. If the
56054+ sysctl option is enabled, a sysctl option with name
56055+ "chroot_deny_sysctl" is created.
56056+
56057+config GRKERNSEC_CHROOT_CAPS
56058+ bool "Capability restrictions"
56059+ depends on GRKERNSEC_CHROOT
56060+ help
56061+ If you say Y here, the capabilities on all processes within a
56062+ chroot jail will be lowered to stop module insertion, raw i/o,
56063+ system and net admin tasks, rebooting the system, modifying immutable
56064+ files, modifying IPC owned by another, and changing the system time.
56065+ This is left an option because it can break some apps. Disable this
56066+ if your chrooted apps are having problems performing those kinds of
56067+ tasks. If the sysctl option is enabled, a sysctl option with
56068+ name "chroot_caps" is created.
56069+
56070+endmenu
56071+menu "Kernel Auditing"
56072+depends on GRKERNSEC
56073+
56074+config GRKERNSEC_AUDIT_GROUP
56075+ bool "Single group for auditing"
56076+ help
56077+ If you say Y here, the exec, chdir, and (un)mount logging features
56078+ will only operate on a group you specify. This option is recommended
56079+ if you only want to watch certain users instead of having a large
56080+ amount of logs from the entire system. If the sysctl option is enabled,
56081+ a sysctl option with name "audit_group" is created.
56082+
56083+config GRKERNSEC_AUDIT_GID
56084+ int "GID for auditing"
56085+ depends on GRKERNSEC_AUDIT_GROUP
56086+ default 1007
56087+
56088+config GRKERNSEC_EXECLOG
56089+ bool "Exec logging"
56090+ help
56091+ If you say Y here, all execve() calls will be logged (since the
56092+ other exec*() calls are frontends to execve(), all execution
56093+ will be logged). Useful for shell-servers that like to keep track
56094+ of their users. If the sysctl option is enabled, a sysctl option with
56095+ name "exec_logging" is created.
56096+ WARNING: This option when enabled will produce a LOT of logs, especially
56097+ on an active system.
56098+
56099+config GRKERNSEC_RESLOG
56100+ bool "Resource logging"
56101+ help
56102+ If you say Y here, all attempts to overstep resource limits will
56103+ be logged with the resource name, the requested size, and the current
56104+ limit. It is highly recommended that you say Y here. If the sysctl
56105+ option is enabled, a sysctl option with name "resource_logging" is
56106+ created. If the RBAC system is enabled, the sysctl value is ignored.
56107+
56108+config GRKERNSEC_CHROOT_EXECLOG
56109+ bool "Log execs within chroot"
56110+ help
56111+ If you say Y here, all executions inside a chroot jail will be logged
56112+ to syslog. This can cause a large amount of logs if certain
56113+ applications (eg. djb's daemontools) are installed on the system, and
56114+ is therefore left as an option. If the sysctl option is enabled, a
56115+ sysctl option with name "chroot_execlog" is created.
56116+
56117+config GRKERNSEC_AUDIT_PTRACE
56118+ bool "Ptrace logging"
56119+ help
56120+ If you say Y here, all attempts to attach to a process via ptrace
56121+ will be logged. If the sysctl option is enabled, a sysctl option
56122+ with name "audit_ptrace" is created.
56123+
56124+config GRKERNSEC_AUDIT_CHDIR
56125+ bool "Chdir logging"
56126+ help
56127+ If you say Y here, all chdir() calls will be logged. If the sysctl
56128+ option is enabled, a sysctl option with name "audit_chdir" is created.
56129+
56130+config GRKERNSEC_AUDIT_MOUNT
56131+ bool "(Un)Mount logging"
56132+ help
56133+ If you say Y here, all mounts and unmounts will be logged. If the
56134+ sysctl option is enabled, a sysctl option with name "audit_mount" is
56135+ created.
56136+
56137+config GRKERNSEC_SIGNAL
56138+ bool "Signal logging"
56139+ help
56140+ If you say Y here, certain important signals will be logged, such as
56141+ SIGSEGV, which will as a result inform you of when an error in a program
56142+ occurred, which in some cases could mean a possible exploit attempt.
56143+ If the sysctl option is enabled, a sysctl option with name
56144+ "signal_logging" is created.
56145+
56146+config GRKERNSEC_FORKFAIL
56147+ bool "Fork failure logging"
56148+ help
56149+ If you say Y here, all failed fork() attempts will be logged.
56150+ This could suggest a fork bomb, or someone attempting to overstep
56151+ their process limit. If the sysctl option is enabled, a sysctl option
56152+ with name "forkfail_logging" is created.
56153+
56154+config GRKERNSEC_TIME
56155+ bool "Time change logging"
56156+ help
56157+ If you say Y here, any changes of the system clock will be logged.
56158+ If the sysctl option is enabled, a sysctl option with name
56159+ "timechange_logging" is created.
56160+
56161+config GRKERNSEC_PROC_IPADDR
56162+ bool "/proc/<pid>/ipaddr support"
56163+ help
56164+ If you say Y here, a new entry will be added to each /proc/<pid>
56165+ directory that contains the IP address of the person using the task.
56166+ The IP is carried across local TCP and AF_UNIX stream sockets.
56167+ This information can be useful for IDS/IPSes to perform remote response
56168+ to a local attack. The entry is readable by only the owner of the
56169+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
56170+ the RBAC system), and thus does not create privacy concerns.
56171+
56172+config GRKERNSEC_RWXMAP_LOG
56173+ bool 'Denied RWX mmap/mprotect logging'
56174+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
56175+ help
56176+ If you say Y here, calls to mmap() and mprotect() with explicit
56177+ usage of PROT_WRITE and PROT_EXEC together will be logged when
56178+ denied by the PAX_MPROTECT feature. If the sysctl option is
56179+ enabled, a sysctl option with name "rwxmap_logging" is created.
56180+
56181+config GRKERNSEC_AUDIT_TEXTREL
56182+ bool 'ELF text relocations logging (READ HELP)'
56183+ depends on PAX_MPROTECT
56184+ help
56185+ If you say Y here, text relocations will be logged with the filename
56186+ of the offending library or binary. The purpose of the feature is
56187+ to help Linux distribution developers get rid of libraries and
56188+ binaries that need text relocations which hinder the future progress
56189+ of PaX. Only Linux distribution developers should say Y here, and
56190+ never on a production machine, as this option creates an information
56191+ leak that could aid an attacker in defeating the randomization of
56192+ a single memory region. If the sysctl option is enabled, a sysctl
56193+ option with name "audit_textrel" is created.
56194+
56195+endmenu
56196+
56197+menu "Executable Protections"
56198+depends on GRKERNSEC
56199+
56200+config GRKERNSEC_DMESG
56201+ bool "Dmesg(8) restriction"
56202+ help
56203+ If you say Y here, non-root users will not be able to use dmesg(8)
56204+ to view up to the last 4kb of messages in the kernel's log buffer.
56205+ The kernel's log buffer often contains kernel addresses and other
56206+ identifying information useful to an attacker in fingerprinting a
56207+ system for a targeted exploit.
56208+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
56209+ created.
56210+
56211+config GRKERNSEC_HARDEN_PTRACE
56212+ bool "Deter ptrace-based process snooping"
56213+ help
56214+ If you say Y here, TTY sniffers and other malicious monitoring
56215+ programs implemented through ptrace will be defeated. If you
56216+ have been using the RBAC system, this option has already been
56217+ enabled for several years for all users, with the ability to make
56218+ fine-grained exceptions.
56219+
56220+ This option only affects the ability of non-root users to ptrace
56221+ processes that are not a descendant of the ptracing process.
56222+ This means that strace ./binary and gdb ./binary will still work,
56223+ but attaching to arbitrary processes will not. If the sysctl
56224+ option is enabled, a sysctl option with name "harden_ptrace" is
56225+ created.
56226+
56227+config GRKERNSEC_TPE
56228+ bool "Trusted Path Execution (TPE)"
56229+ help
56230+ If you say Y here, you will be able to choose a gid to add to the
56231+ supplementary groups of users you want to mark as "untrusted."
56232+ These users will not be able to execute any files that are not in
56233+ root-owned directories writable only by root. If the sysctl option
56234+ is enabled, a sysctl option with name "tpe" is created.
56235+
56236+config GRKERNSEC_TPE_ALL
56237+ bool "Partially restrict all non-root users"
56238+ depends on GRKERNSEC_TPE
56239+ help
56240+ If you say Y here, all non-root users will be covered under
56241+ a weaker TPE restriction. This is separate from, and in addition to,
56242+ the main TPE options that you have selected elsewhere. Thus, if a
56243+ "trusted" GID is chosen, this restriction applies to even that GID.
56244+ Under this restriction, all non-root users will only be allowed to
56245+ execute files in directories they own that are not group or
56246+ world-writable, or in directories owned by root and writable only by
56247+ root. If the sysctl option is enabled, a sysctl option with name
56248+ "tpe_restrict_all" is created.
56249+
56250+config GRKERNSEC_TPE_INVERT
56251+ bool "Invert GID option"
56252+ depends on GRKERNSEC_TPE
56253+ help
56254+ If you say Y here, the group you specify in the TPE configuration will
56255+ decide what group TPE restrictions will be *disabled* for. This
56256+ option is useful if you want TPE restrictions to be applied to most
56257+ users on the system. If the sysctl option is enabled, a sysctl option
56258+ with name "tpe_invert" is created. Unlike other sysctl options, this
56259+ entry will default to on for backward-compatibility.
56260+
56261+config GRKERNSEC_TPE_GID
56262+ int "GID for untrusted users"
56263+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
56264+ default 1005
56265+ help
56266+ Setting this GID determines what group TPE restrictions will be
56267+ *enabled* for. If the sysctl option is enabled, a sysctl option
56268+ with name "tpe_gid" is created.
56269+
56270+config GRKERNSEC_TPE_GID
56271+ int "GID for trusted users"
56272+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
56273+ default 1005
56274+ help
56275+ Setting this GID determines what group TPE restrictions will be
56276+ *disabled* for. If the sysctl option is enabled, a sysctl option
56277+ with name "tpe_gid" is created.
56278+
56279+endmenu
56280+menu "Network Protections"
56281+depends on GRKERNSEC
56282+
56283+config GRKERNSEC_RANDNET
56284+ bool "Larger entropy pools"
56285+ help
56286+ If you say Y here, the entropy pools used for many features of Linux
56287+ and grsecurity will be doubled in size. Since several grsecurity
56288+ features use additional randomness, it is recommended that you say Y
56289+ here. Saying Y here has a similar effect as modifying
56290+ /proc/sys/kernel/random/poolsize.
56291+
56292+config GRKERNSEC_BLACKHOLE
56293+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
56294+ depends on NET
56295+ help
56296+ If you say Y here, neither TCP resets nor ICMP
56297+ destination-unreachable packets will be sent in response to packets
56298+ sent to ports for which no associated listening process exists.
56299+ This feature supports both IPV4 and IPV6 and exempts the
56300+ loopback interface from blackholing. Enabling this feature
56301+ makes a host more resilient to DoS attacks and reduces network
56302+ visibility against scanners.
56303+
56304+ The blackhole feature as-implemented is equivalent to the FreeBSD
56305+ blackhole feature, as it prevents RST responses to all packets, not
56306+ just SYNs. Under most application behavior this causes no
56307+ problems, but applications (like haproxy) may not close certain
56308+ connections in a way that cleanly terminates them on the remote
56309+ end, leaving the remote host in LAST_ACK state. Because of this
56310+ side-effect and to prevent intentional LAST_ACK DoSes, this
56311+ feature also adds automatic mitigation against such attacks.
56312+ The mitigation drastically reduces the amount of time a socket
56313+ can spend in LAST_ACK state. If you're using haproxy and not
56314+ all servers it connects to have this option enabled, consider
56315+ disabling this feature on the haproxy host.
56316+
56317+ If the sysctl option is enabled, two sysctl options with names
56318+ "ip_blackhole" and "lastack_retries" will be created.
56319+ While "ip_blackhole" takes the standard zero/non-zero on/off
56320+ toggle, "lastack_retries" uses the same kinds of values as
56321+ "tcp_retries1" and "tcp_retries2". The default value of 4
56322+ prevents a socket from lasting more than 45 seconds in LAST_ACK
56323+ state.
56324+
56325+config GRKERNSEC_SOCKET
56326+ bool "Socket restrictions"
56327+ depends on NET
56328+ help
56329+ If you say Y here, you will be able to choose from several options.
56330+ If you assign a GID on your system and add it to the supplementary
56331+ groups of users you want to restrict socket access to, this patch
56332+ will perform up to three things, based on the option(s) you choose.
56333+
56334+config GRKERNSEC_SOCKET_ALL
56335+ bool "Deny any sockets to group"
56336+ depends on GRKERNSEC_SOCKET
56337+ help
56338+ If you say Y here, you will be able to choose a GID whose users will
56339+ be unable to connect to other hosts from your machine or run server
56340+ applications from your machine. If the sysctl option is enabled, a
56341+ sysctl option with name "socket_all" is created.
56342+
56343+config GRKERNSEC_SOCKET_ALL_GID
56344+ int "GID to deny all sockets for"
56345+ depends on GRKERNSEC_SOCKET_ALL
56346+ default 1004
56347+ help
56348+ Here you can choose the GID to disable socket access for. Remember to
56349+ add the users you want socket access disabled for to the GID
56350+ specified here. If the sysctl option is enabled, a sysctl option
56351+ with name "socket_all_gid" is created.
56352+
56353+config GRKERNSEC_SOCKET_CLIENT
56354+ bool "Deny client sockets to group"
56355+ depends on GRKERNSEC_SOCKET
56356+ help
56357+ If you say Y here, you will be able to choose a GID whose users will
56358+ be unable to connect to other hosts from your machine, but will be
56359+ able to run servers. If this option is enabled, all users in the group
56360+ you specify will have to use passive mode when initiating ftp transfers
56361+ from the shell on your machine. If the sysctl option is enabled, a
56362+ sysctl option with name "socket_client" is created.
56363+
56364+config GRKERNSEC_SOCKET_CLIENT_GID
56365+ int "GID to deny client sockets for"
56366+ depends on GRKERNSEC_SOCKET_CLIENT
56367+ default 1003
56368+ help
56369+ Here you can choose the GID to disable client socket access for.
56370+ Remember to add the users you want client socket access disabled for to
56371+ the GID specified here. If the sysctl option is enabled, a sysctl
56372+ option with name "socket_client_gid" is created.
56373+
56374+config GRKERNSEC_SOCKET_SERVER
56375+ bool "Deny server sockets to group"
56376+ depends on GRKERNSEC_SOCKET
56377+ help
56378+ If you say Y here, you will be able to choose a GID whose users will
56379+ be unable to run server applications from your machine. If the sysctl
56380+ option is enabled, a sysctl option with name "socket_server" is created.
56381+
56382+config GRKERNSEC_SOCKET_SERVER_GID
56383+ int "GID to deny server sockets for"
56384+ depends on GRKERNSEC_SOCKET_SERVER
56385+ default 1002
56386+ help
56387+ Here you can choose the GID to disable server socket access for.
56388+ Remember to add the users you want server socket access disabled for to
56389+ the GID specified here. If the sysctl option is enabled, a sysctl
56390+ option with name "socket_server_gid" is created.
56391+
56392+endmenu
56393+menu "Sysctl support"
56394+depends on GRKERNSEC && SYSCTL
56395+
56396+config GRKERNSEC_SYSCTL
56397+ bool "Sysctl support"
56398+ help
56399+ If you say Y here, you will be able to change the options that
56400+ grsecurity runs with at bootup, without having to recompile your
56401+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
56402+ to enable (1) or disable (0) various features. All the sysctl entries
56403+ are mutable until the "grsec_lock" entry is set to a non-zero value.
56404+ All features enabled in the kernel configuration are disabled at boot
56405+ if you do not say Y to the "Turn on features by default" option.
56406+ All options should be set at startup, and the grsec_lock entry should
56407+ be set to a non-zero value after all the options are set.
56408+ *THIS IS EXTREMELY IMPORTANT*
56409+
56410+config GRKERNSEC_SYSCTL_DISTRO
56411+ bool "Extra sysctl support for distro makers (READ HELP)"
56412+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
56413+ help
56414+ If you say Y here, additional sysctl options will be created
56415+ for features that affect processes running as root. Therefore,
56416+ it is critical when using this option that the grsec_lock entry be
56417+ enabled after boot. Only distros with prebuilt kernel packages
56418+ with this option enabled that can ensure grsec_lock is enabled
56419+ after boot should use this option.
56420+ *Failure to set grsec_lock after boot makes all grsec features
56421+ this option covers useless*
56422+
56423+ Currently this option creates the following sysctl entries:
56424+ "Disable Privileged I/O": "disable_priv_io"
56425+
56426+config GRKERNSEC_SYSCTL_ON
56427+ bool "Turn on features by default"
56428+ depends on GRKERNSEC_SYSCTL
56429+ help
56430+ If you say Y here, instead of having all features enabled in the
56431+ kernel configuration disabled at boot time, the features will be
56432+ enabled at boot time. It is recommended you say Y here unless
56433+ there is some reason you would want all sysctl-tunable features to
56434+ be disabled by default. As mentioned elsewhere, it is important
56435+ to enable the grsec_lock entry once you have finished modifying
56436+ the sysctl entries.
56437+
56438+endmenu
56439+menu "Logging Options"
56440+depends on GRKERNSEC
56441+
56442+config GRKERNSEC_FLOODTIME
56443+ int "Seconds in between log messages (minimum)"
56444+ default 10
56445+ help
56446+ This option allows you to enforce the number of seconds between
56447+ grsecurity log messages. The default should be suitable for most
56448+ people, however, if you choose to change it, choose a value small enough
56449+ to allow informative logs to be produced, but large enough to
56450+ prevent flooding.
56451+
56452+config GRKERNSEC_FLOODBURST
56453+ int "Number of messages in a burst (maximum)"
56454+ default 6
56455+ help
56456+ This option allows you to choose the maximum number of messages allowed
56457+ within the flood time interval you chose in a separate option. The
56458+ default should be suitable for most people, however if you find that
56459+ many of your logs are being interpreted as flooding, you may want to
56460+ raise this value.
56461+
56462+endmenu
56463+
56464+endmenu
56465diff -urNp linux-3.0.7/grsecurity/Makefile linux-3.0.7/grsecurity/Makefile
56466--- linux-3.0.7/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
56467+++ linux-3.0.7/grsecurity/Makefile 2011-10-17 06:45:43.000000000 -0400
56468@@ -0,0 +1,36 @@
56469+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
56470+# during 2001-2009 it has been completely redesigned by Brad Spengler
56471+# into an RBAC system
56472+#
56473+# All code in this directory and various hooks inserted throughout the kernel
56474+# are copyright Brad Spengler - Open Source Security, Inc., and released
56475+# under the GPL v2 or higher
56476+
56477+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
56478+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
56479+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
56480+
56481+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
56482+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
56483+ gracl_learn.o grsec_log.o
56484+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
56485+
56486+ifdef CONFIG_NET
56487+obj-y += grsec_sock.o
56488+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
56489+endif
56490+
56491+ifndef CONFIG_GRKERNSEC
56492+obj-y += grsec_disabled.o
56493+endif
56494+
56495+ifdef CONFIG_GRKERNSEC_HIDESYM
56496+extra-y := grsec_hidesym.o
56497+$(obj)/grsec_hidesym.o:
56498+ @-chmod -f 500 /boot
56499+ @-chmod -f 500 /lib/modules
56500+ @-chmod -f 500 /lib64/modules
56501+ @-chmod -f 500 /lib32/modules
56502+ @-chmod -f 700 .
56503+ @echo ' grsec: protected kernel image paths'
56504+endif
56505diff -urNp linux-3.0.7/include/acpi/acpi_bus.h linux-3.0.7/include/acpi/acpi_bus.h
56506--- linux-3.0.7/include/acpi/acpi_bus.h 2011-07-21 22:17:23.000000000 -0400
56507+++ linux-3.0.7/include/acpi/acpi_bus.h 2011-08-23 21:47:56.000000000 -0400
56508@@ -107,7 +107,7 @@ struct acpi_device_ops {
56509 acpi_op_bind bind;
56510 acpi_op_unbind unbind;
56511 acpi_op_notify notify;
56512-};
56513+} __no_const;
56514
56515 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
56516
56517diff -urNp linux-3.0.7/include/asm-generic/atomic-long.h linux-3.0.7/include/asm-generic/atomic-long.h
56518--- linux-3.0.7/include/asm-generic/atomic-long.h 2011-07-21 22:17:23.000000000 -0400
56519+++ linux-3.0.7/include/asm-generic/atomic-long.h 2011-08-23 21:47:56.000000000 -0400
56520@@ -22,6 +22,12 @@
56521
56522 typedef atomic64_t atomic_long_t;
56523
56524+#ifdef CONFIG_PAX_REFCOUNT
56525+typedef atomic64_unchecked_t atomic_long_unchecked_t;
56526+#else
56527+typedef atomic64_t atomic_long_unchecked_t;
56528+#endif
56529+
56530 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
56531
56532 static inline long atomic_long_read(atomic_long_t *l)
56533@@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
56534 return (long)atomic64_read(v);
56535 }
56536
56537+#ifdef CONFIG_PAX_REFCOUNT
56538+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56539+{
56540+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56541+
56542+ return (long)atomic64_read_unchecked(v);
56543+}
56544+#endif
56545+
56546 static inline void atomic_long_set(atomic_long_t *l, long i)
56547 {
56548 atomic64_t *v = (atomic64_t *)l;
56549@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
56550 atomic64_set(v, i);
56551 }
56552
56553+#ifdef CONFIG_PAX_REFCOUNT
56554+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56555+{
56556+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56557+
56558+ atomic64_set_unchecked(v, i);
56559+}
56560+#endif
56561+
56562 static inline void atomic_long_inc(atomic_long_t *l)
56563 {
56564 atomic64_t *v = (atomic64_t *)l;
56565@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
56566 atomic64_inc(v);
56567 }
56568
56569+#ifdef CONFIG_PAX_REFCOUNT
56570+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56571+{
56572+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56573+
56574+ atomic64_inc_unchecked(v);
56575+}
56576+#endif
56577+
56578 static inline void atomic_long_dec(atomic_long_t *l)
56579 {
56580 atomic64_t *v = (atomic64_t *)l;
56581@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
56582 atomic64_dec(v);
56583 }
56584
56585+#ifdef CONFIG_PAX_REFCOUNT
56586+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
56587+{
56588+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56589+
56590+ atomic64_dec_unchecked(v);
56591+}
56592+#endif
56593+
56594 static inline void atomic_long_add(long i, atomic_long_t *l)
56595 {
56596 atomic64_t *v = (atomic64_t *)l;
56597@@ -59,6 +101,15 @@ static inline void atomic_long_add(long
56598 atomic64_add(i, v);
56599 }
56600
56601+#ifdef CONFIG_PAX_REFCOUNT
56602+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
56603+{
56604+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56605+
56606+ atomic64_add_unchecked(i, v);
56607+}
56608+#endif
56609+
56610 static inline void atomic_long_sub(long i, atomic_long_t *l)
56611 {
56612 atomic64_t *v = (atomic64_t *)l;
56613@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
56614 atomic64_sub(i, v);
56615 }
56616
56617+#ifdef CONFIG_PAX_REFCOUNT
56618+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
56619+{
56620+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56621+
56622+ atomic64_sub_unchecked(i, v);
56623+}
56624+#endif
56625+
56626 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
56627 {
56628 atomic64_t *v = (atomic64_t *)l;
56629@@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
56630 return (long)atomic64_inc_return(v);
56631 }
56632
56633+#ifdef CONFIG_PAX_REFCOUNT
56634+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
56635+{
56636+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56637+
56638+ return (long)atomic64_inc_return_unchecked(v);
56639+}
56640+#endif
56641+
56642 static inline long atomic_long_dec_return(atomic_long_t *l)
56643 {
56644 atomic64_t *v = (atomic64_t *)l;
56645@@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
56646
56647 typedef atomic_t atomic_long_t;
56648
56649+#ifdef CONFIG_PAX_REFCOUNT
56650+typedef atomic_unchecked_t atomic_long_unchecked_t;
56651+#else
56652+typedef atomic_t atomic_long_unchecked_t;
56653+#endif
56654+
56655 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
56656 static inline long atomic_long_read(atomic_long_t *l)
56657 {
56658@@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
56659 return (long)atomic_read(v);
56660 }
56661
56662+#ifdef CONFIG_PAX_REFCOUNT
56663+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56664+{
56665+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56666+
56667+ return (long)atomic_read_unchecked(v);
56668+}
56669+#endif
56670+
56671 static inline void atomic_long_set(atomic_long_t *l, long i)
56672 {
56673 atomic_t *v = (atomic_t *)l;
56674@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
56675 atomic_set(v, i);
56676 }
56677
56678+#ifdef CONFIG_PAX_REFCOUNT
56679+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56680+{
56681+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56682+
56683+ atomic_set_unchecked(v, i);
56684+}
56685+#endif
56686+
56687 static inline void atomic_long_inc(atomic_long_t *l)
56688 {
56689 atomic_t *v = (atomic_t *)l;
56690@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
56691 atomic_inc(v);
56692 }
56693
56694+#ifdef CONFIG_PAX_REFCOUNT
56695+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56696+{
56697+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56698+
56699+ atomic_inc_unchecked(v);
56700+}
56701+#endif
56702+
56703 static inline void atomic_long_dec(atomic_long_t *l)
56704 {
56705 atomic_t *v = (atomic_t *)l;
56706@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
56707 atomic_dec(v);
56708 }
56709
56710+#ifdef CONFIG_PAX_REFCOUNT
56711+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
56712+{
56713+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56714+
56715+ atomic_dec_unchecked(v);
56716+}
56717+#endif
56718+
56719 static inline void atomic_long_add(long i, atomic_long_t *l)
56720 {
56721 atomic_t *v = (atomic_t *)l;
56722@@ -176,6 +287,15 @@ static inline void atomic_long_add(long
56723 atomic_add(i, v);
56724 }
56725
56726+#ifdef CONFIG_PAX_REFCOUNT
56727+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
56728+{
56729+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56730+
56731+ atomic_add_unchecked(i, v);
56732+}
56733+#endif
56734+
56735 static inline void atomic_long_sub(long i, atomic_long_t *l)
56736 {
56737 atomic_t *v = (atomic_t *)l;
56738@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
56739 atomic_sub(i, v);
56740 }
56741
56742+#ifdef CONFIG_PAX_REFCOUNT
56743+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
56744+{
56745+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56746+
56747+ atomic_sub_unchecked(i, v);
56748+}
56749+#endif
56750+
56751 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
56752 {
56753 atomic_t *v = (atomic_t *)l;
56754@@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
56755 return (long)atomic_inc_return(v);
56756 }
56757
56758+#ifdef CONFIG_PAX_REFCOUNT
56759+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
56760+{
56761+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56762+
56763+ return (long)atomic_inc_return_unchecked(v);
56764+}
56765+#endif
56766+
56767 static inline long atomic_long_dec_return(atomic_long_t *l)
56768 {
56769 atomic_t *v = (atomic_t *)l;
56770@@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
56771
56772 #endif /* BITS_PER_LONG == 64 */
56773
56774+#ifdef CONFIG_PAX_REFCOUNT
56775+static inline void pax_refcount_needs_these_functions(void)
56776+{
56777+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
56778+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
56779+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
56780+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
56781+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
56782+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
56783+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
56784+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
56785+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
56786+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
56787+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
56788+
56789+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
56790+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
56791+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
56792+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
56793+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
56794+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
56795+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
56796+}
56797+#else
56798+#define atomic_read_unchecked(v) atomic_read(v)
56799+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
56800+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
56801+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
56802+#define atomic_inc_unchecked(v) atomic_inc(v)
56803+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
56804+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
56805+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
56806+#define atomic_dec_unchecked(v) atomic_dec(v)
56807+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
56808+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
56809+
56810+#define atomic_long_read_unchecked(v) atomic_long_read(v)
56811+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
56812+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
56813+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
56814+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
56815+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
56816+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
56817+#endif
56818+
56819 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
56820diff -urNp linux-3.0.7/include/asm-generic/cache.h linux-3.0.7/include/asm-generic/cache.h
56821--- linux-3.0.7/include/asm-generic/cache.h 2011-07-21 22:17:23.000000000 -0400
56822+++ linux-3.0.7/include/asm-generic/cache.h 2011-08-23 21:47:56.000000000 -0400
56823@@ -6,7 +6,7 @@
56824 * cache lines need to provide their own cache.h.
56825 */
56826
56827-#define L1_CACHE_SHIFT 5
56828-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
56829+#define L1_CACHE_SHIFT 5UL
56830+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
56831
56832 #endif /* __ASM_GENERIC_CACHE_H */
56833diff -urNp linux-3.0.7/include/asm-generic/int-l64.h linux-3.0.7/include/asm-generic/int-l64.h
56834--- linux-3.0.7/include/asm-generic/int-l64.h 2011-07-21 22:17:23.000000000 -0400
56835+++ linux-3.0.7/include/asm-generic/int-l64.h 2011-08-23 21:47:56.000000000 -0400
56836@@ -46,6 +46,8 @@ typedef unsigned int u32;
56837 typedef signed long s64;
56838 typedef unsigned long u64;
56839
56840+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
56841+
56842 #define S8_C(x) x
56843 #define U8_C(x) x ## U
56844 #define S16_C(x) x
56845diff -urNp linux-3.0.7/include/asm-generic/int-ll64.h linux-3.0.7/include/asm-generic/int-ll64.h
56846--- linux-3.0.7/include/asm-generic/int-ll64.h 2011-07-21 22:17:23.000000000 -0400
56847+++ linux-3.0.7/include/asm-generic/int-ll64.h 2011-08-23 21:47:56.000000000 -0400
56848@@ -51,6 +51,8 @@ typedef unsigned int u32;
56849 typedef signed long long s64;
56850 typedef unsigned long long u64;
56851
56852+typedef unsigned long long intoverflow_t;
56853+
56854 #define S8_C(x) x
56855 #define U8_C(x) x ## U
56856 #define S16_C(x) x
56857diff -urNp linux-3.0.7/include/asm-generic/kmap_types.h linux-3.0.7/include/asm-generic/kmap_types.h
56858--- linux-3.0.7/include/asm-generic/kmap_types.h 2011-07-21 22:17:23.000000000 -0400
56859+++ linux-3.0.7/include/asm-generic/kmap_types.h 2011-08-23 21:47:56.000000000 -0400
56860@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
56861 KMAP_D(17) KM_NMI,
56862 KMAP_D(18) KM_NMI_PTE,
56863 KMAP_D(19) KM_KDB,
56864+KMAP_D(20) KM_CLEARPAGE,
56865 /*
56866 * Remember to update debug_kmap_atomic() when adding new kmap types!
56867 */
56868-KMAP_D(20) KM_TYPE_NR
56869+KMAP_D(21) KM_TYPE_NR
56870 };
56871
56872 #undef KMAP_D
56873diff -urNp linux-3.0.7/include/asm-generic/pgtable.h linux-3.0.7/include/asm-generic/pgtable.h
56874--- linux-3.0.7/include/asm-generic/pgtable.h 2011-07-21 22:17:23.000000000 -0400
56875+++ linux-3.0.7/include/asm-generic/pgtable.h 2011-08-23 21:47:56.000000000 -0400
56876@@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
56877 #endif /* __HAVE_ARCH_PMD_WRITE */
56878 #endif
56879
56880+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
56881+static inline unsigned long pax_open_kernel(void) { return 0; }
56882+#endif
56883+
56884+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
56885+static inline unsigned long pax_close_kernel(void) { return 0; }
56886+#endif
56887+
56888 #endif /* !__ASSEMBLY__ */
56889
56890 #endif /* _ASM_GENERIC_PGTABLE_H */
56891diff -urNp linux-3.0.7/include/asm-generic/pgtable-nopmd.h linux-3.0.7/include/asm-generic/pgtable-nopmd.h
56892--- linux-3.0.7/include/asm-generic/pgtable-nopmd.h 2011-07-21 22:17:23.000000000 -0400
56893+++ linux-3.0.7/include/asm-generic/pgtable-nopmd.h 2011-08-23 21:47:56.000000000 -0400
56894@@ -1,14 +1,19 @@
56895 #ifndef _PGTABLE_NOPMD_H
56896 #define _PGTABLE_NOPMD_H
56897
56898-#ifndef __ASSEMBLY__
56899-
56900 #include <asm-generic/pgtable-nopud.h>
56901
56902-struct mm_struct;
56903-
56904 #define __PAGETABLE_PMD_FOLDED
56905
56906+#define PMD_SHIFT PUD_SHIFT
56907+#define PTRS_PER_PMD 1
56908+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
56909+#define PMD_MASK (~(PMD_SIZE-1))
56910+
56911+#ifndef __ASSEMBLY__
56912+
56913+struct mm_struct;
56914+
56915 /*
56916 * Having the pmd type consist of a pud gets the size right, and allows
56917 * us to conceptually access the pud entry that this pmd is folded into
56918@@ -16,11 +21,6 @@ struct mm_struct;
56919 */
56920 typedef struct { pud_t pud; } pmd_t;
56921
56922-#define PMD_SHIFT PUD_SHIFT
56923-#define PTRS_PER_PMD 1
56924-#define PMD_SIZE (1UL << PMD_SHIFT)
56925-#define PMD_MASK (~(PMD_SIZE-1))
56926-
56927 /*
56928 * The "pud_xxx()" functions here are trivial for a folded two-level
56929 * setup: the pmd is never bad, and a pmd always exists (as it's folded
56930diff -urNp linux-3.0.7/include/asm-generic/pgtable-nopud.h linux-3.0.7/include/asm-generic/pgtable-nopud.h
56931--- linux-3.0.7/include/asm-generic/pgtable-nopud.h 2011-07-21 22:17:23.000000000 -0400
56932+++ linux-3.0.7/include/asm-generic/pgtable-nopud.h 2011-08-23 21:47:56.000000000 -0400
56933@@ -1,10 +1,15 @@
56934 #ifndef _PGTABLE_NOPUD_H
56935 #define _PGTABLE_NOPUD_H
56936
56937-#ifndef __ASSEMBLY__
56938-
56939 #define __PAGETABLE_PUD_FOLDED
56940
56941+#define PUD_SHIFT PGDIR_SHIFT
56942+#define PTRS_PER_PUD 1
56943+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
56944+#define PUD_MASK (~(PUD_SIZE-1))
56945+
56946+#ifndef __ASSEMBLY__
56947+
56948 /*
56949 * Having the pud type consist of a pgd gets the size right, and allows
56950 * us to conceptually access the pgd entry that this pud is folded into
56951@@ -12,11 +17,6 @@
56952 */
56953 typedef struct { pgd_t pgd; } pud_t;
56954
56955-#define PUD_SHIFT PGDIR_SHIFT
56956-#define PTRS_PER_PUD 1
56957-#define PUD_SIZE (1UL << PUD_SHIFT)
56958-#define PUD_MASK (~(PUD_SIZE-1))
56959-
56960 /*
56961 * The "pgd_xxx()" functions here are trivial for a folded two-level
56962 * setup: the pud is never bad, and a pud always exists (as it's folded
56963diff -urNp linux-3.0.7/include/asm-generic/vmlinux.lds.h linux-3.0.7/include/asm-generic/vmlinux.lds.h
56964--- linux-3.0.7/include/asm-generic/vmlinux.lds.h 2011-07-21 22:17:23.000000000 -0400
56965+++ linux-3.0.7/include/asm-generic/vmlinux.lds.h 2011-08-23 21:47:56.000000000 -0400
56966@@ -217,6 +217,7 @@
56967 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
56968 VMLINUX_SYMBOL(__start_rodata) = .; \
56969 *(.rodata) *(.rodata.*) \
56970+ *(.data..read_only) \
56971 *(__vermagic) /* Kernel version magic */ \
56972 . = ALIGN(8); \
56973 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
56974@@ -723,17 +724,18 @@
56975 * section in the linker script will go there too. @phdr should have
56976 * a leading colon.
56977 *
56978- * Note that this macros defines __per_cpu_load as an absolute symbol.
56979+ * Note that this macros defines per_cpu_load as an absolute symbol.
56980 * If there is no need to put the percpu section at a predetermined
56981 * address, use PERCPU_SECTION.
56982 */
56983 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
56984- VMLINUX_SYMBOL(__per_cpu_load) = .; \
56985- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
56986+ per_cpu_load = .; \
56987+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
56988 - LOAD_OFFSET) { \
56989+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
56990 PERCPU_INPUT(cacheline) \
56991 } phdr \
56992- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
56993+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
56994
56995 /**
56996 * PERCPU_SECTION - define output section for percpu area, simple version
56997diff -urNp linux-3.0.7/include/drm/drm_crtc_helper.h linux-3.0.7/include/drm/drm_crtc_helper.h
56998--- linux-3.0.7/include/drm/drm_crtc_helper.h 2011-07-21 22:17:23.000000000 -0400
56999+++ linux-3.0.7/include/drm/drm_crtc_helper.h 2011-08-23 21:47:56.000000000 -0400
57000@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
57001
57002 /* disable crtc when not in use - more explicit than dpms off */
57003 void (*disable)(struct drm_crtc *crtc);
57004-};
57005+} __no_const;
57006
57007 struct drm_encoder_helper_funcs {
57008 void (*dpms)(struct drm_encoder *encoder, int mode);
57009@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
57010 struct drm_connector *connector);
57011 /* disable encoder when not in use - more explicit than dpms off */
57012 void (*disable)(struct drm_encoder *encoder);
57013-};
57014+} __no_const;
57015
57016 struct drm_connector_helper_funcs {
57017 int (*get_modes)(struct drm_connector *connector);
57018diff -urNp linux-3.0.7/include/drm/drmP.h linux-3.0.7/include/drm/drmP.h
57019--- linux-3.0.7/include/drm/drmP.h 2011-07-21 22:17:23.000000000 -0400
57020+++ linux-3.0.7/include/drm/drmP.h 2011-08-23 21:47:56.000000000 -0400
57021@@ -73,6 +73,7 @@
57022 #include <linux/workqueue.h>
57023 #include <linux/poll.h>
57024 #include <asm/pgalloc.h>
57025+#include <asm/local.h>
57026 #include "drm.h"
57027
57028 #include <linux/idr.h>
57029@@ -1033,7 +1034,7 @@ struct drm_device {
57030
57031 /** \name Usage Counters */
57032 /*@{ */
57033- int open_count; /**< Outstanding files open */
57034+ local_t open_count; /**< Outstanding files open */
57035 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
57036 atomic_t vma_count; /**< Outstanding vma areas open */
57037 int buf_use; /**< Buffers in use -- cannot alloc */
57038@@ -1044,7 +1045,7 @@ struct drm_device {
57039 /*@{ */
57040 unsigned long counters;
57041 enum drm_stat_type types[15];
57042- atomic_t counts[15];
57043+ atomic_unchecked_t counts[15];
57044 /*@} */
57045
57046 struct list_head filelist;
57047diff -urNp linux-3.0.7/include/drm/ttm/ttm_memory.h linux-3.0.7/include/drm/ttm/ttm_memory.h
57048--- linux-3.0.7/include/drm/ttm/ttm_memory.h 2011-07-21 22:17:23.000000000 -0400
57049+++ linux-3.0.7/include/drm/ttm/ttm_memory.h 2011-08-23 21:47:56.000000000 -0400
57050@@ -47,7 +47,7 @@
57051
57052 struct ttm_mem_shrink {
57053 int (*do_shrink) (struct ttm_mem_shrink *);
57054-};
57055+} __no_const;
57056
57057 /**
57058 * struct ttm_mem_global - Global memory accounting structure.
57059diff -urNp linux-3.0.7/include/linux/a.out.h linux-3.0.7/include/linux/a.out.h
57060--- linux-3.0.7/include/linux/a.out.h 2011-07-21 22:17:23.000000000 -0400
57061+++ linux-3.0.7/include/linux/a.out.h 2011-08-23 21:47:56.000000000 -0400
57062@@ -39,6 +39,14 @@ enum machine_type {
57063 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57064 };
57065
57066+/* Constants for the N_FLAGS field */
57067+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57068+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57069+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57070+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57071+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57072+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57073+
57074 #if !defined (N_MAGIC)
57075 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57076 #endif
57077diff -urNp linux-3.0.7/include/linux/atmdev.h linux-3.0.7/include/linux/atmdev.h
57078--- linux-3.0.7/include/linux/atmdev.h 2011-07-21 22:17:23.000000000 -0400
57079+++ linux-3.0.7/include/linux/atmdev.h 2011-08-23 21:47:56.000000000 -0400
57080@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57081 #endif
57082
57083 struct k_atm_aal_stats {
57084-#define __HANDLE_ITEM(i) atomic_t i
57085+#define __HANDLE_ITEM(i) atomic_unchecked_t i
57086 __AAL_STAT_ITEMS
57087 #undef __HANDLE_ITEM
57088 };
57089diff -urNp linux-3.0.7/include/linux/binfmts.h linux-3.0.7/include/linux/binfmts.h
57090--- linux-3.0.7/include/linux/binfmts.h 2011-07-21 22:17:23.000000000 -0400
57091+++ linux-3.0.7/include/linux/binfmts.h 2011-08-23 21:47:56.000000000 -0400
57092@@ -88,6 +88,7 @@ struct linux_binfmt {
57093 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57094 int (*load_shlib)(struct file *);
57095 int (*core_dump)(struct coredump_params *cprm);
57096+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57097 unsigned long min_coredump; /* minimal dump size */
57098 };
57099
57100diff -urNp linux-3.0.7/include/linux/blkdev.h linux-3.0.7/include/linux/blkdev.h
57101--- linux-3.0.7/include/linux/blkdev.h 2011-07-21 22:17:23.000000000 -0400
57102+++ linux-3.0.7/include/linux/blkdev.h 2011-08-26 19:49:56.000000000 -0400
57103@@ -1308,7 +1308,7 @@ struct block_device_operations {
57104 /* this callback is with swap_lock and sometimes page table lock held */
57105 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
57106 struct module *owner;
57107-};
57108+} __do_const;
57109
57110 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57111 unsigned long);
57112diff -urNp linux-3.0.7/include/linux/blktrace_api.h linux-3.0.7/include/linux/blktrace_api.h
57113--- linux-3.0.7/include/linux/blktrace_api.h 2011-07-21 22:17:23.000000000 -0400
57114+++ linux-3.0.7/include/linux/blktrace_api.h 2011-08-23 21:47:56.000000000 -0400
57115@@ -161,7 +161,7 @@ struct blk_trace {
57116 struct dentry *dir;
57117 struct dentry *dropped_file;
57118 struct dentry *msg_file;
57119- atomic_t dropped;
57120+ atomic_unchecked_t dropped;
57121 };
57122
57123 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57124diff -urNp linux-3.0.7/include/linux/byteorder/little_endian.h linux-3.0.7/include/linux/byteorder/little_endian.h
57125--- linux-3.0.7/include/linux/byteorder/little_endian.h 2011-07-21 22:17:23.000000000 -0400
57126+++ linux-3.0.7/include/linux/byteorder/little_endian.h 2011-08-23 21:47:56.000000000 -0400
57127@@ -42,51 +42,51 @@
57128
57129 static inline __le64 __cpu_to_le64p(const __u64 *p)
57130 {
57131- return (__force __le64)*p;
57132+ return (__force const __le64)*p;
57133 }
57134 static inline __u64 __le64_to_cpup(const __le64 *p)
57135 {
57136- return (__force __u64)*p;
57137+ return (__force const __u64)*p;
57138 }
57139 static inline __le32 __cpu_to_le32p(const __u32 *p)
57140 {
57141- return (__force __le32)*p;
57142+ return (__force const __le32)*p;
57143 }
57144 static inline __u32 __le32_to_cpup(const __le32 *p)
57145 {
57146- return (__force __u32)*p;
57147+ return (__force const __u32)*p;
57148 }
57149 static inline __le16 __cpu_to_le16p(const __u16 *p)
57150 {
57151- return (__force __le16)*p;
57152+ return (__force const __le16)*p;
57153 }
57154 static inline __u16 __le16_to_cpup(const __le16 *p)
57155 {
57156- return (__force __u16)*p;
57157+ return (__force const __u16)*p;
57158 }
57159 static inline __be64 __cpu_to_be64p(const __u64 *p)
57160 {
57161- return (__force __be64)__swab64p(p);
57162+ return (__force const __be64)__swab64p(p);
57163 }
57164 static inline __u64 __be64_to_cpup(const __be64 *p)
57165 {
57166- return __swab64p((__u64 *)p);
57167+ return __swab64p((const __u64 *)p);
57168 }
57169 static inline __be32 __cpu_to_be32p(const __u32 *p)
57170 {
57171- return (__force __be32)__swab32p(p);
57172+ return (__force const __be32)__swab32p(p);
57173 }
57174 static inline __u32 __be32_to_cpup(const __be32 *p)
57175 {
57176- return __swab32p((__u32 *)p);
57177+ return __swab32p((const __u32 *)p);
57178 }
57179 static inline __be16 __cpu_to_be16p(const __u16 *p)
57180 {
57181- return (__force __be16)__swab16p(p);
57182+ return (__force const __be16)__swab16p(p);
57183 }
57184 static inline __u16 __be16_to_cpup(const __be16 *p)
57185 {
57186- return __swab16p((__u16 *)p);
57187+ return __swab16p((const __u16 *)p);
57188 }
57189 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
57190 #define __le64_to_cpus(x) do { (void)(x); } while (0)
57191diff -urNp linux-3.0.7/include/linux/cache.h linux-3.0.7/include/linux/cache.h
57192--- linux-3.0.7/include/linux/cache.h 2011-07-21 22:17:23.000000000 -0400
57193+++ linux-3.0.7/include/linux/cache.h 2011-08-23 21:47:56.000000000 -0400
57194@@ -16,6 +16,10 @@
57195 #define __read_mostly
57196 #endif
57197
57198+#ifndef __read_only
57199+#define __read_only __read_mostly
57200+#endif
57201+
57202 #ifndef ____cacheline_aligned
57203 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
57204 #endif
57205diff -urNp linux-3.0.7/include/linux/capability.h linux-3.0.7/include/linux/capability.h
57206--- linux-3.0.7/include/linux/capability.h 2011-07-21 22:17:23.000000000 -0400
57207+++ linux-3.0.7/include/linux/capability.h 2011-08-23 21:48:14.000000000 -0400
57208@@ -547,6 +547,9 @@ extern bool capable(int cap);
57209 extern bool ns_capable(struct user_namespace *ns, int cap);
57210 extern bool task_ns_capable(struct task_struct *t, int cap);
57211 extern bool nsown_capable(int cap);
57212+extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
57213+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
57214+extern bool capable_nolog(int cap);
57215
57216 /* audit system wants to get cap info from files as well */
57217 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
57218diff -urNp linux-3.0.7/include/linux/cleancache.h linux-3.0.7/include/linux/cleancache.h
57219--- linux-3.0.7/include/linux/cleancache.h 2011-07-21 22:17:23.000000000 -0400
57220+++ linux-3.0.7/include/linux/cleancache.h 2011-08-23 21:47:56.000000000 -0400
57221@@ -31,7 +31,7 @@ struct cleancache_ops {
57222 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
57223 void (*flush_inode)(int, struct cleancache_filekey);
57224 void (*flush_fs)(int);
57225-};
57226+} __no_const;
57227
57228 extern struct cleancache_ops
57229 cleancache_register_ops(struct cleancache_ops *ops);
57230diff -urNp linux-3.0.7/include/linux/compiler-gcc4.h linux-3.0.7/include/linux/compiler-gcc4.h
57231--- linux-3.0.7/include/linux/compiler-gcc4.h 2011-07-21 22:17:23.000000000 -0400
57232+++ linux-3.0.7/include/linux/compiler-gcc4.h 2011-08-26 19:49:56.000000000 -0400
57233@@ -31,6 +31,12 @@
57234
57235
57236 #if __GNUC_MINOR__ >= 5
57237+
57238+#ifdef CONSTIFY_PLUGIN
57239+#define __no_const __attribute__((no_const))
57240+#define __do_const __attribute__((do_const))
57241+#endif
57242+
57243 /*
57244 * Mark a position in code as unreachable. This can be used to
57245 * suppress control flow warnings after asm blocks that transfer
57246@@ -46,6 +52,11 @@
57247 #define __noclone __attribute__((__noclone__))
57248
57249 #endif
57250+
57251+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
57252+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
57253+#define __bos0(ptr) __bos((ptr), 0)
57254+#define __bos1(ptr) __bos((ptr), 1)
57255 #endif
57256
57257 #if __GNUC_MINOR__ > 0
57258diff -urNp linux-3.0.7/include/linux/compiler.h linux-3.0.7/include/linux/compiler.h
57259--- linux-3.0.7/include/linux/compiler.h 2011-07-21 22:17:23.000000000 -0400
57260+++ linux-3.0.7/include/linux/compiler.h 2011-10-06 04:17:55.000000000 -0400
57261@@ -5,31 +5,62 @@
57262
57263 #ifdef __CHECKER__
57264 # define __user __attribute__((noderef, address_space(1)))
57265+# define __force_user __force __user
57266 # define __kernel __attribute__((address_space(0)))
57267+# define __force_kernel __force __kernel
57268 # define __safe __attribute__((safe))
57269 # define __force __attribute__((force))
57270 # define __nocast __attribute__((nocast))
57271 # define __iomem __attribute__((noderef, address_space(2)))
57272+# define __force_iomem __force __iomem
57273 # define __acquires(x) __attribute__((context(x,0,1)))
57274 # define __releases(x) __attribute__((context(x,1,0)))
57275 # define __acquire(x) __context__(x,1)
57276 # define __release(x) __context__(x,-1)
57277 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
57278 # define __percpu __attribute__((noderef, address_space(3)))
57279+# define __force_percpu __force __percpu
57280 #ifdef CONFIG_SPARSE_RCU_POINTER
57281 # define __rcu __attribute__((noderef, address_space(4)))
57282+# define __force_rcu __force __rcu
57283 #else
57284 # define __rcu
57285+# define __force_rcu
57286 #endif
57287 extern void __chk_user_ptr(const volatile void __user *);
57288 extern void __chk_io_ptr(const volatile void __iomem *);
57289+#elif defined(CHECKER_PLUGIN)
57290+//# define __user
57291+//# define __force_user
57292+//# define __kernel
57293+//# define __force_kernel
57294+# define __safe
57295+# define __force
57296+# define __nocast
57297+# define __iomem
57298+# define __force_iomem
57299+# define __chk_user_ptr(x) (void)0
57300+# define __chk_io_ptr(x) (void)0
57301+# define __builtin_warning(x, y...) (1)
57302+# define __acquires(x)
57303+# define __releases(x)
57304+# define __acquire(x) (void)0
57305+# define __release(x) (void)0
57306+# define __cond_lock(x,c) (c)
57307+# define __percpu
57308+# define __force_percpu
57309+# define __rcu
57310+# define __force_rcu
57311 #else
57312 # define __user
57313+# define __force_user
57314 # define __kernel
57315+# define __force_kernel
57316 # define __safe
57317 # define __force
57318 # define __nocast
57319 # define __iomem
57320+# define __force_iomem
57321 # define __chk_user_ptr(x) (void)0
57322 # define __chk_io_ptr(x) (void)0
57323 # define __builtin_warning(x, y...) (1)
57324@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile
57325 # define __release(x) (void)0
57326 # define __cond_lock(x,c) (c)
57327 # define __percpu
57328+# define __force_percpu
57329 # define __rcu
57330+# define __force_rcu
57331 #endif
57332
57333 #ifdef __KERNEL__
57334@@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_
57335 # define __attribute_const__ /* unimplemented */
57336 #endif
57337
57338+#ifndef __no_const
57339+# define __no_const
57340+#endif
57341+
57342+#ifndef __do_const
57343+# define __do_const
57344+#endif
57345+
57346 /*
57347 * Tell gcc if a function is cold. The compiler will assume any path
57348 * directly leading to the call is unlikely.
57349@@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_
57350 #define __cold
57351 #endif
57352
57353+#ifndef __alloc_size
57354+#define __alloc_size(...)
57355+#endif
57356+
57357+#ifndef __bos
57358+#define __bos(ptr, arg)
57359+#endif
57360+
57361+#ifndef __bos0
57362+#define __bos0(ptr)
57363+#endif
57364+
57365+#ifndef __bos1
57366+#define __bos1(ptr)
57367+#endif
57368+
57369 /* Simple shorthand for a section definition */
57370 #ifndef __section
57371 # define __section(S) __attribute__ ((__section__(#S)))
57372@@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_
57373 * use is to mediate communication between process-level code and irq/NMI
57374 * handlers, all running on the same CPU.
57375 */
57376-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
57377+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
57378+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
57379
57380 #endif /* __LINUX_COMPILER_H */
57381diff -urNp linux-3.0.7/include/linux/cpuset.h linux-3.0.7/include/linux/cpuset.h
57382--- linux-3.0.7/include/linux/cpuset.h 2011-07-21 22:17:23.000000000 -0400
57383+++ linux-3.0.7/include/linux/cpuset.h 2011-08-23 21:47:56.000000000 -0400
57384@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
57385 * nodemask.
57386 */
57387 smp_mb();
57388- --ACCESS_ONCE(current->mems_allowed_change_disable);
57389+ --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
57390 }
57391
57392 static inline void set_mems_allowed(nodemask_t nodemask)
57393diff -urNp linux-3.0.7/include/linux/crypto.h linux-3.0.7/include/linux/crypto.h
57394--- linux-3.0.7/include/linux/crypto.h 2011-07-21 22:17:23.000000000 -0400
57395+++ linux-3.0.7/include/linux/crypto.h 2011-08-23 21:47:56.000000000 -0400
57396@@ -361,7 +361,7 @@ struct cipher_tfm {
57397 const u8 *key, unsigned int keylen);
57398 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57399 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57400-};
57401+} __no_const;
57402
57403 struct hash_tfm {
57404 int (*init)(struct hash_desc *desc);
57405@@ -382,13 +382,13 @@ struct compress_tfm {
57406 int (*cot_decompress)(struct crypto_tfm *tfm,
57407 const u8 *src, unsigned int slen,
57408 u8 *dst, unsigned int *dlen);
57409-};
57410+} __no_const;
57411
57412 struct rng_tfm {
57413 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
57414 unsigned int dlen);
57415 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
57416-};
57417+} __no_const;
57418
57419 #define crt_ablkcipher crt_u.ablkcipher
57420 #define crt_aead crt_u.aead
57421diff -urNp linux-3.0.7/include/linux/decompress/mm.h linux-3.0.7/include/linux/decompress/mm.h
57422--- linux-3.0.7/include/linux/decompress/mm.h 2011-07-21 22:17:23.000000000 -0400
57423+++ linux-3.0.7/include/linux/decompress/mm.h 2011-08-23 21:47:56.000000000 -0400
57424@@ -77,7 +77,7 @@ static void free(void *where)
57425 * warnings when not needed (indeed large_malloc / large_free are not
57426 * needed by inflate */
57427
57428-#define malloc(a) kmalloc(a, GFP_KERNEL)
57429+#define malloc(a) kmalloc((a), GFP_KERNEL)
57430 #define free(a) kfree(a)
57431
57432 #define large_malloc(a) vmalloc(a)
57433diff -urNp linux-3.0.7/include/linux/dma-mapping.h linux-3.0.7/include/linux/dma-mapping.h
57434--- linux-3.0.7/include/linux/dma-mapping.h 2011-07-21 22:17:23.000000000 -0400
57435+++ linux-3.0.7/include/linux/dma-mapping.h 2011-08-26 19:49:56.000000000 -0400
57436@@ -50,7 +50,7 @@ struct dma_map_ops {
57437 int (*dma_supported)(struct device *dev, u64 mask);
57438 int (*set_dma_mask)(struct device *dev, u64 mask);
57439 int is_phys;
57440-};
57441+} __do_const;
57442
57443 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
57444
57445diff -urNp linux-3.0.7/include/linux/efi.h linux-3.0.7/include/linux/efi.h
57446--- linux-3.0.7/include/linux/efi.h 2011-07-21 22:17:23.000000000 -0400
57447+++ linux-3.0.7/include/linux/efi.h 2011-08-23 21:47:56.000000000 -0400
57448@@ -410,7 +410,7 @@ struct efivar_operations {
57449 efi_get_variable_t *get_variable;
57450 efi_get_next_variable_t *get_next_variable;
57451 efi_set_variable_t *set_variable;
57452-};
57453+} __no_const;
57454
57455 struct efivars {
57456 /*
57457diff -urNp linux-3.0.7/include/linux/elf.h linux-3.0.7/include/linux/elf.h
57458--- linux-3.0.7/include/linux/elf.h 2011-07-21 22:17:23.000000000 -0400
57459+++ linux-3.0.7/include/linux/elf.h 2011-08-23 21:47:56.000000000 -0400
57460@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
57461 #define PT_GNU_EH_FRAME 0x6474e550
57462
57463 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
57464+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
57465+
57466+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
57467+
57468+/* Constants for the e_flags field */
57469+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57470+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
57471+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
57472+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
57473+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57474+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57475
57476 /*
57477 * Extended Numbering
57478@@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
57479 #define DT_DEBUG 21
57480 #define DT_TEXTREL 22
57481 #define DT_JMPREL 23
57482+#define DT_FLAGS 30
57483+ #define DF_TEXTREL 0x00000004
57484 #define DT_ENCODING 32
57485 #define OLD_DT_LOOS 0x60000000
57486 #define DT_LOOS 0x6000000d
57487@@ -252,6 +265,19 @@ typedef struct elf64_hdr {
57488 #define PF_W 0x2
57489 #define PF_X 0x1
57490
57491+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
57492+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
57493+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
57494+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
57495+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
57496+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
57497+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
57498+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
57499+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
57500+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
57501+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
57502+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
57503+
57504 typedef struct elf32_phdr{
57505 Elf32_Word p_type;
57506 Elf32_Off p_offset;
57507@@ -344,6 +370,8 @@ typedef struct elf64_shdr {
57508 #define EI_OSABI 7
57509 #define EI_PAD 8
57510
57511+#define EI_PAX 14
57512+
57513 #define ELFMAG0 0x7f /* EI_MAG */
57514 #define ELFMAG1 'E'
57515 #define ELFMAG2 'L'
57516@@ -422,6 +450,7 @@ extern Elf32_Dyn _DYNAMIC [];
57517 #define elf_note elf32_note
57518 #define elf_addr_t Elf32_Off
57519 #define Elf_Half Elf32_Half
57520+#define elf_dyn Elf32_Dyn
57521
57522 #else
57523
57524@@ -432,6 +461,7 @@ extern Elf64_Dyn _DYNAMIC [];
57525 #define elf_note elf64_note
57526 #define elf_addr_t Elf64_Off
57527 #define Elf_Half Elf64_Half
57528+#define elf_dyn Elf64_Dyn
57529
57530 #endif
57531
57532diff -urNp linux-3.0.7/include/linux/firewire.h linux-3.0.7/include/linux/firewire.h
57533--- linux-3.0.7/include/linux/firewire.h 2011-07-21 22:17:23.000000000 -0400
57534+++ linux-3.0.7/include/linux/firewire.h 2011-08-23 21:47:56.000000000 -0400
57535@@ -428,7 +428,7 @@ struct fw_iso_context {
57536 union {
57537 fw_iso_callback_t sc;
57538 fw_iso_mc_callback_t mc;
57539- } callback;
57540+ } __no_const callback;
57541 void *callback_data;
57542 };
57543
57544diff -urNp linux-3.0.7/include/linux/fscache-cache.h linux-3.0.7/include/linux/fscache-cache.h
57545--- linux-3.0.7/include/linux/fscache-cache.h 2011-07-21 22:17:23.000000000 -0400
57546+++ linux-3.0.7/include/linux/fscache-cache.h 2011-08-23 21:47:56.000000000 -0400
57547@@ -102,7 +102,7 @@ struct fscache_operation {
57548 fscache_operation_release_t release;
57549 };
57550
57551-extern atomic_t fscache_op_debug_id;
57552+extern atomic_unchecked_t fscache_op_debug_id;
57553 extern void fscache_op_work_func(struct work_struct *work);
57554
57555 extern void fscache_enqueue_operation(struct fscache_operation *);
57556@@ -122,7 +122,7 @@ static inline void fscache_operation_ini
57557 {
57558 INIT_WORK(&op->work, fscache_op_work_func);
57559 atomic_set(&op->usage, 1);
57560- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
57561+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
57562 op->processor = processor;
57563 op->release = release;
57564 INIT_LIST_HEAD(&op->pend_link);
57565diff -urNp linux-3.0.7/include/linux/fs.h linux-3.0.7/include/linux/fs.h
57566--- linux-3.0.7/include/linux/fs.h 2011-07-21 22:17:23.000000000 -0400
57567+++ linux-3.0.7/include/linux/fs.h 2011-08-26 19:49:56.000000000 -0400
57568@@ -109,6 +109,11 @@ struct inodes_stat_t {
57569 /* File was opened by fanotify and shouldn't generate fanotify events */
57570 #define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
57571
57572+/* Hack for grsec so as not to require read permission simply to execute
57573+ * a binary
57574+ */
57575+#define FMODE_GREXEC ((__force fmode_t)0x2000000)
57576+
57577 /*
57578 * The below are the various read and write types that we support. Some of
57579 * them include behavioral modifiers that send information down to the
57580@@ -1571,7 +1576,8 @@ struct file_operations {
57581 int (*setlease)(struct file *, long, struct file_lock **);
57582 long (*fallocate)(struct file *file, int mode, loff_t offset,
57583 loff_t len);
57584-};
57585+} __do_const;
57586+typedef struct file_operations __no_const file_operations_no_const;
57587
57588 #define IPERM_FLAG_RCU 0x0001
57589
57590diff -urNp linux-3.0.7/include/linux/fsnotify.h linux-3.0.7/include/linux/fsnotify.h
57591--- linux-3.0.7/include/linux/fsnotify.h 2011-07-21 22:17:23.000000000 -0400
57592+++ linux-3.0.7/include/linux/fsnotify.h 2011-08-24 18:10:29.000000000 -0400
57593@@ -314,7 +314,7 @@ static inline void fsnotify_change(struc
57594 */
57595 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
57596 {
57597- return kstrdup(name, GFP_KERNEL);
57598+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
57599 }
57600
57601 /*
57602diff -urNp linux-3.0.7/include/linux/fs_struct.h linux-3.0.7/include/linux/fs_struct.h
57603--- linux-3.0.7/include/linux/fs_struct.h 2011-07-21 22:17:23.000000000 -0400
57604+++ linux-3.0.7/include/linux/fs_struct.h 2011-08-23 21:47:56.000000000 -0400
57605@@ -6,7 +6,7 @@
57606 #include <linux/seqlock.h>
57607
57608 struct fs_struct {
57609- int users;
57610+ atomic_t users;
57611 spinlock_t lock;
57612 seqcount_t seq;
57613 int umask;
57614diff -urNp linux-3.0.7/include/linux/ftrace_event.h linux-3.0.7/include/linux/ftrace_event.h
57615--- linux-3.0.7/include/linux/ftrace_event.h 2011-07-21 22:17:23.000000000 -0400
57616+++ linux-3.0.7/include/linux/ftrace_event.h 2011-08-23 21:47:56.000000000 -0400
57617@@ -96,7 +96,7 @@ struct trace_event_functions {
57618 trace_print_func raw;
57619 trace_print_func hex;
57620 trace_print_func binary;
57621-};
57622+} __no_const;
57623
57624 struct trace_event {
57625 struct hlist_node node;
57626@@ -247,7 +247,7 @@ extern int trace_define_field(struct ftr
57627 extern int trace_add_event_call(struct ftrace_event_call *call);
57628 extern void trace_remove_event_call(struct ftrace_event_call *call);
57629
57630-#define is_signed_type(type) (((type)(-1)) < 0)
57631+#define is_signed_type(type) (((type)(-1)) < (type)1)
57632
57633 int trace_set_clr_event(const char *system, const char *event, int set);
57634
57635diff -urNp linux-3.0.7/include/linux/genhd.h linux-3.0.7/include/linux/genhd.h
57636--- linux-3.0.7/include/linux/genhd.h 2011-07-21 22:17:23.000000000 -0400
57637+++ linux-3.0.7/include/linux/genhd.h 2011-08-23 21:47:56.000000000 -0400
57638@@ -184,7 +184,7 @@ struct gendisk {
57639 struct kobject *slave_dir;
57640
57641 struct timer_rand_state *random;
57642- atomic_t sync_io; /* RAID */
57643+ atomic_unchecked_t sync_io; /* RAID */
57644 struct disk_events *ev;
57645 #ifdef CONFIG_BLK_DEV_INTEGRITY
57646 struct blk_integrity *integrity;
57647diff -urNp linux-3.0.7/include/linux/gracl.h linux-3.0.7/include/linux/gracl.h
57648--- linux-3.0.7/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
57649+++ linux-3.0.7/include/linux/gracl.h 2011-08-23 21:48:14.000000000 -0400
57650@@ -0,0 +1,317 @@
57651+#ifndef GR_ACL_H
57652+#define GR_ACL_H
57653+
57654+#include <linux/grdefs.h>
57655+#include <linux/resource.h>
57656+#include <linux/capability.h>
57657+#include <linux/dcache.h>
57658+#include <asm/resource.h>
57659+
57660+/* Major status information */
57661+
57662+#define GR_VERSION "grsecurity 2.2.2"
57663+#define GRSECURITY_VERSION 0x2202
57664+
57665+enum {
57666+ GR_SHUTDOWN = 0,
57667+ GR_ENABLE = 1,
57668+ GR_SPROLE = 2,
57669+ GR_RELOAD = 3,
57670+ GR_SEGVMOD = 4,
57671+ GR_STATUS = 5,
57672+ GR_UNSPROLE = 6,
57673+ GR_PASSSET = 7,
57674+ GR_SPROLEPAM = 8,
57675+};
57676+
57677+/* Password setup definitions
57678+ * kernel/grhash.c */
57679+enum {
57680+ GR_PW_LEN = 128,
57681+ GR_SALT_LEN = 16,
57682+ GR_SHA_LEN = 32,
57683+};
57684+
57685+enum {
57686+ GR_SPROLE_LEN = 64,
57687+};
57688+
57689+enum {
57690+ GR_NO_GLOB = 0,
57691+ GR_REG_GLOB,
57692+ GR_CREATE_GLOB
57693+};
57694+
57695+#define GR_NLIMITS 32
57696+
57697+/* Begin Data Structures */
57698+
57699+struct sprole_pw {
57700+ unsigned char *rolename;
57701+ unsigned char salt[GR_SALT_LEN];
57702+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
57703+};
57704+
57705+struct name_entry {
57706+ __u32 key;
57707+ ino_t inode;
57708+ dev_t device;
57709+ char *name;
57710+ __u16 len;
57711+ __u8 deleted;
57712+ struct name_entry *prev;
57713+ struct name_entry *next;
57714+};
57715+
57716+struct inodev_entry {
57717+ struct name_entry *nentry;
57718+ struct inodev_entry *prev;
57719+ struct inodev_entry *next;
57720+};
57721+
57722+struct acl_role_db {
57723+ struct acl_role_label **r_hash;
57724+ __u32 r_size;
57725+};
57726+
57727+struct inodev_db {
57728+ struct inodev_entry **i_hash;
57729+ __u32 i_size;
57730+};
57731+
57732+struct name_db {
57733+ struct name_entry **n_hash;
57734+ __u32 n_size;
57735+};
57736+
57737+struct crash_uid {
57738+ uid_t uid;
57739+ unsigned long expires;
57740+};
57741+
57742+struct gr_hash_struct {
57743+ void **table;
57744+ void **nametable;
57745+ void *first;
57746+ __u32 table_size;
57747+ __u32 used_size;
57748+ int type;
57749+};
57750+
57751+/* Userspace Grsecurity ACL data structures */
57752+
57753+struct acl_subject_label {
57754+ char *filename;
57755+ ino_t inode;
57756+ dev_t device;
57757+ __u32 mode;
57758+ kernel_cap_t cap_mask;
57759+ kernel_cap_t cap_lower;
57760+ kernel_cap_t cap_invert_audit;
57761+
57762+ struct rlimit res[GR_NLIMITS];
57763+ __u32 resmask;
57764+
57765+ __u8 user_trans_type;
57766+ __u8 group_trans_type;
57767+ uid_t *user_transitions;
57768+ gid_t *group_transitions;
57769+ __u16 user_trans_num;
57770+ __u16 group_trans_num;
57771+
57772+ __u32 sock_families[2];
57773+ __u32 ip_proto[8];
57774+ __u32 ip_type;
57775+ struct acl_ip_label **ips;
57776+ __u32 ip_num;
57777+ __u32 inaddr_any_override;
57778+
57779+ __u32 crashes;
57780+ unsigned long expires;
57781+
57782+ struct acl_subject_label *parent_subject;
57783+ struct gr_hash_struct *hash;
57784+ struct acl_subject_label *prev;
57785+ struct acl_subject_label *next;
57786+
57787+ struct acl_object_label **obj_hash;
57788+ __u32 obj_hash_size;
57789+ __u16 pax_flags;
57790+};
57791+
57792+struct role_allowed_ip {
57793+ __u32 addr;
57794+ __u32 netmask;
57795+
57796+ struct role_allowed_ip *prev;
57797+ struct role_allowed_ip *next;
57798+};
57799+
57800+struct role_transition {
57801+ char *rolename;
57802+
57803+ struct role_transition *prev;
57804+ struct role_transition *next;
57805+};
57806+
57807+struct acl_role_label {
57808+ char *rolename;
57809+ uid_t uidgid;
57810+ __u16 roletype;
57811+
57812+ __u16 auth_attempts;
57813+ unsigned long expires;
57814+
57815+ struct acl_subject_label *root_label;
57816+ struct gr_hash_struct *hash;
57817+
57818+ struct acl_role_label *prev;
57819+ struct acl_role_label *next;
57820+
57821+ struct role_transition *transitions;
57822+ struct role_allowed_ip *allowed_ips;
57823+ uid_t *domain_children;
57824+ __u16 domain_child_num;
57825+
57826+ struct acl_subject_label **subj_hash;
57827+ __u32 subj_hash_size;
57828+};
57829+
57830+struct user_acl_role_db {
57831+ struct acl_role_label **r_table;
57832+ __u32 num_pointers; /* Number of allocations to track */
57833+ __u32 num_roles; /* Number of roles */
57834+ __u32 num_domain_children; /* Number of domain children */
57835+ __u32 num_subjects; /* Number of subjects */
57836+ __u32 num_objects; /* Number of objects */
57837+};
57838+
57839+struct acl_object_label {
57840+ char *filename;
57841+ ino_t inode;
57842+ dev_t device;
57843+ __u32 mode;
57844+
57845+ struct acl_subject_label *nested;
57846+ struct acl_object_label *globbed;
57847+
57848+ /* next two structures not used */
57849+
57850+ struct acl_object_label *prev;
57851+ struct acl_object_label *next;
57852+};
57853+
57854+struct acl_ip_label {
57855+ char *iface;
57856+ __u32 addr;
57857+ __u32 netmask;
57858+ __u16 low, high;
57859+ __u8 mode;
57860+ __u32 type;
57861+ __u32 proto[8];
57862+
57863+ /* next two structures not used */
57864+
57865+ struct acl_ip_label *prev;
57866+ struct acl_ip_label *next;
57867+};
57868+
57869+struct gr_arg {
57870+ struct user_acl_role_db role_db;
57871+ unsigned char pw[GR_PW_LEN];
57872+ unsigned char salt[GR_SALT_LEN];
57873+ unsigned char sum[GR_SHA_LEN];
57874+ unsigned char sp_role[GR_SPROLE_LEN];
57875+ struct sprole_pw *sprole_pws;
57876+ dev_t segv_device;
57877+ ino_t segv_inode;
57878+ uid_t segv_uid;
57879+ __u16 num_sprole_pws;
57880+ __u16 mode;
57881+};
57882+
57883+struct gr_arg_wrapper {
57884+ struct gr_arg *arg;
57885+ __u32 version;
57886+ __u32 size;
57887+};
57888+
57889+struct subject_map {
57890+ struct acl_subject_label *user;
57891+ struct acl_subject_label *kernel;
57892+ struct subject_map *prev;
57893+ struct subject_map *next;
57894+};
57895+
57896+struct acl_subj_map_db {
57897+ struct subject_map **s_hash;
57898+ __u32 s_size;
57899+};
57900+
57901+/* End Data Structures Section */
57902+
57903+/* Hash functions generated by empirical testing by Brad Spengler
57904+ Makes good use of the low bits of the inode. Generally 0-1 times
57905+ in loop for successful match. 0-3 for unsuccessful match.
57906+ Shift/add algorithm with modulus of table size and an XOR*/
57907+
57908+static __inline__ unsigned int
57909+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
57910+{
57911+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
57912+}
57913+
57914+ static __inline__ unsigned int
57915+shash(const struct acl_subject_label *userp, const unsigned int sz)
57916+{
57917+ return ((const unsigned long)userp % sz);
57918+}
57919+
57920+static __inline__ unsigned int
57921+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
57922+{
57923+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
57924+}
57925+
57926+static __inline__ unsigned int
57927+nhash(const char *name, const __u16 len, const unsigned int sz)
57928+{
57929+ return full_name_hash((const unsigned char *)name, len) % sz;
57930+}
57931+
57932+#define FOR_EACH_ROLE_START(role) \
57933+ role = role_list; \
57934+ while (role) {
57935+
57936+#define FOR_EACH_ROLE_END(role) \
57937+ role = role->prev; \
57938+ }
57939+
57940+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
57941+ subj = NULL; \
57942+ iter = 0; \
57943+ while (iter < role->subj_hash_size) { \
57944+ if (subj == NULL) \
57945+ subj = role->subj_hash[iter]; \
57946+ if (subj == NULL) { \
57947+ iter++; \
57948+ continue; \
57949+ }
57950+
57951+#define FOR_EACH_SUBJECT_END(subj,iter) \
57952+ subj = subj->next; \
57953+ if (subj == NULL) \
57954+ iter++; \
57955+ }
57956+
57957+
57958+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
57959+ subj = role->hash->first; \
57960+ while (subj != NULL) {
57961+
57962+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
57963+ subj = subj->next; \
57964+ }
57965+
57966+#endif
57967+
57968diff -urNp linux-3.0.7/include/linux/gralloc.h linux-3.0.7/include/linux/gralloc.h
57969--- linux-3.0.7/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
57970+++ linux-3.0.7/include/linux/gralloc.h 2011-08-23 21:48:14.000000000 -0400
57971@@ -0,0 +1,9 @@
57972+#ifndef __GRALLOC_H
57973+#define __GRALLOC_H
57974+
57975+void acl_free_all(void);
57976+int acl_alloc_stack_init(unsigned long size);
57977+void *acl_alloc(unsigned long len);
57978+void *acl_alloc_num(unsigned long num, unsigned long len);
57979+
57980+#endif
57981diff -urNp linux-3.0.7/include/linux/grdefs.h linux-3.0.7/include/linux/grdefs.h
57982--- linux-3.0.7/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
57983+++ linux-3.0.7/include/linux/grdefs.h 2011-08-23 21:48:14.000000000 -0400
57984@@ -0,0 +1,140 @@
57985+#ifndef GRDEFS_H
57986+#define GRDEFS_H
57987+
57988+/* Begin grsecurity status declarations */
57989+
57990+enum {
57991+ GR_READY = 0x01,
57992+ GR_STATUS_INIT = 0x00 // disabled state
57993+};
57994+
57995+/* Begin ACL declarations */
57996+
57997+/* Role flags */
57998+
57999+enum {
58000+ GR_ROLE_USER = 0x0001,
58001+ GR_ROLE_GROUP = 0x0002,
58002+ GR_ROLE_DEFAULT = 0x0004,
58003+ GR_ROLE_SPECIAL = 0x0008,
58004+ GR_ROLE_AUTH = 0x0010,
58005+ GR_ROLE_NOPW = 0x0020,
58006+ GR_ROLE_GOD = 0x0040,
58007+ GR_ROLE_LEARN = 0x0080,
58008+ GR_ROLE_TPE = 0x0100,
58009+ GR_ROLE_DOMAIN = 0x0200,
58010+ GR_ROLE_PAM = 0x0400,
58011+ GR_ROLE_PERSIST = 0x0800
58012+};
58013+
58014+/* ACL Subject and Object mode flags */
58015+enum {
58016+ GR_DELETED = 0x80000000
58017+};
58018+
58019+/* ACL Object-only mode flags */
58020+enum {
58021+ GR_READ = 0x00000001,
58022+ GR_APPEND = 0x00000002,
58023+ GR_WRITE = 0x00000004,
58024+ GR_EXEC = 0x00000008,
58025+ GR_FIND = 0x00000010,
58026+ GR_INHERIT = 0x00000020,
58027+ GR_SETID = 0x00000040,
58028+ GR_CREATE = 0x00000080,
58029+ GR_DELETE = 0x00000100,
58030+ GR_LINK = 0x00000200,
58031+ GR_AUDIT_READ = 0x00000400,
58032+ GR_AUDIT_APPEND = 0x00000800,
58033+ GR_AUDIT_WRITE = 0x00001000,
58034+ GR_AUDIT_EXEC = 0x00002000,
58035+ GR_AUDIT_FIND = 0x00004000,
58036+ GR_AUDIT_INHERIT= 0x00008000,
58037+ GR_AUDIT_SETID = 0x00010000,
58038+ GR_AUDIT_CREATE = 0x00020000,
58039+ GR_AUDIT_DELETE = 0x00040000,
58040+ GR_AUDIT_LINK = 0x00080000,
58041+ GR_PTRACERD = 0x00100000,
58042+ GR_NOPTRACE = 0x00200000,
58043+ GR_SUPPRESS = 0x00400000,
58044+ GR_NOLEARN = 0x00800000,
58045+ GR_INIT_TRANSFER= 0x01000000
58046+};
58047+
58048+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
58049+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
58050+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
58051+
58052+/* ACL subject-only mode flags */
58053+enum {
58054+ GR_KILL = 0x00000001,
58055+ GR_VIEW = 0x00000002,
58056+ GR_PROTECTED = 0x00000004,
58057+ GR_LEARN = 0x00000008,
58058+ GR_OVERRIDE = 0x00000010,
58059+ /* just a placeholder, this mode is only used in userspace */
58060+ GR_DUMMY = 0x00000020,
58061+ GR_PROTSHM = 0x00000040,
58062+ GR_KILLPROC = 0x00000080,
58063+ GR_KILLIPPROC = 0x00000100,
58064+ /* just a placeholder, this mode is only used in userspace */
58065+ GR_NOTROJAN = 0x00000200,
58066+ GR_PROTPROCFD = 0x00000400,
58067+ GR_PROCACCT = 0x00000800,
58068+ GR_RELAXPTRACE = 0x00001000,
58069+ GR_NESTED = 0x00002000,
58070+ GR_INHERITLEARN = 0x00004000,
58071+ GR_PROCFIND = 0x00008000,
58072+ GR_POVERRIDE = 0x00010000,
58073+ GR_KERNELAUTH = 0x00020000,
58074+ GR_ATSECURE = 0x00040000,
58075+ GR_SHMEXEC = 0x00080000
58076+};
58077+
58078+enum {
58079+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58080+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58081+ GR_PAX_ENABLE_MPROTECT = 0x0004,
58082+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
58083+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
58084+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
58085+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
58086+ GR_PAX_DISABLE_MPROTECT = 0x0400,
58087+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
58088+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
58089+};
58090+
58091+enum {
58092+ GR_ID_USER = 0x01,
58093+ GR_ID_GROUP = 0x02,
58094+};
58095+
58096+enum {
58097+ GR_ID_ALLOW = 0x01,
58098+ GR_ID_DENY = 0x02,
58099+};
58100+
58101+#define GR_CRASH_RES 31
58102+#define GR_UIDTABLE_MAX 500
58103+
58104+/* begin resource learning section */
58105+enum {
58106+ GR_RLIM_CPU_BUMP = 60,
58107+ GR_RLIM_FSIZE_BUMP = 50000,
58108+ GR_RLIM_DATA_BUMP = 10000,
58109+ GR_RLIM_STACK_BUMP = 1000,
58110+ GR_RLIM_CORE_BUMP = 10000,
58111+ GR_RLIM_RSS_BUMP = 500000,
58112+ GR_RLIM_NPROC_BUMP = 1,
58113+ GR_RLIM_NOFILE_BUMP = 5,
58114+ GR_RLIM_MEMLOCK_BUMP = 50000,
58115+ GR_RLIM_AS_BUMP = 500000,
58116+ GR_RLIM_LOCKS_BUMP = 2,
58117+ GR_RLIM_SIGPENDING_BUMP = 5,
58118+ GR_RLIM_MSGQUEUE_BUMP = 10000,
58119+ GR_RLIM_NICE_BUMP = 1,
58120+ GR_RLIM_RTPRIO_BUMP = 1,
58121+ GR_RLIM_RTTIME_BUMP = 1000000
58122+};
58123+
58124+#endif
58125diff -urNp linux-3.0.7/include/linux/grinternal.h linux-3.0.7/include/linux/grinternal.h
58126--- linux-3.0.7/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
58127+++ linux-3.0.7/include/linux/grinternal.h 2011-10-17 00:25:19.000000000 -0400
58128@@ -0,0 +1,219 @@
58129+#ifndef __GRINTERNAL_H
58130+#define __GRINTERNAL_H
58131+
58132+#ifdef CONFIG_GRKERNSEC
58133+
58134+#include <linux/fs.h>
58135+#include <linux/mnt_namespace.h>
58136+#include <linux/nsproxy.h>
58137+#include <linux/gracl.h>
58138+#include <linux/grdefs.h>
58139+#include <linux/grmsg.h>
58140+
58141+void gr_add_learn_entry(const char *fmt, ...)
58142+ __attribute__ ((format (printf, 1, 2)));
58143+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
58144+ const struct vfsmount *mnt);
58145+__u32 gr_check_create(const struct dentry *new_dentry,
58146+ const struct dentry *parent,
58147+ const struct vfsmount *mnt, const __u32 mode);
58148+int gr_check_protected_task(const struct task_struct *task);
58149+__u32 to_gr_audit(const __u32 reqmode);
58150+int gr_set_acls(const int type);
58151+int gr_apply_subject_to_task(struct task_struct *task);
58152+int gr_acl_is_enabled(void);
58153+char gr_roletype_to_char(void);
58154+
58155+void gr_handle_alertkill(struct task_struct *task);
58156+char *gr_to_filename(const struct dentry *dentry,
58157+ const struct vfsmount *mnt);
58158+char *gr_to_filename1(const struct dentry *dentry,
58159+ const struct vfsmount *mnt);
58160+char *gr_to_filename2(const struct dentry *dentry,
58161+ const struct vfsmount *mnt);
58162+char *gr_to_filename3(const struct dentry *dentry,
58163+ const struct vfsmount *mnt);
58164+
58165+extern int grsec_enable_harden_ptrace;
58166+extern int grsec_enable_link;
58167+extern int grsec_enable_fifo;
58168+extern int grsec_enable_execve;
58169+extern int grsec_enable_shm;
58170+extern int grsec_enable_execlog;
58171+extern int grsec_enable_signal;
58172+extern int grsec_enable_audit_ptrace;
58173+extern int grsec_enable_forkfail;
58174+extern int grsec_enable_time;
58175+extern int grsec_enable_rofs;
58176+extern int grsec_enable_chroot_shmat;
58177+extern int grsec_enable_chroot_mount;
58178+extern int grsec_enable_chroot_double;
58179+extern int grsec_enable_chroot_pivot;
58180+extern int grsec_enable_chroot_chdir;
58181+extern int grsec_enable_chroot_chmod;
58182+extern int grsec_enable_chroot_mknod;
58183+extern int grsec_enable_chroot_fchdir;
58184+extern int grsec_enable_chroot_nice;
58185+extern int grsec_enable_chroot_execlog;
58186+extern int grsec_enable_chroot_caps;
58187+extern int grsec_enable_chroot_sysctl;
58188+extern int grsec_enable_chroot_unix;
58189+extern int grsec_enable_tpe;
58190+extern int grsec_tpe_gid;
58191+extern int grsec_enable_tpe_all;
58192+extern int grsec_enable_tpe_invert;
58193+extern int grsec_enable_socket_all;
58194+extern int grsec_socket_all_gid;
58195+extern int grsec_enable_socket_client;
58196+extern int grsec_socket_client_gid;
58197+extern int grsec_enable_socket_server;
58198+extern int grsec_socket_server_gid;
58199+extern int grsec_audit_gid;
58200+extern int grsec_enable_group;
58201+extern int grsec_enable_audit_textrel;
58202+extern int grsec_enable_log_rwxmaps;
58203+extern int grsec_enable_mount;
58204+extern int grsec_enable_chdir;
58205+extern int grsec_resource_logging;
58206+extern int grsec_enable_blackhole;
58207+extern int grsec_lastack_retries;
58208+extern int grsec_enable_brute;
58209+extern int grsec_lock;
58210+
58211+extern spinlock_t grsec_alert_lock;
58212+extern unsigned long grsec_alert_wtime;
58213+extern unsigned long grsec_alert_fyet;
58214+
58215+extern spinlock_t grsec_audit_lock;
58216+
58217+extern rwlock_t grsec_exec_file_lock;
58218+
58219+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
58220+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
58221+ (tsk)->exec_file->f_vfsmnt) : "/")
58222+
58223+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
58224+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
58225+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58226+
58227+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
58228+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
58229+ (tsk)->exec_file->f_vfsmnt) : "/")
58230+
58231+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
58232+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
58233+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58234+
58235+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
58236+
58237+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
58238+
58239+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
58240+ (task)->pid, (cred)->uid, \
58241+ (cred)->euid, (cred)->gid, (cred)->egid, \
58242+ gr_parent_task_fullpath(task), \
58243+ (task)->real_parent->comm, (task)->real_parent->pid, \
58244+ (pcred)->uid, (pcred)->euid, \
58245+ (pcred)->gid, (pcred)->egid
58246+
58247+#define GR_CHROOT_CAPS {{ \
58248+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
58249+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
58250+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
58251+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
58252+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
58253+ CAP_TO_MASK(CAP_IPC_OWNER) , CAP_TO_MASK(CAP_SYSLOG) }}
58254+
58255+#define security_learn(normal_msg,args...) \
58256+({ \
58257+ read_lock(&grsec_exec_file_lock); \
58258+ gr_add_learn_entry(normal_msg "\n", ## args); \
58259+ read_unlock(&grsec_exec_file_lock); \
58260+})
58261+
58262+enum {
58263+ GR_DO_AUDIT,
58264+ GR_DONT_AUDIT,
58265+ /* used for non-audit messages that we shouldn't kill the task on */
58266+ GR_DONT_AUDIT_GOOD
58267+};
58268+
58269+enum {
58270+ GR_TTYSNIFF,
58271+ GR_RBAC,
58272+ GR_RBAC_STR,
58273+ GR_STR_RBAC,
58274+ GR_RBAC_MODE2,
58275+ GR_RBAC_MODE3,
58276+ GR_FILENAME,
58277+ GR_SYSCTL_HIDDEN,
58278+ GR_NOARGS,
58279+ GR_ONE_INT,
58280+ GR_ONE_INT_TWO_STR,
58281+ GR_ONE_STR,
58282+ GR_STR_INT,
58283+ GR_TWO_STR_INT,
58284+ GR_TWO_INT,
58285+ GR_TWO_U64,
58286+ GR_THREE_INT,
58287+ GR_FIVE_INT_TWO_STR,
58288+ GR_TWO_STR,
58289+ GR_THREE_STR,
58290+ GR_FOUR_STR,
58291+ GR_STR_FILENAME,
58292+ GR_FILENAME_STR,
58293+ GR_FILENAME_TWO_INT,
58294+ GR_FILENAME_TWO_INT_STR,
58295+ GR_TEXTREL,
58296+ GR_PTRACE,
58297+ GR_RESOURCE,
58298+ GR_CAP,
58299+ GR_SIG,
58300+ GR_SIG2,
58301+ GR_CRASH1,
58302+ GR_CRASH2,
58303+ GR_PSACCT,
58304+ GR_RWXMAP
58305+};
58306+
58307+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
58308+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
58309+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
58310+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
58311+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
58312+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
58313+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
58314+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
58315+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
58316+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
58317+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
58318+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
58319+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
58320+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
58321+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
58322+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
58323+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
58324+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
58325+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
58326+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
58327+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
58328+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
58329+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
58330+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
58331+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
58332+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
58333+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
58334+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
58335+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
58336+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
58337+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
58338+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
58339+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
58340+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
58341+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
58342+
58343+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
58344+
58345+#endif
58346+
58347+#endif
58348diff -urNp linux-3.0.7/include/linux/grmsg.h linux-3.0.7/include/linux/grmsg.h
58349--- linux-3.0.7/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
58350+++ linux-3.0.7/include/linux/grmsg.h 2011-09-14 09:16:54.000000000 -0400
58351@@ -0,0 +1,108 @@
58352+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
58353+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
58354+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
58355+#define GR_STOPMOD_MSG "denied modification of module state by "
58356+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
58357+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
58358+#define GR_IOPERM_MSG "denied use of ioperm() by "
58359+#define GR_IOPL_MSG "denied use of iopl() by "
58360+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
58361+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
58362+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
58363+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
58364+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
58365+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
58366+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
58367+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
58368+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
58369+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
58370+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
58371+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
58372+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
58373+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
58374+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
58375+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
58376+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
58377+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
58378+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
58379+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
58380+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
58381+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
58382+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
58383+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
58384+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
58385+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
58386+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
58387+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
58388+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
58389+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
58390+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
58391+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
58392+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
58393+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
58394+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
58395+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
58396+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
58397+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
58398+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
58399+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
58400+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
58401+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
58402+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
58403+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
58404+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
58405+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
58406+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
58407+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
58408+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
58409+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
58410+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
58411+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
58412+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
58413+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
58414+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
58415+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
58416+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
58417+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
58418+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
58419+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
58420+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
58421+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
58422+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
58423+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
58424+#define GR_FAILFORK_MSG "failed fork with errno %s by "
58425+#define GR_NICE_CHROOT_MSG "denied priority change by "
58426+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
58427+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
58428+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
58429+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
58430+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
58431+#define GR_TIME_MSG "time set by "
58432+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
58433+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
58434+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
58435+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
58436+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
58437+#define GR_BIND_MSG "denied bind() by "
58438+#define GR_CONNECT_MSG "denied connect() by "
58439+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
58440+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
58441+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
58442+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
58443+#define GR_CAP_ACL_MSG "use of %s denied for "
58444+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
58445+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
58446+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
58447+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
58448+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
58449+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
58450+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
58451+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
58452+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
58453+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
58454+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
58455+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
58456+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
58457+#define GR_VM86_MSG "denied use of vm86 by "
58458+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
58459+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
58460diff -urNp linux-3.0.7/include/linux/grsecurity.h linux-3.0.7/include/linux/grsecurity.h
58461--- linux-3.0.7/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
58462+++ linux-3.0.7/include/linux/grsecurity.h 2011-10-17 06:35:30.000000000 -0400
58463@@ -0,0 +1,228 @@
58464+#ifndef GR_SECURITY_H
58465+#define GR_SECURITY_H
58466+#include <linux/fs.h>
58467+#include <linux/fs_struct.h>
58468+#include <linux/binfmts.h>
58469+#include <linux/gracl.h>
58470+
58471+/* notify of brain-dead configs */
58472+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58473+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
58474+#endif
58475+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
58476+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
58477+#endif
58478+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58479+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58480+#endif
58481+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58482+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58483+#endif
58484+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
58485+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
58486+#endif
58487+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
58488+#error "CONFIG_PAX enabled, but no PaX options are enabled."
58489+#endif
58490+
58491+#include <linux/compat.h>
58492+
58493+struct user_arg_ptr {
58494+#ifdef CONFIG_COMPAT
58495+ bool is_compat;
58496+#endif
58497+ union {
58498+ const char __user *const __user *native;
58499+#ifdef CONFIG_COMPAT
58500+ compat_uptr_t __user *compat;
58501+#endif
58502+ } ptr;
58503+};
58504+
58505+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
58506+void gr_handle_brute_check(void);
58507+void gr_handle_kernel_exploit(void);
58508+int gr_process_user_ban(void);
58509+
58510+char gr_roletype_to_char(void);
58511+
58512+int gr_acl_enable_at_secure(void);
58513+
58514+int gr_check_user_change(int real, int effective, int fs);
58515+int gr_check_group_change(int real, int effective, int fs);
58516+
58517+void gr_del_task_from_ip_table(struct task_struct *p);
58518+
58519+int gr_pid_is_chrooted(struct task_struct *p);
58520+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
58521+int gr_handle_chroot_nice(void);
58522+int gr_handle_chroot_sysctl(const int op);
58523+int gr_handle_chroot_setpriority(struct task_struct *p,
58524+ const int niceval);
58525+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
58526+int gr_handle_chroot_chroot(const struct dentry *dentry,
58527+ const struct vfsmount *mnt);
58528+void gr_handle_chroot_chdir(struct path *path);
58529+int gr_handle_chroot_chmod(const struct dentry *dentry,
58530+ const struct vfsmount *mnt, const int mode);
58531+int gr_handle_chroot_mknod(const struct dentry *dentry,
58532+ const struct vfsmount *mnt, const int mode);
58533+int gr_handle_chroot_mount(const struct dentry *dentry,
58534+ const struct vfsmount *mnt,
58535+ const char *dev_name);
58536+int gr_handle_chroot_pivot(void);
58537+int gr_handle_chroot_unix(const pid_t pid);
58538+
58539+int gr_handle_rawio(const struct inode *inode);
58540+
58541+void gr_handle_ioperm(void);
58542+void gr_handle_iopl(void);
58543+
58544+int gr_tpe_allow(const struct file *file);
58545+
58546+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
58547+void gr_clear_chroot_entries(struct task_struct *task);
58548+
58549+void gr_log_forkfail(const int retval);
58550+void gr_log_timechange(void);
58551+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
58552+void gr_log_chdir(const struct dentry *dentry,
58553+ const struct vfsmount *mnt);
58554+void gr_log_chroot_exec(const struct dentry *dentry,
58555+ const struct vfsmount *mnt);
58556+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
58557+void gr_log_remount(const char *devname, const int retval);
58558+void gr_log_unmount(const char *devname, const int retval);
58559+void gr_log_mount(const char *from, const char *to, const int retval);
58560+void gr_log_textrel(struct vm_area_struct *vma);
58561+void gr_log_rwxmmap(struct file *file);
58562+void gr_log_rwxmprotect(struct file *file);
58563+
58564+int gr_handle_follow_link(const struct inode *parent,
58565+ const struct inode *inode,
58566+ const struct dentry *dentry,
58567+ const struct vfsmount *mnt);
58568+int gr_handle_fifo(const struct dentry *dentry,
58569+ const struct vfsmount *mnt,
58570+ const struct dentry *dir, const int flag,
58571+ const int acc_mode);
58572+int gr_handle_hardlink(const struct dentry *dentry,
58573+ const struct vfsmount *mnt,
58574+ struct inode *inode,
58575+ const int mode, const char *to);
58576+
58577+int gr_is_capable(const int cap);
58578+int gr_is_capable_nolog(const int cap);
58579+void gr_learn_resource(const struct task_struct *task, const int limit,
58580+ const unsigned long wanted, const int gt);
58581+void gr_copy_label(struct task_struct *tsk);
58582+void gr_handle_crash(struct task_struct *task, const int sig);
58583+int gr_handle_signal(const struct task_struct *p, const int sig);
58584+int gr_check_crash_uid(const uid_t uid);
58585+int gr_check_protected_task(const struct task_struct *task);
58586+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
58587+int gr_acl_handle_mmap(const struct file *file,
58588+ const unsigned long prot);
58589+int gr_acl_handle_mprotect(const struct file *file,
58590+ const unsigned long prot);
58591+int gr_check_hidden_task(const struct task_struct *tsk);
58592+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
58593+ const struct vfsmount *mnt);
58594+__u32 gr_acl_handle_utime(const struct dentry *dentry,
58595+ const struct vfsmount *mnt);
58596+__u32 gr_acl_handle_access(const struct dentry *dentry,
58597+ const struct vfsmount *mnt, const int fmode);
58598+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
58599+ const struct vfsmount *mnt, mode_t mode);
58600+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
58601+ const struct vfsmount *mnt, mode_t mode);
58602+__u32 gr_acl_handle_chown(const struct dentry *dentry,
58603+ const struct vfsmount *mnt);
58604+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
58605+ const struct vfsmount *mnt);
58606+int gr_handle_ptrace(struct task_struct *task, const long request);
58607+int gr_handle_proc_ptrace(struct task_struct *task);
58608+__u32 gr_acl_handle_execve(const struct dentry *dentry,
58609+ const struct vfsmount *mnt);
58610+int gr_check_crash_exec(const struct file *filp);
58611+int gr_acl_is_enabled(void);
58612+void gr_set_kernel_label(struct task_struct *task);
58613+void gr_set_role_label(struct task_struct *task, const uid_t uid,
58614+ const gid_t gid);
58615+int gr_set_proc_label(const struct dentry *dentry,
58616+ const struct vfsmount *mnt,
58617+ const int unsafe_share);
58618+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
58619+ const struct vfsmount *mnt);
58620+__u32 gr_acl_handle_open(const struct dentry *dentry,
58621+ const struct vfsmount *mnt, const int fmode);
58622+__u32 gr_acl_handle_creat(const struct dentry *dentry,
58623+ const struct dentry *p_dentry,
58624+ const struct vfsmount *p_mnt, const int fmode,
58625+ const int imode);
58626+void gr_handle_create(const struct dentry *dentry,
58627+ const struct vfsmount *mnt);
58628+void gr_handle_proc_create(const struct dentry *dentry,
58629+ const struct inode *inode);
58630+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
58631+ const struct dentry *parent_dentry,
58632+ const struct vfsmount *parent_mnt,
58633+ const int mode);
58634+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
58635+ const struct dentry *parent_dentry,
58636+ const struct vfsmount *parent_mnt);
58637+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
58638+ const struct vfsmount *mnt);
58639+void gr_handle_delete(const ino_t ino, const dev_t dev);
58640+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
58641+ const struct vfsmount *mnt);
58642+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
58643+ const struct dentry *parent_dentry,
58644+ const struct vfsmount *parent_mnt,
58645+ const char *from);
58646+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
58647+ const struct dentry *parent_dentry,
58648+ const struct vfsmount *parent_mnt,
58649+ const struct dentry *old_dentry,
58650+ const struct vfsmount *old_mnt, const char *to);
58651+int gr_acl_handle_rename(struct dentry *new_dentry,
58652+ struct dentry *parent_dentry,
58653+ const struct vfsmount *parent_mnt,
58654+ struct dentry *old_dentry,
58655+ struct inode *old_parent_inode,
58656+ struct vfsmount *old_mnt, const char *newname);
58657+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
58658+ struct dentry *old_dentry,
58659+ struct dentry *new_dentry,
58660+ struct vfsmount *mnt, const __u8 replace);
58661+__u32 gr_check_link(const struct dentry *new_dentry,
58662+ const struct dentry *parent_dentry,
58663+ const struct vfsmount *parent_mnt,
58664+ const struct dentry *old_dentry,
58665+ const struct vfsmount *old_mnt);
58666+int gr_acl_handle_filldir(const struct file *file, const char *name,
58667+ const unsigned int namelen, const ino_t ino);
58668+
58669+__u32 gr_acl_handle_unix(const struct dentry *dentry,
58670+ const struct vfsmount *mnt);
58671+void gr_acl_handle_exit(void);
58672+void gr_acl_handle_psacct(struct task_struct *task, const long code);
58673+int gr_acl_handle_procpidmem(const struct task_struct *task);
58674+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
58675+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
58676+void gr_audit_ptrace(struct task_struct *task);
58677+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
58678+
58679+#ifdef CONFIG_GRKERNSEC
58680+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
58681+void gr_handle_vm86(void);
58682+void gr_handle_mem_readwrite(u64 from, u64 to);
58683+
58684+extern int grsec_enable_dmesg;
58685+extern int grsec_disable_privio;
58686+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
58687+extern int grsec_enable_chroot_findtask;
58688+#endif
58689+#endif
58690+
58691+#endif
58692diff -urNp linux-3.0.7/include/linux/grsock.h linux-3.0.7/include/linux/grsock.h
58693--- linux-3.0.7/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
58694+++ linux-3.0.7/include/linux/grsock.h 2011-08-23 21:48:14.000000000 -0400
58695@@ -0,0 +1,19 @@
58696+#ifndef __GRSOCK_H
58697+#define __GRSOCK_H
58698+
58699+extern void gr_attach_curr_ip(const struct sock *sk);
58700+extern int gr_handle_sock_all(const int family, const int type,
58701+ const int protocol);
58702+extern int gr_handle_sock_server(const struct sockaddr *sck);
58703+extern int gr_handle_sock_server_other(const struct sock *sck);
58704+extern int gr_handle_sock_client(const struct sockaddr *sck);
58705+extern int gr_search_connect(struct socket * sock,
58706+ struct sockaddr_in * addr);
58707+extern int gr_search_bind(struct socket * sock,
58708+ struct sockaddr_in * addr);
58709+extern int gr_search_listen(struct socket * sock);
58710+extern int gr_search_accept(struct socket * sock);
58711+extern int gr_search_socket(const int domain, const int type,
58712+ const int protocol);
58713+
58714+#endif
58715diff -urNp linux-3.0.7/include/linux/hid.h linux-3.0.7/include/linux/hid.h
58716--- linux-3.0.7/include/linux/hid.h 2011-07-21 22:17:23.000000000 -0400
58717+++ linux-3.0.7/include/linux/hid.h 2011-08-23 21:47:56.000000000 -0400
58718@@ -675,7 +675,7 @@ struct hid_ll_driver {
58719 unsigned int code, int value);
58720
58721 int (*parse)(struct hid_device *hdev);
58722-};
58723+} __no_const;
58724
58725 #define PM_HINT_FULLON 1<<5
58726 #define PM_HINT_NORMAL 1<<1
58727diff -urNp linux-3.0.7/include/linux/highmem.h linux-3.0.7/include/linux/highmem.h
58728--- linux-3.0.7/include/linux/highmem.h 2011-07-21 22:17:23.000000000 -0400
58729+++ linux-3.0.7/include/linux/highmem.h 2011-08-23 21:47:56.000000000 -0400
58730@@ -185,6 +185,18 @@ static inline void clear_highpage(struct
58731 kunmap_atomic(kaddr, KM_USER0);
58732 }
58733
58734+static inline void sanitize_highpage(struct page *page)
58735+{
58736+ void *kaddr;
58737+ unsigned long flags;
58738+
58739+ local_irq_save(flags);
58740+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
58741+ clear_page(kaddr);
58742+ kunmap_atomic(kaddr, KM_CLEARPAGE);
58743+ local_irq_restore(flags);
58744+}
58745+
58746 static inline void zero_user_segments(struct page *page,
58747 unsigned start1, unsigned end1,
58748 unsigned start2, unsigned end2)
58749diff -urNp linux-3.0.7/include/linux/i2c.h linux-3.0.7/include/linux/i2c.h
58750--- linux-3.0.7/include/linux/i2c.h 2011-07-21 22:17:23.000000000 -0400
58751+++ linux-3.0.7/include/linux/i2c.h 2011-08-23 21:47:56.000000000 -0400
58752@@ -346,6 +346,7 @@ struct i2c_algorithm {
58753 /* To determine what the adapter supports */
58754 u32 (*functionality) (struct i2c_adapter *);
58755 };
58756+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
58757
58758 /*
58759 * i2c_adapter is the structure used to identify a physical i2c bus along
58760diff -urNp linux-3.0.7/include/linux/i2o.h linux-3.0.7/include/linux/i2o.h
58761--- linux-3.0.7/include/linux/i2o.h 2011-07-21 22:17:23.000000000 -0400
58762+++ linux-3.0.7/include/linux/i2o.h 2011-08-23 21:47:56.000000000 -0400
58763@@ -564,7 +564,7 @@ struct i2o_controller {
58764 struct i2o_device *exec; /* Executive */
58765 #if BITS_PER_LONG == 64
58766 spinlock_t context_list_lock; /* lock for context_list */
58767- atomic_t context_list_counter; /* needed for unique contexts */
58768+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
58769 struct list_head context_list; /* list of context id's
58770 and pointers */
58771 #endif
58772diff -urNp linux-3.0.7/include/linux/init.h linux-3.0.7/include/linux/init.h
58773--- linux-3.0.7/include/linux/init.h 2011-07-21 22:17:23.000000000 -0400
58774+++ linux-3.0.7/include/linux/init.h 2011-08-23 21:47:56.000000000 -0400
58775@@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
58776
58777 /* Each module must use one module_init(). */
58778 #define module_init(initfn) \
58779- static inline initcall_t __inittest(void) \
58780+ static inline __used initcall_t __inittest(void) \
58781 { return initfn; } \
58782 int init_module(void) __attribute__((alias(#initfn)));
58783
58784 /* This is only required if you want to be unloadable. */
58785 #define module_exit(exitfn) \
58786- static inline exitcall_t __exittest(void) \
58787+ static inline __used exitcall_t __exittest(void) \
58788 { return exitfn; } \
58789 void cleanup_module(void) __attribute__((alias(#exitfn)));
58790
58791diff -urNp linux-3.0.7/include/linux/init_task.h linux-3.0.7/include/linux/init_task.h
58792--- linux-3.0.7/include/linux/init_task.h 2011-07-21 22:17:23.000000000 -0400
58793+++ linux-3.0.7/include/linux/init_task.h 2011-08-23 21:47:56.000000000 -0400
58794@@ -126,6 +126,12 @@ extern struct cred init_cred;
58795 # define INIT_PERF_EVENTS(tsk)
58796 #endif
58797
58798+#ifdef CONFIG_X86
58799+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
58800+#else
58801+#define INIT_TASK_THREAD_INFO
58802+#endif
58803+
58804 /*
58805 * INIT_TASK is used to set up the first task table, touch at
58806 * your own risk!. Base=0, limit=0x1fffff (=2MB)
58807@@ -164,6 +170,7 @@ extern struct cred init_cred;
58808 RCU_INIT_POINTER(.cred, &init_cred), \
58809 .comm = "swapper", \
58810 .thread = INIT_THREAD, \
58811+ INIT_TASK_THREAD_INFO \
58812 .fs = &init_fs, \
58813 .files = &init_files, \
58814 .signal = &init_signals, \
58815diff -urNp linux-3.0.7/include/linux/intel-iommu.h linux-3.0.7/include/linux/intel-iommu.h
58816--- linux-3.0.7/include/linux/intel-iommu.h 2011-07-21 22:17:23.000000000 -0400
58817+++ linux-3.0.7/include/linux/intel-iommu.h 2011-08-23 21:47:56.000000000 -0400
58818@@ -296,7 +296,7 @@ struct iommu_flush {
58819 u8 fm, u64 type);
58820 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
58821 unsigned int size_order, u64 type);
58822-};
58823+} __no_const;
58824
58825 enum {
58826 SR_DMAR_FECTL_REG,
58827diff -urNp linux-3.0.7/include/linux/interrupt.h linux-3.0.7/include/linux/interrupt.h
58828--- linux-3.0.7/include/linux/interrupt.h 2011-07-21 22:17:23.000000000 -0400
58829+++ linux-3.0.7/include/linux/interrupt.h 2011-08-23 21:47:56.000000000 -0400
58830@@ -422,7 +422,7 @@ enum
58831 /* map softirq index to softirq name. update 'softirq_to_name' in
58832 * kernel/softirq.c when adding a new softirq.
58833 */
58834-extern char *softirq_to_name[NR_SOFTIRQS];
58835+extern const char * const softirq_to_name[NR_SOFTIRQS];
58836
58837 /* softirq mask and active fields moved to irq_cpustat_t in
58838 * asm/hardirq.h to get better cache usage. KAO
58839@@ -430,12 +430,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
58840
58841 struct softirq_action
58842 {
58843- void (*action)(struct softirq_action *);
58844+ void (*action)(void);
58845 };
58846
58847 asmlinkage void do_softirq(void);
58848 asmlinkage void __do_softirq(void);
58849-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
58850+extern void open_softirq(int nr, void (*action)(void));
58851 extern void softirq_init(void);
58852 static inline void __raise_softirq_irqoff(unsigned int nr)
58853 {
58854diff -urNp linux-3.0.7/include/linux/kallsyms.h linux-3.0.7/include/linux/kallsyms.h
58855--- linux-3.0.7/include/linux/kallsyms.h 2011-07-21 22:17:23.000000000 -0400
58856+++ linux-3.0.7/include/linux/kallsyms.h 2011-08-23 21:48:14.000000000 -0400
58857@@ -15,7 +15,8 @@
58858
58859 struct module;
58860
58861-#ifdef CONFIG_KALLSYMS
58862+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
58863+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
58864 /* Lookup the address for a symbol. Returns 0 if not found. */
58865 unsigned long kallsyms_lookup_name(const char *name);
58866
58867@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
58868 /* Stupid that this does nothing, but I didn't create this mess. */
58869 #define __print_symbol(fmt, addr)
58870 #endif /*CONFIG_KALLSYMS*/
58871+#else /* when included by kallsyms.c, vsnprintf.c, or
58872+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
58873+extern void __print_symbol(const char *fmt, unsigned long address);
58874+extern int sprint_backtrace(char *buffer, unsigned long address);
58875+extern int sprint_symbol(char *buffer, unsigned long address);
58876+const char *kallsyms_lookup(unsigned long addr,
58877+ unsigned long *symbolsize,
58878+ unsigned long *offset,
58879+ char **modname, char *namebuf);
58880+#endif
58881
58882 /* This macro allows us to keep printk typechecking */
58883 static void __check_printsym_format(const char *fmt, ...)
58884diff -urNp linux-3.0.7/include/linux/kgdb.h linux-3.0.7/include/linux/kgdb.h
58885--- linux-3.0.7/include/linux/kgdb.h 2011-07-21 22:17:23.000000000 -0400
58886+++ linux-3.0.7/include/linux/kgdb.h 2011-08-26 19:49:56.000000000 -0400
58887@@ -53,7 +53,7 @@ extern int kgdb_connected;
58888 extern int kgdb_io_module_registered;
58889
58890 extern atomic_t kgdb_setting_breakpoint;
58891-extern atomic_t kgdb_cpu_doing_single_step;
58892+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
58893
58894 extern struct task_struct *kgdb_usethread;
58895 extern struct task_struct *kgdb_contthread;
58896@@ -251,7 +251,7 @@ struct kgdb_arch {
58897 void (*disable_hw_break)(struct pt_regs *regs);
58898 void (*remove_all_hw_break)(void);
58899 void (*correct_hw_break)(void);
58900-};
58901+} __do_const;
58902
58903 /**
58904 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
58905@@ -276,7 +276,7 @@ struct kgdb_io {
58906 void (*pre_exception) (void);
58907 void (*post_exception) (void);
58908 int is_console;
58909-};
58910+} __do_const;
58911
58912 extern struct kgdb_arch arch_kgdb_ops;
58913
58914diff -urNp linux-3.0.7/include/linux/kmod.h linux-3.0.7/include/linux/kmod.h
58915--- linux-3.0.7/include/linux/kmod.h 2011-07-21 22:17:23.000000000 -0400
58916+++ linux-3.0.7/include/linux/kmod.h 2011-08-23 21:48:14.000000000 -0400
58917@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysc
58918 * usually useless though. */
58919 extern int __request_module(bool wait, const char *name, ...) \
58920 __attribute__((format(printf, 2, 3)));
58921+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
58922+ __attribute__((format(printf, 3, 4)));
58923 #define request_module(mod...) __request_module(true, mod)
58924 #define request_module_nowait(mod...) __request_module(false, mod)
58925 #define try_then_request_module(x, mod...) \
58926diff -urNp linux-3.0.7/include/linux/kvm_host.h linux-3.0.7/include/linux/kvm_host.h
58927--- linux-3.0.7/include/linux/kvm_host.h 2011-07-21 22:17:23.000000000 -0400
58928+++ linux-3.0.7/include/linux/kvm_host.h 2011-08-23 21:47:56.000000000 -0400
58929@@ -307,7 +307,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
58930 void vcpu_load(struct kvm_vcpu *vcpu);
58931 void vcpu_put(struct kvm_vcpu *vcpu);
58932
58933-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
58934+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
58935 struct module *module);
58936 void kvm_exit(void);
58937
58938@@ -446,7 +446,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
58939 struct kvm_guest_debug *dbg);
58940 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
58941
58942-int kvm_arch_init(void *opaque);
58943+int kvm_arch_init(const void *opaque);
58944 void kvm_arch_exit(void);
58945
58946 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
58947diff -urNp linux-3.0.7/include/linux/libata.h linux-3.0.7/include/linux/libata.h
58948--- linux-3.0.7/include/linux/libata.h 2011-07-21 22:17:23.000000000 -0400
58949+++ linux-3.0.7/include/linux/libata.h 2011-08-26 19:49:56.000000000 -0400
58950@@ -899,7 +899,7 @@ struct ata_port_operations {
58951 * fields must be pointers.
58952 */
58953 const struct ata_port_operations *inherits;
58954-};
58955+} __do_const;
58956
58957 struct ata_port_info {
58958 unsigned long flags;
58959diff -urNp linux-3.0.7/include/linux/linkage.h linux-3.0.7/include/linux/linkage.h
58960--- linux-3.0.7/include/linux/linkage.h 2011-07-21 22:17:23.000000000 -0400
58961+++ linux-3.0.7/include/linux/linkage.h 2011-10-11 10:44:33.000000000 -0400
58962@@ -82,6 +82,7 @@
58963 */
58964 #ifndef ENDPROC
58965 #define ENDPROC(name) \
58966+ .size name, .-name; \
58967 .type name, @function; \
58968 END(name)
58969 #endif
58970diff -urNp linux-3.0.7/include/linux/mca.h linux-3.0.7/include/linux/mca.h
58971--- linux-3.0.7/include/linux/mca.h 2011-07-21 22:17:23.000000000 -0400
58972+++ linux-3.0.7/include/linux/mca.h 2011-08-23 21:47:56.000000000 -0400
58973@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
58974 int region);
58975 void * (*mca_transform_memory)(struct mca_device *,
58976 void *memory);
58977-};
58978+} __no_const;
58979
58980 struct mca_bus {
58981 u64 default_dma_mask;
58982diff -urNp linux-3.0.7/include/linux/memory.h linux-3.0.7/include/linux/memory.h
58983--- linux-3.0.7/include/linux/memory.h 2011-07-21 22:17:23.000000000 -0400
58984+++ linux-3.0.7/include/linux/memory.h 2011-08-23 21:47:56.000000000 -0400
58985@@ -144,7 +144,7 @@ struct memory_accessor {
58986 size_t count);
58987 ssize_t (*write)(struct memory_accessor *, const char *buf,
58988 off_t offset, size_t count);
58989-};
58990+} __no_const;
58991
58992 /*
58993 * Kernel text modification mutex, used for code patching. Users of this lock
58994diff -urNp linux-3.0.7/include/linux/mfd/abx500.h linux-3.0.7/include/linux/mfd/abx500.h
58995--- linux-3.0.7/include/linux/mfd/abx500.h 2011-07-21 22:17:23.000000000 -0400
58996+++ linux-3.0.7/include/linux/mfd/abx500.h 2011-08-23 21:47:56.000000000 -0400
58997@@ -234,6 +234,7 @@ struct abx500_ops {
58998 int (*event_registers_startup_state_get) (struct device *, u8 *);
58999 int (*startup_irq_enabled) (struct device *, unsigned int);
59000 };
59001+typedef struct abx500_ops __no_const abx500_ops_no_const;
59002
59003 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
59004 void abx500_remove_ops(struct device *dev);
59005diff -urNp linux-3.0.7/include/linux/mm.h linux-3.0.7/include/linux/mm.h
59006--- linux-3.0.7/include/linux/mm.h 2011-09-02 18:11:21.000000000 -0400
59007+++ linux-3.0.7/include/linux/mm.h 2011-08-23 21:47:56.000000000 -0400
59008@@ -113,7 +113,14 @@ extern unsigned int kobjsize(const void
59009
59010 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
59011 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
59012+
59013+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
59014+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
59015+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
59016+#else
59017 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
59018+#endif
59019+
59020 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
59021 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
59022
59023@@ -1009,34 +1016,6 @@ int set_page_dirty(struct page *page);
59024 int set_page_dirty_lock(struct page *page);
59025 int clear_page_dirty_for_io(struct page *page);
59026
59027-/* Is the vma a continuation of the stack vma above it? */
59028-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
59029-{
59030- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
59031-}
59032-
59033-static inline int stack_guard_page_start(struct vm_area_struct *vma,
59034- unsigned long addr)
59035-{
59036- return (vma->vm_flags & VM_GROWSDOWN) &&
59037- (vma->vm_start == addr) &&
59038- !vma_growsdown(vma->vm_prev, addr);
59039-}
59040-
59041-/* Is the vma a continuation of the stack vma below it? */
59042-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
59043-{
59044- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
59045-}
59046-
59047-static inline int stack_guard_page_end(struct vm_area_struct *vma,
59048- unsigned long addr)
59049-{
59050- return (vma->vm_flags & VM_GROWSUP) &&
59051- (vma->vm_end == addr) &&
59052- !vma_growsup(vma->vm_next, addr);
59053-}
59054-
59055 extern unsigned long move_page_tables(struct vm_area_struct *vma,
59056 unsigned long old_addr, struct vm_area_struct *new_vma,
59057 unsigned long new_addr, unsigned long len);
59058@@ -1169,6 +1148,15 @@ struct shrinker {
59059 extern void register_shrinker(struct shrinker *);
59060 extern void unregister_shrinker(struct shrinker *);
59061
59062+#ifdef CONFIG_MMU
59063+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
59064+#else
59065+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
59066+{
59067+ return __pgprot(0);
59068+}
59069+#endif
59070+
59071 int vma_wants_writenotify(struct vm_area_struct *vma);
59072
59073 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
59074@@ -1452,6 +1440,7 @@ out:
59075 }
59076
59077 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
59078+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
59079
59080 extern unsigned long do_brk(unsigned long, unsigned long);
59081
59082@@ -1510,6 +1499,10 @@ extern struct vm_area_struct * find_vma(
59083 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
59084 struct vm_area_struct **pprev);
59085
59086+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
59087+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
59088+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
59089+
59090 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
59091 NULL if none. Assume start_addr < end_addr. */
59092 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
59093@@ -1526,15 +1519,6 @@ static inline unsigned long vma_pages(st
59094 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
59095 }
59096
59097-#ifdef CONFIG_MMU
59098-pgprot_t vm_get_page_prot(unsigned long vm_flags);
59099-#else
59100-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
59101-{
59102- return __pgprot(0);
59103-}
59104-#endif
59105-
59106 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
59107 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
59108 unsigned long pfn, unsigned long size, pgprot_t);
59109@@ -1647,7 +1631,7 @@ extern int unpoison_memory(unsigned long
59110 extern int sysctl_memory_failure_early_kill;
59111 extern int sysctl_memory_failure_recovery;
59112 extern void shake_page(struct page *p, int access);
59113-extern atomic_long_t mce_bad_pages;
59114+extern atomic_long_unchecked_t mce_bad_pages;
59115 extern int soft_offline_page(struct page *page, int flags);
59116
59117 extern void dump_page(struct page *page);
59118@@ -1661,5 +1645,11 @@ extern void copy_user_huge_page(struct p
59119 unsigned int pages_per_huge_page);
59120 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
59121
59122+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59123+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
59124+#else
59125+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
59126+#endif
59127+
59128 #endif /* __KERNEL__ */
59129 #endif /* _LINUX_MM_H */
59130diff -urNp linux-3.0.7/include/linux/mm_types.h linux-3.0.7/include/linux/mm_types.h
59131--- linux-3.0.7/include/linux/mm_types.h 2011-07-21 22:17:23.000000000 -0400
59132+++ linux-3.0.7/include/linux/mm_types.h 2011-08-23 21:47:56.000000000 -0400
59133@@ -184,6 +184,8 @@ struct vm_area_struct {
59134 #ifdef CONFIG_NUMA
59135 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
59136 #endif
59137+
59138+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
59139 };
59140
59141 struct core_thread {
59142@@ -316,6 +318,24 @@ struct mm_struct {
59143 #ifdef CONFIG_CPUMASK_OFFSTACK
59144 struct cpumask cpumask_allocation;
59145 #endif
59146+
59147+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59148+ unsigned long pax_flags;
59149+#endif
59150+
59151+#ifdef CONFIG_PAX_DLRESOLVE
59152+ unsigned long call_dl_resolve;
59153+#endif
59154+
59155+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59156+ unsigned long call_syscall;
59157+#endif
59158+
59159+#ifdef CONFIG_PAX_ASLR
59160+ unsigned long delta_mmap; /* randomized offset */
59161+ unsigned long delta_stack; /* randomized offset */
59162+#endif
59163+
59164 };
59165
59166 static inline void mm_init_cpumask(struct mm_struct *mm)
59167diff -urNp linux-3.0.7/include/linux/mmu_notifier.h linux-3.0.7/include/linux/mmu_notifier.h
59168--- linux-3.0.7/include/linux/mmu_notifier.h 2011-07-21 22:17:23.000000000 -0400
59169+++ linux-3.0.7/include/linux/mmu_notifier.h 2011-08-23 21:47:56.000000000 -0400
59170@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
59171 */
59172 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
59173 ({ \
59174- pte_t __pte; \
59175+ pte_t ___pte; \
59176 struct vm_area_struct *___vma = __vma; \
59177 unsigned long ___address = __address; \
59178- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
59179+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
59180 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
59181- __pte; \
59182+ ___pte; \
59183 })
59184
59185 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
59186diff -urNp linux-3.0.7/include/linux/mmzone.h linux-3.0.7/include/linux/mmzone.h
59187--- linux-3.0.7/include/linux/mmzone.h 2011-07-21 22:17:23.000000000 -0400
59188+++ linux-3.0.7/include/linux/mmzone.h 2011-08-23 21:47:56.000000000 -0400
59189@@ -350,7 +350,7 @@ struct zone {
59190 unsigned long flags; /* zone flags, see below */
59191
59192 /* Zone statistics */
59193- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59194+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59195
59196 /*
59197 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
59198diff -urNp linux-3.0.7/include/linux/mod_devicetable.h linux-3.0.7/include/linux/mod_devicetable.h
59199--- linux-3.0.7/include/linux/mod_devicetable.h 2011-07-21 22:17:23.000000000 -0400
59200+++ linux-3.0.7/include/linux/mod_devicetable.h 2011-08-23 21:47:56.000000000 -0400
59201@@ -12,7 +12,7 @@
59202 typedef unsigned long kernel_ulong_t;
59203 #endif
59204
59205-#define PCI_ANY_ID (~0)
59206+#define PCI_ANY_ID ((__u16)~0)
59207
59208 struct pci_device_id {
59209 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
59210@@ -131,7 +131,7 @@ struct usb_device_id {
59211 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
59212 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
59213
59214-#define HID_ANY_ID (~0)
59215+#define HID_ANY_ID (~0U)
59216
59217 struct hid_device_id {
59218 __u16 bus;
59219diff -urNp linux-3.0.7/include/linux/module.h linux-3.0.7/include/linux/module.h
59220--- linux-3.0.7/include/linux/module.h 2011-07-21 22:17:23.000000000 -0400
59221+++ linux-3.0.7/include/linux/module.h 2011-08-23 21:47:56.000000000 -0400
59222@@ -16,6 +16,7 @@
59223 #include <linux/kobject.h>
59224 #include <linux/moduleparam.h>
59225 #include <linux/tracepoint.h>
59226+#include <linux/fs.h>
59227
59228 #include <linux/percpu.h>
59229 #include <asm/module.h>
59230@@ -325,19 +326,16 @@ struct module
59231 int (*init)(void);
59232
59233 /* If this is non-NULL, vfree after init() returns */
59234- void *module_init;
59235+ void *module_init_rx, *module_init_rw;
59236
59237 /* Here is the actual code + data, vfree'd on unload. */
59238- void *module_core;
59239+ void *module_core_rx, *module_core_rw;
59240
59241 /* Here are the sizes of the init and core sections */
59242- unsigned int init_size, core_size;
59243+ unsigned int init_size_rw, core_size_rw;
59244
59245 /* The size of the executable code in each section. */
59246- unsigned int init_text_size, core_text_size;
59247-
59248- /* Size of RO sections of the module (text+rodata) */
59249- unsigned int init_ro_size, core_ro_size;
59250+ unsigned int init_size_rx, core_size_rx;
59251
59252 /* Arch-specific module values */
59253 struct mod_arch_specific arch;
59254@@ -393,6 +391,10 @@ struct module
59255 #ifdef CONFIG_EVENT_TRACING
59256 struct ftrace_event_call **trace_events;
59257 unsigned int num_trace_events;
59258+ struct file_operations trace_id;
59259+ struct file_operations trace_enable;
59260+ struct file_operations trace_format;
59261+ struct file_operations trace_filter;
59262 #endif
59263 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
59264 unsigned int num_ftrace_callsites;
59265@@ -443,16 +445,46 @@ bool is_module_address(unsigned long add
59266 bool is_module_percpu_address(unsigned long addr);
59267 bool is_module_text_address(unsigned long addr);
59268
59269+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
59270+{
59271+
59272+#ifdef CONFIG_PAX_KERNEXEC
59273+ if (ktla_ktva(addr) >= (unsigned long)start &&
59274+ ktla_ktva(addr) < (unsigned long)start + size)
59275+ return 1;
59276+#endif
59277+
59278+ return ((void *)addr >= start && (void *)addr < start + size);
59279+}
59280+
59281+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
59282+{
59283+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
59284+}
59285+
59286+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
59287+{
59288+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
59289+}
59290+
59291+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
59292+{
59293+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
59294+}
59295+
59296+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
59297+{
59298+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
59299+}
59300+
59301 static inline int within_module_core(unsigned long addr, struct module *mod)
59302 {
59303- return (unsigned long)mod->module_core <= addr &&
59304- addr < (unsigned long)mod->module_core + mod->core_size;
59305+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
59306 }
59307
59308 static inline int within_module_init(unsigned long addr, struct module *mod)
59309 {
59310- return (unsigned long)mod->module_init <= addr &&
59311- addr < (unsigned long)mod->module_init + mod->init_size;
59312+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
59313 }
59314
59315 /* Search for module by name: must hold module_mutex. */
59316diff -urNp linux-3.0.7/include/linux/moduleloader.h linux-3.0.7/include/linux/moduleloader.h
59317--- linux-3.0.7/include/linux/moduleloader.h 2011-07-21 22:17:23.000000000 -0400
59318+++ linux-3.0.7/include/linux/moduleloader.h 2011-08-23 21:47:56.000000000 -0400
59319@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
59320 sections. Returns NULL on failure. */
59321 void *module_alloc(unsigned long size);
59322
59323+#ifdef CONFIG_PAX_KERNEXEC
59324+void *module_alloc_exec(unsigned long size);
59325+#else
59326+#define module_alloc_exec(x) module_alloc(x)
59327+#endif
59328+
59329 /* Free memory returned from module_alloc. */
59330 void module_free(struct module *mod, void *module_region);
59331
59332+#ifdef CONFIG_PAX_KERNEXEC
59333+void module_free_exec(struct module *mod, void *module_region);
59334+#else
59335+#define module_free_exec(x, y) module_free((x), (y))
59336+#endif
59337+
59338 /* Apply the given relocation to the (simplified) ELF. Return -error
59339 or 0. */
59340 int apply_relocate(Elf_Shdr *sechdrs,
59341diff -urNp linux-3.0.7/include/linux/moduleparam.h linux-3.0.7/include/linux/moduleparam.h
59342--- linux-3.0.7/include/linux/moduleparam.h 2011-07-21 22:17:23.000000000 -0400
59343+++ linux-3.0.7/include/linux/moduleparam.h 2011-08-23 21:47:56.000000000 -0400
59344@@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
59345 * @len is usually just sizeof(string).
59346 */
59347 #define module_param_string(name, string, len, perm) \
59348- static const struct kparam_string __param_string_##name \
59349+ static const struct kparam_string __param_string_##name __used \
59350 = { len, string }; \
59351 __module_param_call(MODULE_PARAM_PREFIX, name, \
59352 &param_ops_string, \
59353@@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
59354 * module_param_named() for why this might be necessary.
59355 */
59356 #define module_param_array_named(name, array, type, nump, perm) \
59357- static const struct kparam_array __param_arr_##name \
59358+ static const struct kparam_array __param_arr_##name __used \
59359 = { .max = ARRAY_SIZE(array), .num = nump, \
59360 .ops = &param_ops_##type, \
59361 .elemsize = sizeof(array[0]), .elem = array }; \
59362diff -urNp linux-3.0.7/include/linux/namei.h linux-3.0.7/include/linux/namei.h
59363--- linux-3.0.7/include/linux/namei.h 2011-07-21 22:17:23.000000000 -0400
59364+++ linux-3.0.7/include/linux/namei.h 2011-08-23 21:47:56.000000000 -0400
59365@@ -24,7 +24,7 @@ struct nameidata {
59366 unsigned seq;
59367 int last_type;
59368 unsigned depth;
59369- char *saved_names[MAX_NESTED_LINKS + 1];
59370+ const char *saved_names[MAX_NESTED_LINKS + 1];
59371
59372 /* Intent data */
59373 union {
59374@@ -91,12 +91,12 @@ extern int follow_up(struct path *);
59375 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
59376 extern void unlock_rename(struct dentry *, struct dentry *);
59377
59378-static inline void nd_set_link(struct nameidata *nd, char *path)
59379+static inline void nd_set_link(struct nameidata *nd, const char *path)
59380 {
59381 nd->saved_names[nd->depth] = path;
59382 }
59383
59384-static inline char *nd_get_link(struct nameidata *nd)
59385+static inline const char *nd_get_link(const struct nameidata *nd)
59386 {
59387 return nd->saved_names[nd->depth];
59388 }
59389diff -urNp linux-3.0.7/include/linux/netdevice.h linux-3.0.7/include/linux/netdevice.h
59390--- linux-3.0.7/include/linux/netdevice.h 2011-09-02 18:11:21.000000000 -0400
59391+++ linux-3.0.7/include/linux/netdevice.h 2011-08-23 21:47:56.000000000 -0400
59392@@ -979,6 +979,7 @@ struct net_device_ops {
59393 int (*ndo_set_features)(struct net_device *dev,
59394 u32 features);
59395 };
59396+typedef struct net_device_ops __no_const net_device_ops_no_const;
59397
59398 /*
59399 * The DEVICE structure.
59400diff -urNp linux-3.0.7/include/linux/netfilter/xt_gradm.h linux-3.0.7/include/linux/netfilter/xt_gradm.h
59401--- linux-3.0.7/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
59402+++ linux-3.0.7/include/linux/netfilter/xt_gradm.h 2011-08-23 21:48:14.000000000 -0400
59403@@ -0,0 +1,9 @@
59404+#ifndef _LINUX_NETFILTER_XT_GRADM_H
59405+#define _LINUX_NETFILTER_XT_GRADM_H 1
59406+
59407+struct xt_gradm_mtinfo {
59408+ __u16 flags;
59409+ __u16 invflags;
59410+};
59411+
59412+#endif
59413diff -urNp linux-3.0.7/include/linux/of_pdt.h linux-3.0.7/include/linux/of_pdt.h
59414--- linux-3.0.7/include/linux/of_pdt.h 2011-07-21 22:17:23.000000000 -0400
59415+++ linux-3.0.7/include/linux/of_pdt.h 2011-08-30 06:20:11.000000000 -0400
59416@@ -32,7 +32,7 @@ struct of_pdt_ops {
59417
59418 /* return 0 on success; fill in 'len' with number of bytes in path */
59419 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
59420-};
59421+} __no_const;
59422
59423 extern void *prom_early_alloc(unsigned long size);
59424
59425diff -urNp linux-3.0.7/include/linux/oprofile.h linux-3.0.7/include/linux/oprofile.h
59426--- linux-3.0.7/include/linux/oprofile.h 2011-07-21 22:17:23.000000000 -0400
59427+++ linux-3.0.7/include/linux/oprofile.h 2011-08-23 21:47:56.000000000 -0400
59428@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
59429 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
59430 char const * name, ulong * val);
59431
59432-/** Create a file for read-only access to an atomic_t. */
59433+/** Create a file for read-only access to an atomic_unchecked_t. */
59434 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
59435- char const * name, atomic_t * val);
59436+ char const * name, atomic_unchecked_t * val);
59437
59438 /** create a directory */
59439 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
59440diff -urNp linux-3.0.7/include/linux/padata.h linux-3.0.7/include/linux/padata.h
59441--- linux-3.0.7/include/linux/padata.h 2011-07-21 22:17:23.000000000 -0400
59442+++ linux-3.0.7/include/linux/padata.h 2011-08-23 21:47:56.000000000 -0400
59443@@ -129,7 +129,7 @@ struct parallel_data {
59444 struct padata_instance *pinst;
59445 struct padata_parallel_queue __percpu *pqueue;
59446 struct padata_serial_queue __percpu *squeue;
59447- atomic_t seq_nr;
59448+ atomic_unchecked_t seq_nr;
59449 atomic_t reorder_objects;
59450 atomic_t refcnt;
59451 unsigned int max_seq_nr;
59452diff -urNp linux-3.0.7/include/linux/perf_event.h linux-3.0.7/include/linux/perf_event.h
59453--- linux-3.0.7/include/linux/perf_event.h 2011-07-21 22:17:23.000000000 -0400
59454+++ linux-3.0.7/include/linux/perf_event.h 2011-08-23 21:47:56.000000000 -0400
59455@@ -761,8 +761,8 @@ struct perf_event {
59456
59457 enum perf_event_active_state state;
59458 unsigned int attach_state;
59459- local64_t count;
59460- atomic64_t child_count;
59461+ local64_t count; /* PaX: fix it one day */
59462+ atomic64_unchecked_t child_count;
59463
59464 /*
59465 * These are the total time in nanoseconds that the event
59466@@ -813,8 +813,8 @@ struct perf_event {
59467 * These accumulate total time (in nanoseconds) that children
59468 * events have been enabled and running, respectively.
59469 */
59470- atomic64_t child_total_time_enabled;
59471- atomic64_t child_total_time_running;
59472+ atomic64_unchecked_t child_total_time_enabled;
59473+ atomic64_unchecked_t child_total_time_running;
59474
59475 /*
59476 * Protect attach/detach and child_list:
59477diff -urNp linux-3.0.7/include/linux/pipe_fs_i.h linux-3.0.7/include/linux/pipe_fs_i.h
59478--- linux-3.0.7/include/linux/pipe_fs_i.h 2011-07-21 22:17:23.000000000 -0400
59479+++ linux-3.0.7/include/linux/pipe_fs_i.h 2011-08-23 21:47:56.000000000 -0400
59480@@ -46,9 +46,9 @@ struct pipe_buffer {
59481 struct pipe_inode_info {
59482 wait_queue_head_t wait;
59483 unsigned int nrbufs, curbuf, buffers;
59484- unsigned int readers;
59485- unsigned int writers;
59486- unsigned int waiting_writers;
59487+ atomic_t readers;
59488+ atomic_t writers;
59489+ atomic_t waiting_writers;
59490 unsigned int r_counter;
59491 unsigned int w_counter;
59492 struct page *tmp_page;
59493diff -urNp linux-3.0.7/include/linux/pm_runtime.h linux-3.0.7/include/linux/pm_runtime.h
59494--- linux-3.0.7/include/linux/pm_runtime.h 2011-07-21 22:17:23.000000000 -0400
59495+++ linux-3.0.7/include/linux/pm_runtime.h 2011-08-23 21:47:56.000000000 -0400
59496@@ -94,7 +94,7 @@ static inline bool pm_runtime_callbacks_
59497
59498 static inline void pm_runtime_mark_last_busy(struct device *dev)
59499 {
59500- ACCESS_ONCE(dev->power.last_busy) = jiffies;
59501+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
59502 }
59503
59504 #else /* !CONFIG_PM_RUNTIME */
59505diff -urNp linux-3.0.7/include/linux/poison.h linux-3.0.7/include/linux/poison.h
59506--- linux-3.0.7/include/linux/poison.h 2011-07-21 22:17:23.000000000 -0400
59507+++ linux-3.0.7/include/linux/poison.h 2011-08-23 21:47:56.000000000 -0400
59508@@ -19,8 +19,8 @@
59509 * under normal circumstances, used to verify that nobody uses
59510 * non-initialized list entries.
59511 */
59512-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
59513-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
59514+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
59515+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
59516
59517 /********** include/linux/timer.h **********/
59518 /*
59519diff -urNp linux-3.0.7/include/linux/preempt.h linux-3.0.7/include/linux/preempt.h
59520--- linux-3.0.7/include/linux/preempt.h 2011-07-21 22:17:23.000000000 -0400
59521+++ linux-3.0.7/include/linux/preempt.h 2011-08-23 21:47:56.000000000 -0400
59522@@ -115,7 +115,7 @@ struct preempt_ops {
59523 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
59524 void (*sched_out)(struct preempt_notifier *notifier,
59525 struct task_struct *next);
59526-};
59527+} __no_const;
59528
59529 /**
59530 * preempt_notifier - key for installing preemption notifiers
59531diff -urNp linux-3.0.7/include/linux/proc_fs.h linux-3.0.7/include/linux/proc_fs.h
59532--- linux-3.0.7/include/linux/proc_fs.h 2011-07-21 22:17:23.000000000 -0400
59533+++ linux-3.0.7/include/linux/proc_fs.h 2011-08-23 21:48:14.000000000 -0400
59534@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
59535 return proc_create_data(name, mode, parent, proc_fops, NULL);
59536 }
59537
59538+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
59539+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
59540+{
59541+#ifdef CONFIG_GRKERNSEC_PROC_USER
59542+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
59543+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59544+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
59545+#else
59546+ return proc_create_data(name, mode, parent, proc_fops, NULL);
59547+#endif
59548+}
59549+
59550+
59551 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
59552 mode_t mode, struct proc_dir_entry *base,
59553 read_proc_t *read_proc, void * data)
59554@@ -258,7 +271,7 @@ union proc_op {
59555 int (*proc_show)(struct seq_file *m,
59556 struct pid_namespace *ns, struct pid *pid,
59557 struct task_struct *task);
59558-};
59559+} __no_const;
59560
59561 struct ctl_table_header;
59562 struct ctl_table;
59563diff -urNp linux-3.0.7/include/linux/ptrace.h linux-3.0.7/include/linux/ptrace.h
59564--- linux-3.0.7/include/linux/ptrace.h 2011-07-21 22:17:23.000000000 -0400
59565+++ linux-3.0.7/include/linux/ptrace.h 2011-08-23 21:48:14.000000000 -0400
59566@@ -115,10 +115,10 @@ extern void __ptrace_unlink(struct task_
59567 extern void exit_ptrace(struct task_struct *tracer);
59568 #define PTRACE_MODE_READ 1
59569 #define PTRACE_MODE_ATTACH 2
59570-/* Returns 0 on success, -errno on denial. */
59571-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
59572 /* Returns true on success, false on denial. */
59573 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
59574+/* Returns true on success, false on denial. */
59575+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
59576
59577 static inline int ptrace_reparented(struct task_struct *child)
59578 {
59579diff -urNp linux-3.0.7/include/linux/random.h linux-3.0.7/include/linux/random.h
59580--- linux-3.0.7/include/linux/random.h 2011-09-02 18:11:21.000000000 -0400
59581+++ linux-3.0.7/include/linux/random.h 2011-08-23 21:47:56.000000000 -0400
59582@@ -69,12 +69,17 @@ void srandom32(u32 seed);
59583
59584 u32 prandom32(struct rnd_state *);
59585
59586+static inline unsigned long pax_get_random_long(void)
59587+{
59588+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
59589+}
59590+
59591 /*
59592 * Handle minimum values for seeds
59593 */
59594 static inline u32 __seed(u32 x, u32 m)
59595 {
59596- return (x < m) ? x + m : x;
59597+ return (x <= m) ? x + m + 1 : x;
59598 }
59599
59600 /**
59601diff -urNp linux-3.0.7/include/linux/reboot.h linux-3.0.7/include/linux/reboot.h
59602--- linux-3.0.7/include/linux/reboot.h 2011-07-21 22:17:23.000000000 -0400
59603+++ linux-3.0.7/include/linux/reboot.h 2011-08-23 21:47:56.000000000 -0400
59604@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
59605 * Architecture-specific implementations of sys_reboot commands.
59606 */
59607
59608-extern void machine_restart(char *cmd);
59609-extern void machine_halt(void);
59610-extern void machine_power_off(void);
59611+extern void machine_restart(char *cmd) __noreturn;
59612+extern void machine_halt(void) __noreturn;
59613+extern void machine_power_off(void) __noreturn;
59614
59615 extern void machine_shutdown(void);
59616 struct pt_regs;
59617@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
59618 */
59619
59620 extern void kernel_restart_prepare(char *cmd);
59621-extern void kernel_restart(char *cmd);
59622-extern void kernel_halt(void);
59623-extern void kernel_power_off(void);
59624+extern void kernel_restart(char *cmd) __noreturn;
59625+extern void kernel_halt(void) __noreturn;
59626+extern void kernel_power_off(void) __noreturn;
59627
59628 extern int C_A_D; /* for sysctl */
59629 void ctrl_alt_del(void);
59630@@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
59631 * Emergency restart, callable from an interrupt handler.
59632 */
59633
59634-extern void emergency_restart(void);
59635+extern void emergency_restart(void) __noreturn;
59636 #include <asm/emergency-restart.h>
59637
59638 #endif
59639diff -urNp linux-3.0.7/include/linux/reiserfs_fs.h linux-3.0.7/include/linux/reiserfs_fs.h
59640--- linux-3.0.7/include/linux/reiserfs_fs.h 2011-07-21 22:17:23.000000000 -0400
59641+++ linux-3.0.7/include/linux/reiserfs_fs.h 2011-08-23 21:47:56.000000000 -0400
59642@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
59643 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
59644
59645 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
59646-#define get_generation(s) atomic_read (&fs_generation(s))
59647+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
59648 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
59649 #define __fs_changed(gen,s) (gen != get_generation (s))
59650 #define fs_changed(gen,s) \
59651diff -urNp linux-3.0.7/include/linux/reiserfs_fs_sb.h linux-3.0.7/include/linux/reiserfs_fs_sb.h
59652--- linux-3.0.7/include/linux/reiserfs_fs_sb.h 2011-07-21 22:17:23.000000000 -0400
59653+++ linux-3.0.7/include/linux/reiserfs_fs_sb.h 2011-08-23 21:47:56.000000000 -0400
59654@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
59655 /* Comment? -Hans */
59656 wait_queue_head_t s_wait;
59657 /* To be obsoleted soon by per buffer seals.. -Hans */
59658- atomic_t s_generation_counter; // increased by one every time the
59659+ atomic_unchecked_t s_generation_counter; // increased by one every time the
59660 // tree gets re-balanced
59661 unsigned long s_properties; /* File system properties. Currently holds
59662 on-disk FS format */
59663diff -urNp linux-3.0.7/include/linux/relay.h linux-3.0.7/include/linux/relay.h
59664--- linux-3.0.7/include/linux/relay.h 2011-07-21 22:17:23.000000000 -0400
59665+++ linux-3.0.7/include/linux/relay.h 2011-08-23 21:47:56.000000000 -0400
59666@@ -159,7 +159,7 @@ struct rchan_callbacks
59667 * The callback should return 0 if successful, negative if not.
59668 */
59669 int (*remove_buf_file)(struct dentry *dentry);
59670-};
59671+} __no_const;
59672
59673 /*
59674 * CONFIG_RELAY kernel API, kernel/relay.c
59675diff -urNp linux-3.0.7/include/linux/rfkill.h linux-3.0.7/include/linux/rfkill.h
59676--- linux-3.0.7/include/linux/rfkill.h 2011-07-21 22:17:23.000000000 -0400
59677+++ linux-3.0.7/include/linux/rfkill.h 2011-08-23 21:47:56.000000000 -0400
59678@@ -147,6 +147,7 @@ struct rfkill_ops {
59679 void (*query)(struct rfkill *rfkill, void *data);
59680 int (*set_block)(void *data, bool blocked);
59681 };
59682+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
59683
59684 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
59685 /**
59686diff -urNp linux-3.0.7/include/linux/rmap.h linux-3.0.7/include/linux/rmap.h
59687--- linux-3.0.7/include/linux/rmap.h 2011-07-21 22:17:23.000000000 -0400
59688+++ linux-3.0.7/include/linux/rmap.h 2011-08-23 21:47:56.000000000 -0400
59689@@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
59690 void anon_vma_init(void); /* create anon_vma_cachep */
59691 int anon_vma_prepare(struct vm_area_struct *);
59692 void unlink_anon_vmas(struct vm_area_struct *);
59693-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
59694-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
59695+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
59696+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
59697 void __anon_vma_link(struct vm_area_struct *);
59698
59699 static inline void anon_vma_merge(struct vm_area_struct *vma,
59700diff -urNp linux-3.0.7/include/linux/sched.h linux-3.0.7/include/linux/sched.h
59701--- linux-3.0.7/include/linux/sched.h 2011-10-17 23:17:09.000000000 -0400
59702+++ linux-3.0.7/include/linux/sched.h 2011-10-17 23:17:19.000000000 -0400
59703@@ -100,6 +100,7 @@ struct bio_list;
59704 struct fs_struct;
59705 struct perf_event_context;
59706 struct blk_plug;
59707+struct linux_binprm;
59708
59709 /*
59710 * List of flags we want to share for kernel threads,
59711@@ -380,10 +381,13 @@ struct user_namespace;
59712 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
59713
59714 extern int sysctl_max_map_count;
59715+extern unsigned long sysctl_heap_stack_gap;
59716
59717 #include <linux/aio.h>
59718
59719 #ifdef CONFIG_MMU
59720+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
59721+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
59722 extern void arch_pick_mmap_layout(struct mm_struct *mm);
59723 extern unsigned long
59724 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
59725@@ -629,6 +633,17 @@ struct signal_struct {
59726 #ifdef CONFIG_TASKSTATS
59727 struct taskstats *stats;
59728 #endif
59729+
59730+#ifdef CONFIG_GRKERNSEC
59731+ u32 curr_ip;
59732+ u32 saved_ip;
59733+ u32 gr_saddr;
59734+ u32 gr_daddr;
59735+ u16 gr_sport;
59736+ u16 gr_dport;
59737+ u8 used_accept:1;
59738+#endif
59739+
59740 #ifdef CONFIG_AUDIT
59741 unsigned audit_tty;
59742 struct tty_audit_buf *tty_audit_buf;
59743@@ -710,6 +725,11 @@ struct user_struct {
59744 struct key *session_keyring; /* UID's default session keyring */
59745 #endif
59746
59747+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
59748+ unsigned int banned;
59749+ unsigned long ban_expires;
59750+#endif
59751+
59752 /* Hash table maintenance information */
59753 struct hlist_node uidhash_node;
59754 uid_t uid;
59755@@ -1340,8 +1360,8 @@ struct task_struct {
59756 struct list_head thread_group;
59757
59758 struct completion *vfork_done; /* for vfork() */
59759- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
59760- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
59761+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
59762+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
59763
59764 cputime_t utime, stime, utimescaled, stimescaled;
59765 cputime_t gtime;
59766@@ -1357,13 +1377,6 @@ struct task_struct {
59767 struct task_cputime cputime_expires;
59768 struct list_head cpu_timers[3];
59769
59770-/* process credentials */
59771- const struct cred __rcu *real_cred; /* objective and real subjective task
59772- * credentials (COW) */
59773- const struct cred __rcu *cred; /* effective (overridable) subjective task
59774- * credentials (COW) */
59775- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
59776-
59777 char comm[TASK_COMM_LEN]; /* executable name excluding path
59778 - access with [gs]et_task_comm (which lock
59779 it with task_lock())
59780@@ -1380,8 +1393,16 @@ struct task_struct {
59781 #endif
59782 /* CPU-specific state of this task */
59783 struct thread_struct thread;
59784+/* thread_info moved to task_struct */
59785+#ifdef CONFIG_X86
59786+ struct thread_info tinfo;
59787+#endif
59788 /* filesystem information */
59789 struct fs_struct *fs;
59790+
59791+ const struct cred __rcu *cred; /* effective (overridable) subjective task
59792+ * credentials (COW) */
59793+
59794 /* open file information */
59795 struct files_struct *files;
59796 /* namespaces */
59797@@ -1428,6 +1449,11 @@ struct task_struct {
59798 struct rt_mutex_waiter *pi_blocked_on;
59799 #endif
59800
59801+/* process credentials */
59802+ const struct cred __rcu *real_cred; /* objective and real subjective task
59803+ * credentials (COW) */
59804+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
59805+
59806 #ifdef CONFIG_DEBUG_MUTEXES
59807 /* mutex deadlock detection */
59808 struct mutex_waiter *blocked_on;
59809@@ -1538,6 +1564,21 @@ struct task_struct {
59810 unsigned long default_timer_slack_ns;
59811
59812 struct list_head *scm_work_list;
59813+
59814+#ifdef CONFIG_GRKERNSEC
59815+ /* grsecurity */
59816+ struct dentry *gr_chroot_dentry;
59817+ struct acl_subject_label *acl;
59818+ struct acl_role_label *role;
59819+ struct file *exec_file;
59820+ u16 acl_role_id;
59821+ /* is this the task that authenticated to the special role */
59822+ u8 acl_sp_role;
59823+ u8 is_writable;
59824+ u8 brute;
59825+ u8 gr_is_chrooted;
59826+#endif
59827+
59828 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
59829 /* Index of current stored address in ret_stack */
59830 int curr_ret_stack;
59831@@ -1572,6 +1613,57 @@ struct task_struct {
59832 #endif
59833 };
59834
59835+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
59836+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
59837+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
59838+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
59839+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
59840+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
59841+
59842+#ifdef CONFIG_PAX_SOFTMODE
59843+extern int pax_softmode;
59844+#endif
59845+
59846+extern int pax_check_flags(unsigned long *);
59847+
59848+/* if tsk != current then task_lock must be held on it */
59849+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59850+static inline unsigned long pax_get_flags(struct task_struct *tsk)
59851+{
59852+ if (likely(tsk->mm))
59853+ return tsk->mm->pax_flags;
59854+ else
59855+ return 0UL;
59856+}
59857+
59858+/* if tsk != current then task_lock must be held on it */
59859+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
59860+{
59861+ if (likely(tsk->mm)) {
59862+ tsk->mm->pax_flags = flags;
59863+ return 0;
59864+ }
59865+ return -EINVAL;
59866+}
59867+#endif
59868+
59869+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
59870+extern void pax_set_initial_flags(struct linux_binprm *bprm);
59871+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
59872+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
59873+#endif
59874+
59875+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
59876+extern void pax_report_insns(void *pc, void *sp);
59877+extern void pax_report_refcount_overflow(struct pt_regs *regs);
59878+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
59879+
59880+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
59881+extern void pax_track_stack(void);
59882+#else
59883+static inline void pax_track_stack(void) {}
59884+#endif
59885+
59886 /* Future-safe accessor for struct task_struct's cpus_allowed. */
59887 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
59888
59889@@ -1768,6 +1860,7 @@ extern void thread_group_times(struct ta
59890 #define PF_DUMPCORE 0x00000200 /* dumped core */
59891 #define PF_SIGNALED 0x00000400 /* killed by a signal */
59892 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
59893+#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
59894 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
59895 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
59896 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
59897@@ -2055,7 +2148,9 @@ void yield(void);
59898 extern struct exec_domain default_exec_domain;
59899
59900 union thread_union {
59901+#ifndef CONFIG_X86
59902 struct thread_info thread_info;
59903+#endif
59904 unsigned long stack[THREAD_SIZE/sizeof(long)];
59905 };
59906
59907@@ -2088,6 +2183,7 @@ extern struct pid_namespace init_pid_ns;
59908 */
59909
59910 extern struct task_struct *find_task_by_vpid(pid_t nr);
59911+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
59912 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
59913 struct pid_namespace *ns);
59914
59915@@ -2224,7 +2320,7 @@ extern void __cleanup_sighand(struct sig
59916 extern void exit_itimers(struct signal_struct *);
59917 extern void flush_itimer_signals(void);
59918
59919-extern NORET_TYPE void do_group_exit(int);
59920+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
59921
59922 extern void daemonize(const char *, ...);
59923 extern int allow_signal(int);
59924@@ -2392,13 +2488,17 @@ static inline unsigned long *end_of_stac
59925
59926 #endif
59927
59928-static inline int object_is_on_stack(void *obj)
59929+static inline int object_starts_on_stack(void *obj)
59930 {
59931- void *stack = task_stack_page(current);
59932+ const void *stack = task_stack_page(current);
59933
59934 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
59935 }
59936
59937+#ifdef CONFIG_PAX_USERCOPY
59938+extern int object_is_on_stack(const void *obj, unsigned long len);
59939+#endif
59940+
59941 extern void thread_info_cache_init(void);
59942
59943 #ifdef CONFIG_DEBUG_STACK_USAGE
59944diff -urNp linux-3.0.7/include/linux/screen_info.h linux-3.0.7/include/linux/screen_info.h
59945--- linux-3.0.7/include/linux/screen_info.h 2011-07-21 22:17:23.000000000 -0400
59946+++ linux-3.0.7/include/linux/screen_info.h 2011-08-23 21:47:56.000000000 -0400
59947@@ -43,7 +43,8 @@ struct screen_info {
59948 __u16 pages; /* 0x32 */
59949 __u16 vesa_attributes; /* 0x34 */
59950 __u32 capabilities; /* 0x36 */
59951- __u8 _reserved[6]; /* 0x3a */
59952+ __u16 vesapm_size; /* 0x3a */
59953+ __u8 _reserved[4]; /* 0x3c */
59954 } __attribute__((packed));
59955
59956 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
59957diff -urNp linux-3.0.7/include/linux/security.h linux-3.0.7/include/linux/security.h
59958--- linux-3.0.7/include/linux/security.h 2011-07-21 22:17:23.000000000 -0400
59959+++ linux-3.0.7/include/linux/security.h 2011-08-23 21:48:14.000000000 -0400
59960@@ -36,6 +36,7 @@
59961 #include <linux/key.h>
59962 #include <linux/xfrm.h>
59963 #include <linux/slab.h>
59964+#include <linux/grsecurity.h>
59965 #include <net/flow.h>
59966
59967 /* Maximum number of letters for an LSM name string */
59968diff -urNp linux-3.0.7/include/linux/seq_file.h linux-3.0.7/include/linux/seq_file.h
59969--- linux-3.0.7/include/linux/seq_file.h 2011-07-21 22:17:23.000000000 -0400
59970+++ linux-3.0.7/include/linux/seq_file.h 2011-08-23 21:47:56.000000000 -0400
59971@@ -32,6 +32,7 @@ struct seq_operations {
59972 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
59973 int (*show) (struct seq_file *m, void *v);
59974 };
59975+typedef struct seq_operations __no_const seq_operations_no_const;
59976
59977 #define SEQ_SKIP 1
59978
59979diff -urNp linux-3.0.7/include/linux/shmem_fs.h linux-3.0.7/include/linux/shmem_fs.h
59980--- linux-3.0.7/include/linux/shmem_fs.h 2011-07-21 22:17:23.000000000 -0400
59981+++ linux-3.0.7/include/linux/shmem_fs.h 2011-08-23 21:47:56.000000000 -0400
59982@@ -10,7 +10,7 @@
59983
59984 #define SHMEM_NR_DIRECT 16
59985
59986-#define SHMEM_SYMLINK_INLINE_LEN (SHMEM_NR_DIRECT * sizeof(swp_entry_t))
59987+#define SHMEM_SYMLINK_INLINE_LEN 64
59988
59989 struct shmem_inode_info {
59990 spinlock_t lock;
59991diff -urNp linux-3.0.7/include/linux/shm.h linux-3.0.7/include/linux/shm.h
59992--- linux-3.0.7/include/linux/shm.h 2011-07-21 22:17:23.000000000 -0400
59993+++ linux-3.0.7/include/linux/shm.h 2011-08-23 21:48:14.000000000 -0400
59994@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
59995 pid_t shm_cprid;
59996 pid_t shm_lprid;
59997 struct user_struct *mlock_user;
59998+#ifdef CONFIG_GRKERNSEC
59999+ time_t shm_createtime;
60000+ pid_t shm_lapid;
60001+#endif
60002 };
60003
60004 /* shm_mode upper byte flags */
60005diff -urNp linux-3.0.7/include/linux/skbuff.h linux-3.0.7/include/linux/skbuff.h
60006--- linux-3.0.7/include/linux/skbuff.h 2011-07-21 22:17:23.000000000 -0400
60007+++ linux-3.0.7/include/linux/skbuff.h 2011-08-23 21:47:56.000000000 -0400
60008@@ -592,7 +592,7 @@ static inline struct skb_shared_hwtstamp
60009 */
60010 static inline int skb_queue_empty(const struct sk_buff_head *list)
60011 {
60012- return list->next == (struct sk_buff *)list;
60013+ return list->next == (const struct sk_buff *)list;
60014 }
60015
60016 /**
60017@@ -605,7 +605,7 @@ static inline int skb_queue_empty(const
60018 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60019 const struct sk_buff *skb)
60020 {
60021- return skb->next == (struct sk_buff *)list;
60022+ return skb->next == (const struct sk_buff *)list;
60023 }
60024
60025 /**
60026@@ -618,7 +618,7 @@ static inline bool skb_queue_is_last(con
60027 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
60028 const struct sk_buff *skb)
60029 {
60030- return skb->prev == (struct sk_buff *)list;
60031+ return skb->prev == (const struct sk_buff *)list;
60032 }
60033
60034 /**
60035@@ -1440,7 +1440,7 @@ static inline int pskb_network_may_pull(
60036 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
60037 */
60038 #ifndef NET_SKB_PAD
60039-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
60040+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
60041 #endif
60042
60043 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
60044diff -urNp linux-3.0.7/include/linux/slab_def.h linux-3.0.7/include/linux/slab_def.h
60045--- linux-3.0.7/include/linux/slab_def.h 2011-07-21 22:17:23.000000000 -0400
60046+++ linux-3.0.7/include/linux/slab_def.h 2011-08-23 21:47:56.000000000 -0400
60047@@ -96,10 +96,10 @@ struct kmem_cache {
60048 unsigned long node_allocs;
60049 unsigned long node_frees;
60050 unsigned long node_overflow;
60051- atomic_t allochit;
60052- atomic_t allocmiss;
60053- atomic_t freehit;
60054- atomic_t freemiss;
60055+ atomic_unchecked_t allochit;
60056+ atomic_unchecked_t allocmiss;
60057+ atomic_unchecked_t freehit;
60058+ atomic_unchecked_t freemiss;
60059
60060 /*
60061 * If debugging is enabled, then the allocator can add additional
60062diff -urNp linux-3.0.7/include/linux/slab.h linux-3.0.7/include/linux/slab.h
60063--- linux-3.0.7/include/linux/slab.h 2011-07-21 22:17:23.000000000 -0400
60064+++ linux-3.0.7/include/linux/slab.h 2011-08-23 21:47:56.000000000 -0400
60065@@ -11,12 +11,20 @@
60066
60067 #include <linux/gfp.h>
60068 #include <linux/types.h>
60069+#include <linux/err.h>
60070
60071 /*
60072 * Flags to pass to kmem_cache_create().
60073 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
60074 */
60075 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
60076+
60077+#ifdef CONFIG_PAX_USERCOPY
60078+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
60079+#else
60080+#define SLAB_USERCOPY 0x00000000UL
60081+#endif
60082+
60083 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
60084 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
60085 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
60086@@ -87,10 +95,13 @@
60087 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
60088 * Both make kfree a no-op.
60089 */
60090-#define ZERO_SIZE_PTR ((void *)16)
60091+#define ZERO_SIZE_PTR \
60092+({ \
60093+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
60094+ (void *)(-MAX_ERRNO-1L); \
60095+})
60096
60097-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
60098- (unsigned long)ZERO_SIZE_PTR)
60099+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
60100
60101 /*
60102 * struct kmem_cache related prototypes
60103@@ -141,6 +152,7 @@ void * __must_check krealloc(const void
60104 void kfree(const void *);
60105 void kzfree(const void *);
60106 size_t ksize(const void *);
60107+void check_object_size(const void *ptr, unsigned long n, bool to);
60108
60109 /*
60110 * Allocator specific definitions. These are mainly used to establish optimized
60111@@ -333,4 +345,59 @@ static inline void *kzalloc_node(size_t
60112
60113 void __init kmem_cache_init_late(void);
60114
60115+#define kmalloc(x, y) \
60116+({ \
60117+ void *___retval; \
60118+ intoverflow_t ___x = (intoverflow_t)x; \
60119+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
60120+ ___retval = NULL; \
60121+ else \
60122+ ___retval = kmalloc((size_t)___x, (y)); \
60123+ ___retval; \
60124+})
60125+
60126+#define kmalloc_node(x, y, z) \
60127+({ \
60128+ void *___retval; \
60129+ intoverflow_t ___x = (intoverflow_t)x; \
60130+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
60131+ ___retval = NULL; \
60132+ else \
60133+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
60134+ ___retval; \
60135+})
60136+
60137+#define kzalloc(x, y) \
60138+({ \
60139+ void *___retval; \
60140+ intoverflow_t ___x = (intoverflow_t)x; \
60141+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
60142+ ___retval = NULL; \
60143+ else \
60144+ ___retval = kzalloc((size_t)___x, (y)); \
60145+ ___retval; \
60146+})
60147+
60148+#define __krealloc(x, y, z) \
60149+({ \
60150+ void *___retval; \
60151+ intoverflow_t ___y = (intoverflow_t)y; \
60152+ if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
60153+ ___retval = NULL; \
60154+ else \
60155+ ___retval = __krealloc((x), (size_t)___y, (z)); \
60156+ ___retval; \
60157+})
60158+
60159+#define krealloc(x, y, z) \
60160+({ \
60161+ void *___retval; \
60162+ intoverflow_t ___y = (intoverflow_t)y; \
60163+ if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
60164+ ___retval = NULL; \
60165+ else \
60166+ ___retval = krealloc((x), (size_t)___y, (z)); \
60167+ ___retval; \
60168+})
60169+
60170 #endif /* _LINUX_SLAB_H */
60171diff -urNp linux-3.0.7/include/linux/slub_def.h linux-3.0.7/include/linux/slub_def.h
60172--- linux-3.0.7/include/linux/slub_def.h 2011-07-21 22:17:23.000000000 -0400
60173+++ linux-3.0.7/include/linux/slub_def.h 2011-08-23 21:47:56.000000000 -0400
60174@@ -82,7 +82,7 @@ struct kmem_cache {
60175 struct kmem_cache_order_objects max;
60176 struct kmem_cache_order_objects min;
60177 gfp_t allocflags; /* gfp flags to use on each alloc */
60178- int refcount; /* Refcount for slab cache destroy */
60179+ atomic_t refcount; /* Refcount for slab cache destroy */
60180 void (*ctor)(void *);
60181 int inuse; /* Offset to metadata */
60182 int align; /* Alignment */
60183@@ -218,7 +218,7 @@ static __always_inline struct kmem_cache
60184 }
60185
60186 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
60187-void *__kmalloc(size_t size, gfp_t flags);
60188+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
60189
60190 static __always_inline void *
60191 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
60192diff -urNp linux-3.0.7/include/linux/sonet.h linux-3.0.7/include/linux/sonet.h
60193--- linux-3.0.7/include/linux/sonet.h 2011-07-21 22:17:23.000000000 -0400
60194+++ linux-3.0.7/include/linux/sonet.h 2011-08-23 21:47:56.000000000 -0400
60195@@ -61,7 +61,7 @@ struct sonet_stats {
60196 #include <asm/atomic.h>
60197
60198 struct k_sonet_stats {
60199-#define __HANDLE_ITEM(i) atomic_t i
60200+#define __HANDLE_ITEM(i) atomic_unchecked_t i
60201 __SONET_ITEMS
60202 #undef __HANDLE_ITEM
60203 };
60204diff -urNp linux-3.0.7/include/linux/sunrpc/clnt.h linux-3.0.7/include/linux/sunrpc/clnt.h
60205--- linux-3.0.7/include/linux/sunrpc/clnt.h 2011-07-21 22:17:23.000000000 -0400
60206+++ linux-3.0.7/include/linux/sunrpc/clnt.h 2011-08-23 21:47:56.000000000 -0400
60207@@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
60208 {
60209 switch (sap->sa_family) {
60210 case AF_INET:
60211- return ntohs(((struct sockaddr_in *)sap)->sin_port);
60212+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
60213 case AF_INET6:
60214- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
60215+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
60216 }
60217 return 0;
60218 }
60219@@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
60220 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
60221 const struct sockaddr *src)
60222 {
60223- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
60224+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
60225 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
60226
60227 dsin->sin_family = ssin->sin_family;
60228@@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
60229 if (sa->sa_family != AF_INET6)
60230 return 0;
60231
60232- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
60233+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
60234 }
60235
60236 #endif /* __KERNEL__ */
60237diff -urNp linux-3.0.7/include/linux/sunrpc/svc_rdma.h linux-3.0.7/include/linux/sunrpc/svc_rdma.h
60238--- linux-3.0.7/include/linux/sunrpc/svc_rdma.h 2011-07-21 22:17:23.000000000 -0400
60239+++ linux-3.0.7/include/linux/sunrpc/svc_rdma.h 2011-08-23 21:47:56.000000000 -0400
60240@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
60241 extern unsigned int svcrdma_max_requests;
60242 extern unsigned int svcrdma_max_req_size;
60243
60244-extern atomic_t rdma_stat_recv;
60245-extern atomic_t rdma_stat_read;
60246-extern atomic_t rdma_stat_write;
60247-extern atomic_t rdma_stat_sq_starve;
60248-extern atomic_t rdma_stat_rq_starve;
60249-extern atomic_t rdma_stat_rq_poll;
60250-extern atomic_t rdma_stat_rq_prod;
60251-extern atomic_t rdma_stat_sq_poll;
60252-extern atomic_t rdma_stat_sq_prod;
60253+extern atomic_unchecked_t rdma_stat_recv;
60254+extern atomic_unchecked_t rdma_stat_read;
60255+extern atomic_unchecked_t rdma_stat_write;
60256+extern atomic_unchecked_t rdma_stat_sq_starve;
60257+extern atomic_unchecked_t rdma_stat_rq_starve;
60258+extern atomic_unchecked_t rdma_stat_rq_poll;
60259+extern atomic_unchecked_t rdma_stat_rq_prod;
60260+extern atomic_unchecked_t rdma_stat_sq_poll;
60261+extern atomic_unchecked_t rdma_stat_sq_prod;
60262
60263 #define RPCRDMA_VERSION 1
60264
60265diff -urNp linux-3.0.7/include/linux/sysctl.h linux-3.0.7/include/linux/sysctl.h
60266--- linux-3.0.7/include/linux/sysctl.h 2011-07-21 22:17:23.000000000 -0400
60267+++ linux-3.0.7/include/linux/sysctl.h 2011-08-23 21:48:14.000000000 -0400
60268@@ -155,7 +155,11 @@ enum
60269 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
60270 };
60271
60272-
60273+#ifdef CONFIG_PAX_SOFTMODE
60274+enum {
60275+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
60276+};
60277+#endif
60278
60279 /* CTL_VM names: */
60280 enum
60281@@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
60282
60283 extern int proc_dostring(struct ctl_table *, int,
60284 void __user *, size_t *, loff_t *);
60285+extern int proc_dostring_modpriv(struct ctl_table *, int,
60286+ void __user *, size_t *, loff_t *);
60287 extern int proc_dointvec(struct ctl_table *, int,
60288 void __user *, size_t *, loff_t *);
60289 extern int proc_dointvec_minmax(struct ctl_table *, int,
60290diff -urNp linux-3.0.7/include/linux/tty_ldisc.h linux-3.0.7/include/linux/tty_ldisc.h
60291--- linux-3.0.7/include/linux/tty_ldisc.h 2011-07-21 22:17:23.000000000 -0400
60292+++ linux-3.0.7/include/linux/tty_ldisc.h 2011-08-23 21:47:56.000000000 -0400
60293@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
60294
60295 struct module *owner;
60296
60297- int refcount;
60298+ atomic_t refcount;
60299 };
60300
60301 struct tty_ldisc {
60302diff -urNp linux-3.0.7/include/linux/types.h linux-3.0.7/include/linux/types.h
60303--- linux-3.0.7/include/linux/types.h 2011-07-21 22:17:23.000000000 -0400
60304+++ linux-3.0.7/include/linux/types.h 2011-08-23 21:47:56.000000000 -0400
60305@@ -213,10 +213,26 @@ typedef struct {
60306 int counter;
60307 } atomic_t;
60308
60309+#ifdef CONFIG_PAX_REFCOUNT
60310+typedef struct {
60311+ int counter;
60312+} atomic_unchecked_t;
60313+#else
60314+typedef atomic_t atomic_unchecked_t;
60315+#endif
60316+
60317 #ifdef CONFIG_64BIT
60318 typedef struct {
60319 long counter;
60320 } atomic64_t;
60321+
60322+#ifdef CONFIG_PAX_REFCOUNT
60323+typedef struct {
60324+ long counter;
60325+} atomic64_unchecked_t;
60326+#else
60327+typedef atomic64_t atomic64_unchecked_t;
60328+#endif
60329 #endif
60330
60331 struct list_head {
60332diff -urNp linux-3.0.7/include/linux/uaccess.h linux-3.0.7/include/linux/uaccess.h
60333--- linux-3.0.7/include/linux/uaccess.h 2011-07-21 22:17:23.000000000 -0400
60334+++ linux-3.0.7/include/linux/uaccess.h 2011-10-06 04:17:55.000000000 -0400
60335@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
60336 long ret; \
60337 mm_segment_t old_fs = get_fs(); \
60338 \
60339- set_fs(KERNEL_DS); \
60340 pagefault_disable(); \
60341- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
60342- pagefault_enable(); \
60343+ set_fs(KERNEL_DS); \
60344+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
60345 set_fs(old_fs); \
60346+ pagefault_enable(); \
60347 ret; \
60348 })
60349
60350diff -urNp linux-3.0.7/include/linux/unaligned/access_ok.h linux-3.0.7/include/linux/unaligned/access_ok.h
60351--- linux-3.0.7/include/linux/unaligned/access_ok.h 2011-07-21 22:17:23.000000000 -0400
60352+++ linux-3.0.7/include/linux/unaligned/access_ok.h 2011-08-23 21:47:56.000000000 -0400
60353@@ -6,32 +6,32 @@
60354
60355 static inline u16 get_unaligned_le16(const void *p)
60356 {
60357- return le16_to_cpup((__le16 *)p);
60358+ return le16_to_cpup((const __le16 *)p);
60359 }
60360
60361 static inline u32 get_unaligned_le32(const void *p)
60362 {
60363- return le32_to_cpup((__le32 *)p);
60364+ return le32_to_cpup((const __le32 *)p);
60365 }
60366
60367 static inline u64 get_unaligned_le64(const void *p)
60368 {
60369- return le64_to_cpup((__le64 *)p);
60370+ return le64_to_cpup((const __le64 *)p);
60371 }
60372
60373 static inline u16 get_unaligned_be16(const void *p)
60374 {
60375- return be16_to_cpup((__be16 *)p);
60376+ return be16_to_cpup((const __be16 *)p);
60377 }
60378
60379 static inline u32 get_unaligned_be32(const void *p)
60380 {
60381- return be32_to_cpup((__be32 *)p);
60382+ return be32_to_cpup((const __be32 *)p);
60383 }
60384
60385 static inline u64 get_unaligned_be64(const void *p)
60386 {
60387- return be64_to_cpup((__be64 *)p);
60388+ return be64_to_cpup((const __be64 *)p);
60389 }
60390
60391 static inline void put_unaligned_le16(u16 val, void *p)
60392diff -urNp linux-3.0.7/include/linux/vermagic.h linux-3.0.7/include/linux/vermagic.h
60393--- linux-3.0.7/include/linux/vermagic.h 2011-07-21 22:17:23.000000000 -0400
60394+++ linux-3.0.7/include/linux/vermagic.h 2011-10-07 19:25:35.000000000 -0400
60395@@ -26,9 +26,28 @@
60396 #define MODULE_ARCH_VERMAGIC ""
60397 #endif
60398
60399+#ifdef CONFIG_PAX_REFCOUNT
60400+#define MODULE_PAX_REFCOUNT "REFCOUNT "
60401+#else
60402+#define MODULE_PAX_REFCOUNT ""
60403+#endif
60404+
60405+#ifdef CONSTIFY_PLUGIN
60406+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
60407+#else
60408+#define MODULE_CONSTIFY_PLUGIN ""
60409+#endif
60410+
60411+#ifdef CONFIG_GRKERNSEC
60412+#define MODULE_GRSEC "GRSEC "
60413+#else
60414+#define MODULE_GRSEC ""
60415+#endif
60416+
60417 #define VERMAGIC_STRING \
60418 UTS_RELEASE " " \
60419 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
60420 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
60421- MODULE_ARCH_VERMAGIC
60422+ MODULE_ARCH_VERMAGIC \
60423+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_GRSEC
60424
60425diff -urNp linux-3.0.7/include/linux/vmalloc.h linux-3.0.7/include/linux/vmalloc.h
60426--- linux-3.0.7/include/linux/vmalloc.h 2011-07-21 22:17:23.000000000 -0400
60427+++ linux-3.0.7/include/linux/vmalloc.h 2011-08-23 21:47:56.000000000 -0400
60428@@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
60429 #define VM_MAP 0x00000004 /* vmap()ed pages */
60430 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
60431 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
60432+
60433+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
60434+#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
60435+#endif
60436+
60437 /* bits [20..32] reserved for arch specific ioremap internals */
60438
60439 /*
60440@@ -155,4 +160,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
60441 # endif
60442 #endif
60443
60444+#define vmalloc(x) \
60445+({ \
60446+ void *___retval; \
60447+ intoverflow_t ___x = (intoverflow_t)x; \
60448+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
60449+ ___retval = NULL; \
60450+ else \
60451+ ___retval = vmalloc((unsigned long)___x); \
60452+ ___retval; \
60453+})
60454+
60455+#define vzalloc(x) \
60456+({ \
60457+ void *___retval; \
60458+ intoverflow_t ___x = (intoverflow_t)x; \
60459+ if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
60460+ ___retval = NULL; \
60461+ else \
60462+ ___retval = vzalloc((unsigned long)___x); \
60463+ ___retval; \
60464+})
60465+
60466+#define __vmalloc(x, y, z) \
60467+({ \
60468+ void *___retval; \
60469+ intoverflow_t ___x = (intoverflow_t)x; \
60470+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
60471+ ___retval = NULL; \
60472+ else \
60473+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
60474+ ___retval; \
60475+})
60476+
60477+#define vmalloc_user(x) \
60478+({ \
60479+ void *___retval; \
60480+ intoverflow_t ___x = (intoverflow_t)x; \
60481+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
60482+ ___retval = NULL; \
60483+ else \
60484+ ___retval = vmalloc_user((unsigned long)___x); \
60485+ ___retval; \
60486+})
60487+
60488+#define vmalloc_exec(x) \
60489+({ \
60490+ void *___retval; \
60491+ intoverflow_t ___x = (intoverflow_t)x; \
60492+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
60493+ ___retval = NULL; \
60494+ else \
60495+ ___retval = vmalloc_exec((unsigned long)___x); \
60496+ ___retval; \
60497+})
60498+
60499+#define vmalloc_node(x, y) \
60500+({ \
60501+ void *___retval; \
60502+ intoverflow_t ___x = (intoverflow_t)x; \
60503+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
60504+ ___retval = NULL; \
60505+ else \
60506+ ___retval = vmalloc_node((unsigned long)___x, (y));\
60507+ ___retval; \
60508+})
60509+
60510+#define vzalloc_node(x, y) \
60511+({ \
60512+ void *___retval; \
60513+ intoverflow_t ___x = (intoverflow_t)x; \
60514+ if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
60515+ ___retval = NULL; \
60516+ else \
60517+ ___retval = vzalloc_node((unsigned long)___x, (y));\
60518+ ___retval; \
60519+})
60520+
60521+#define vmalloc_32(x) \
60522+({ \
60523+ void *___retval; \
60524+ intoverflow_t ___x = (intoverflow_t)x; \
60525+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
60526+ ___retval = NULL; \
60527+ else \
60528+ ___retval = vmalloc_32((unsigned long)___x); \
60529+ ___retval; \
60530+})
60531+
60532+#define vmalloc_32_user(x) \
60533+({ \
60534+void *___retval; \
60535+ intoverflow_t ___x = (intoverflow_t)x; \
60536+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
60537+ ___retval = NULL; \
60538+ else \
60539+ ___retval = vmalloc_32_user((unsigned long)___x);\
60540+ ___retval; \
60541+})
60542+
60543 #endif /* _LINUX_VMALLOC_H */
60544diff -urNp linux-3.0.7/include/linux/vmstat.h linux-3.0.7/include/linux/vmstat.h
60545--- linux-3.0.7/include/linux/vmstat.h 2011-07-21 22:17:23.000000000 -0400
60546+++ linux-3.0.7/include/linux/vmstat.h 2011-08-23 21:47:56.000000000 -0400
60547@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(in
60548 /*
60549 * Zone based page accounting with per cpu differentials.
60550 */
60551-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60552+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60553
60554 static inline void zone_page_state_add(long x, struct zone *zone,
60555 enum zone_stat_item item)
60556 {
60557- atomic_long_add(x, &zone->vm_stat[item]);
60558- atomic_long_add(x, &vm_stat[item]);
60559+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
60560+ atomic_long_add_unchecked(x, &vm_stat[item]);
60561 }
60562
60563 static inline unsigned long global_page_state(enum zone_stat_item item)
60564 {
60565- long x = atomic_long_read(&vm_stat[item]);
60566+ long x = atomic_long_read_unchecked(&vm_stat[item]);
60567 #ifdef CONFIG_SMP
60568 if (x < 0)
60569 x = 0;
60570@@ -109,7 +109,7 @@ static inline unsigned long global_page_
60571 static inline unsigned long zone_page_state(struct zone *zone,
60572 enum zone_stat_item item)
60573 {
60574- long x = atomic_long_read(&zone->vm_stat[item]);
60575+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
60576 #ifdef CONFIG_SMP
60577 if (x < 0)
60578 x = 0;
60579@@ -126,7 +126,7 @@ static inline unsigned long zone_page_st
60580 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
60581 enum zone_stat_item item)
60582 {
60583- long x = atomic_long_read(&zone->vm_stat[item]);
60584+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
60585
60586 #ifdef CONFIG_SMP
60587 int cpu;
60588@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state
60589
60590 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
60591 {
60592- atomic_long_inc(&zone->vm_stat[item]);
60593- atomic_long_inc(&vm_stat[item]);
60594+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
60595+ atomic_long_inc_unchecked(&vm_stat[item]);
60596 }
60597
60598 static inline void __inc_zone_page_state(struct page *page,
60599@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state
60600
60601 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
60602 {
60603- atomic_long_dec(&zone->vm_stat[item]);
60604- atomic_long_dec(&vm_stat[item]);
60605+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
60606+ atomic_long_dec_unchecked(&vm_stat[item]);
60607 }
60608
60609 static inline void __dec_zone_page_state(struct page *page,
60610diff -urNp linux-3.0.7/include/media/saa7146_vv.h linux-3.0.7/include/media/saa7146_vv.h
60611--- linux-3.0.7/include/media/saa7146_vv.h 2011-07-21 22:17:23.000000000 -0400
60612+++ linux-3.0.7/include/media/saa7146_vv.h 2011-10-07 19:07:40.000000000 -0400
60613@@ -163,7 +163,7 @@ struct saa7146_ext_vv
60614 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
60615
60616 /* the extension can override this */
60617- struct v4l2_ioctl_ops ops;
60618+ v4l2_ioctl_ops_no_const ops;
60619 /* pointer to the saa7146 core ops */
60620 const struct v4l2_ioctl_ops *core_ops;
60621
60622diff -urNp linux-3.0.7/include/media/v4l2-dev.h linux-3.0.7/include/media/v4l2-dev.h
60623--- linux-3.0.7/include/media/v4l2-dev.h 2011-07-21 22:17:23.000000000 -0400
60624+++ linux-3.0.7/include/media/v4l2-dev.h 2011-10-07 19:07:40.000000000 -0400
60625@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_sta
60626
60627
60628 struct v4l2_file_operations {
60629- struct module *owner;
60630+ struct module * const owner;
60631 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
60632 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
60633 unsigned int (*poll) (struct file *, struct poll_table_struct *);
60634@@ -68,6 +68,7 @@ struct v4l2_file_operations {
60635 int (*open) (struct file *);
60636 int (*release) (struct file *);
60637 };
60638+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
60639
60640 /*
60641 * Newer version of video_device, handled by videodev2.c
60642diff -urNp linux-3.0.7/include/media/v4l2-ioctl.h linux-3.0.7/include/media/v4l2-ioctl.h
60643--- linux-3.0.7/include/media/v4l2-ioctl.h 2011-07-21 22:17:23.000000000 -0400
60644+++ linux-3.0.7/include/media/v4l2-ioctl.h 2011-08-24 18:25:45.000000000 -0400
60645@@ -272,6 +272,7 @@ struct v4l2_ioctl_ops {
60646 long (*vidioc_default) (struct file *file, void *fh,
60647 bool valid_prio, int cmd, void *arg);
60648 };
60649+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
60650
60651
60652 /* v4l debugging and diagnostics */
60653diff -urNp linux-3.0.7/include/net/caif/cfctrl.h linux-3.0.7/include/net/caif/cfctrl.h
60654--- linux-3.0.7/include/net/caif/cfctrl.h 2011-07-21 22:17:23.000000000 -0400
60655+++ linux-3.0.7/include/net/caif/cfctrl.h 2011-08-23 21:47:56.000000000 -0400
60656@@ -52,7 +52,7 @@ struct cfctrl_rsp {
60657 void (*radioset_rsp)(void);
60658 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
60659 struct cflayer *client_layer);
60660-};
60661+} __no_const;
60662
60663 /* Link Setup Parameters for CAIF-Links. */
60664 struct cfctrl_link_param {
60665@@ -101,8 +101,8 @@ struct cfctrl_request_info {
60666 struct cfctrl {
60667 struct cfsrvl serv;
60668 struct cfctrl_rsp res;
60669- atomic_t req_seq_no;
60670- atomic_t rsp_seq_no;
60671+ atomic_unchecked_t req_seq_no;
60672+ atomic_unchecked_t rsp_seq_no;
60673 struct list_head list;
60674 /* Protects from simultaneous access to first_req list */
60675 spinlock_t info_list_lock;
60676diff -urNp linux-3.0.7/include/net/flow.h linux-3.0.7/include/net/flow.h
60677--- linux-3.0.7/include/net/flow.h 2011-07-21 22:17:23.000000000 -0400
60678+++ linux-3.0.7/include/net/flow.h 2011-08-23 21:47:56.000000000 -0400
60679@@ -188,6 +188,6 @@ extern struct flow_cache_object *flow_ca
60680 u8 dir, flow_resolve_t resolver, void *ctx);
60681
60682 extern void flow_cache_flush(void);
60683-extern atomic_t flow_cache_genid;
60684+extern atomic_unchecked_t flow_cache_genid;
60685
60686 #endif
60687diff -urNp linux-3.0.7/include/net/inetpeer.h linux-3.0.7/include/net/inetpeer.h
60688--- linux-3.0.7/include/net/inetpeer.h 2011-07-21 22:17:23.000000000 -0400
60689+++ linux-3.0.7/include/net/inetpeer.h 2011-08-23 21:47:56.000000000 -0400
60690@@ -43,8 +43,8 @@ struct inet_peer {
60691 */
60692 union {
60693 struct {
60694- atomic_t rid; /* Frag reception counter */
60695- atomic_t ip_id_count; /* IP ID for the next packet */
60696+ atomic_unchecked_t rid; /* Frag reception counter */
60697+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
60698 __u32 tcp_ts;
60699 __u32 tcp_ts_stamp;
60700 u32 metrics[RTAX_MAX];
60701@@ -108,7 +108,7 @@ static inline __u16 inet_getid(struct in
60702 {
60703 more++;
60704 inet_peer_refcheck(p);
60705- return atomic_add_return(more, &p->ip_id_count) - more;
60706+ return atomic_add_return_unchecked(more, &p->ip_id_count) - more;
60707 }
60708
60709 #endif /* _NET_INETPEER_H */
60710diff -urNp linux-3.0.7/include/net/ip_fib.h linux-3.0.7/include/net/ip_fib.h
60711--- linux-3.0.7/include/net/ip_fib.h 2011-07-21 22:17:23.000000000 -0400
60712+++ linux-3.0.7/include/net/ip_fib.h 2011-08-23 21:47:56.000000000 -0400
60713@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
60714
60715 #define FIB_RES_SADDR(net, res) \
60716 ((FIB_RES_NH(res).nh_saddr_genid == \
60717- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
60718+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
60719 FIB_RES_NH(res).nh_saddr : \
60720 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
60721 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
60722diff -urNp linux-3.0.7/include/net/ip_vs.h linux-3.0.7/include/net/ip_vs.h
60723--- linux-3.0.7/include/net/ip_vs.h 2011-07-21 22:17:23.000000000 -0400
60724+++ linux-3.0.7/include/net/ip_vs.h 2011-08-23 21:47:56.000000000 -0400
60725@@ -509,7 +509,7 @@ struct ip_vs_conn {
60726 struct ip_vs_conn *control; /* Master control connection */
60727 atomic_t n_control; /* Number of controlled ones */
60728 struct ip_vs_dest *dest; /* real server */
60729- atomic_t in_pkts; /* incoming packet counter */
60730+ atomic_unchecked_t in_pkts; /* incoming packet counter */
60731
60732 /* packet transmitter for different forwarding methods. If it
60733 mangles the packet, it must return NF_DROP or better NF_STOLEN,
60734@@ -647,7 +647,7 @@ struct ip_vs_dest {
60735 __be16 port; /* port number of the server */
60736 union nf_inet_addr addr; /* IP address of the server */
60737 volatile unsigned flags; /* dest status flags */
60738- atomic_t conn_flags; /* flags to copy to conn */
60739+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
60740 atomic_t weight; /* server weight */
60741
60742 atomic_t refcnt; /* reference counter */
60743diff -urNp linux-3.0.7/include/net/irda/ircomm_core.h linux-3.0.7/include/net/irda/ircomm_core.h
60744--- linux-3.0.7/include/net/irda/ircomm_core.h 2011-07-21 22:17:23.000000000 -0400
60745+++ linux-3.0.7/include/net/irda/ircomm_core.h 2011-08-23 21:47:56.000000000 -0400
60746@@ -51,7 +51,7 @@ typedef struct {
60747 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
60748 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
60749 struct ircomm_info *);
60750-} call_t;
60751+} __no_const call_t;
60752
60753 struct ircomm_cb {
60754 irda_queue_t queue;
60755diff -urNp linux-3.0.7/include/net/irda/ircomm_tty.h linux-3.0.7/include/net/irda/ircomm_tty.h
60756--- linux-3.0.7/include/net/irda/ircomm_tty.h 2011-07-21 22:17:23.000000000 -0400
60757+++ linux-3.0.7/include/net/irda/ircomm_tty.h 2011-08-23 21:47:56.000000000 -0400
60758@@ -35,6 +35,7 @@
60759 #include <linux/termios.h>
60760 #include <linux/timer.h>
60761 #include <linux/tty.h> /* struct tty_struct */
60762+#include <asm/local.h>
60763
60764 #include <net/irda/irias_object.h>
60765 #include <net/irda/ircomm_core.h>
60766@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
60767 unsigned short close_delay;
60768 unsigned short closing_wait; /* time to wait before closing */
60769
60770- int open_count;
60771- int blocked_open; /* # of blocked opens */
60772+ local_t open_count;
60773+ local_t blocked_open; /* # of blocked opens */
60774
60775 /* Protect concurent access to :
60776 * o self->open_count
60777diff -urNp linux-3.0.7/include/net/iucv/af_iucv.h linux-3.0.7/include/net/iucv/af_iucv.h
60778--- linux-3.0.7/include/net/iucv/af_iucv.h 2011-07-21 22:17:23.000000000 -0400
60779+++ linux-3.0.7/include/net/iucv/af_iucv.h 2011-08-23 21:47:56.000000000 -0400
60780@@ -87,7 +87,7 @@ struct iucv_sock {
60781 struct iucv_sock_list {
60782 struct hlist_head head;
60783 rwlock_t lock;
60784- atomic_t autobind_name;
60785+ atomic_unchecked_t autobind_name;
60786 };
60787
60788 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
60789diff -urNp linux-3.0.7/include/net/lapb.h linux-3.0.7/include/net/lapb.h
60790--- linux-3.0.7/include/net/lapb.h 2011-07-21 22:17:23.000000000 -0400
60791+++ linux-3.0.7/include/net/lapb.h 2011-08-23 21:47:56.000000000 -0400
60792@@ -95,7 +95,7 @@ struct lapb_cb {
60793 struct sk_buff_head write_queue;
60794 struct sk_buff_head ack_queue;
60795 unsigned char window;
60796- struct lapb_register_struct callbacks;
60797+ struct lapb_register_struct *callbacks;
60798
60799 /* FRMR control information */
60800 struct lapb_frame frmr_data;
60801diff -urNp linux-3.0.7/include/net/neighbour.h linux-3.0.7/include/net/neighbour.h
60802--- linux-3.0.7/include/net/neighbour.h 2011-07-21 22:17:23.000000000 -0400
60803+++ linux-3.0.7/include/net/neighbour.h 2011-08-31 18:39:25.000000000 -0400
60804@@ -124,7 +124,7 @@ struct neigh_ops {
60805 int (*connected_output)(struct sk_buff*);
60806 int (*hh_output)(struct sk_buff*);
60807 int (*queue_xmit)(struct sk_buff*);
60808-};
60809+} __do_const;
60810
60811 struct pneigh_entry {
60812 struct pneigh_entry *next;
60813diff -urNp linux-3.0.7/include/net/netlink.h linux-3.0.7/include/net/netlink.h
60814--- linux-3.0.7/include/net/netlink.h 2011-07-21 22:17:23.000000000 -0400
60815+++ linux-3.0.7/include/net/netlink.h 2011-08-23 21:47:56.000000000 -0400
60816@@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
60817 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
60818 {
60819 if (mark)
60820- skb_trim(skb, (unsigned char *) mark - skb->data);
60821+ skb_trim(skb, (const unsigned char *) mark - skb->data);
60822 }
60823
60824 /**
60825diff -urNp linux-3.0.7/include/net/netns/ipv4.h linux-3.0.7/include/net/netns/ipv4.h
60826--- linux-3.0.7/include/net/netns/ipv4.h 2011-07-21 22:17:23.000000000 -0400
60827+++ linux-3.0.7/include/net/netns/ipv4.h 2011-08-23 21:47:56.000000000 -0400
60828@@ -56,8 +56,8 @@ struct netns_ipv4 {
60829
60830 unsigned int sysctl_ping_group_range[2];
60831
60832- atomic_t rt_genid;
60833- atomic_t dev_addr_genid;
60834+ atomic_unchecked_t rt_genid;
60835+ atomic_unchecked_t dev_addr_genid;
60836
60837 #ifdef CONFIG_IP_MROUTE
60838 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
60839diff -urNp linux-3.0.7/include/net/sctp/sctp.h linux-3.0.7/include/net/sctp/sctp.h
60840--- linux-3.0.7/include/net/sctp/sctp.h 2011-07-21 22:17:23.000000000 -0400
60841+++ linux-3.0.7/include/net/sctp/sctp.h 2011-08-23 21:47:56.000000000 -0400
60842@@ -315,9 +315,9 @@ do { \
60843
60844 #else /* SCTP_DEBUG */
60845
60846-#define SCTP_DEBUG_PRINTK(whatever...)
60847-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
60848-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
60849+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
60850+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
60851+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
60852 #define SCTP_ENABLE_DEBUG
60853 #define SCTP_DISABLE_DEBUG
60854 #define SCTP_ASSERT(expr, str, func)
60855diff -urNp linux-3.0.7/include/net/sock.h linux-3.0.7/include/net/sock.h
60856--- linux-3.0.7/include/net/sock.h 2011-07-21 22:17:23.000000000 -0400
60857+++ linux-3.0.7/include/net/sock.h 2011-08-23 21:47:56.000000000 -0400
60858@@ -277,7 +277,7 @@ struct sock {
60859 #ifdef CONFIG_RPS
60860 __u32 sk_rxhash;
60861 #endif
60862- atomic_t sk_drops;
60863+ atomic_unchecked_t sk_drops;
60864 int sk_rcvbuf;
60865
60866 struct sk_filter __rcu *sk_filter;
60867@@ -1390,7 +1390,7 @@ static inline void sk_nocaps_add(struct
60868 }
60869
60870 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
60871- char __user *from, char *to,
60872+ char __user *from, unsigned char *to,
60873 int copy, int offset)
60874 {
60875 if (skb->ip_summed == CHECKSUM_NONE) {
60876diff -urNp linux-3.0.7/include/net/tcp.h linux-3.0.7/include/net/tcp.h
60877--- linux-3.0.7/include/net/tcp.h 2011-07-21 22:17:23.000000000 -0400
60878+++ linux-3.0.7/include/net/tcp.h 2011-08-23 21:47:56.000000000 -0400
60879@@ -1374,8 +1374,8 @@ enum tcp_seq_states {
60880 struct tcp_seq_afinfo {
60881 char *name;
60882 sa_family_t family;
60883- struct file_operations seq_fops;
60884- struct seq_operations seq_ops;
60885+ file_operations_no_const seq_fops;
60886+ seq_operations_no_const seq_ops;
60887 };
60888
60889 struct tcp_iter_state {
60890diff -urNp linux-3.0.7/include/net/udp.h linux-3.0.7/include/net/udp.h
60891--- linux-3.0.7/include/net/udp.h 2011-07-21 22:17:23.000000000 -0400
60892+++ linux-3.0.7/include/net/udp.h 2011-08-23 21:47:56.000000000 -0400
60893@@ -234,8 +234,8 @@ struct udp_seq_afinfo {
60894 char *name;
60895 sa_family_t family;
60896 struct udp_table *udp_table;
60897- struct file_operations seq_fops;
60898- struct seq_operations seq_ops;
60899+ file_operations_no_const seq_fops;
60900+ seq_operations_no_const seq_ops;
60901 };
60902
60903 struct udp_iter_state {
60904diff -urNp linux-3.0.7/include/net/xfrm.h linux-3.0.7/include/net/xfrm.h
60905--- linux-3.0.7/include/net/xfrm.h 2011-07-21 22:17:23.000000000 -0400
60906+++ linux-3.0.7/include/net/xfrm.h 2011-08-23 21:47:56.000000000 -0400
60907@@ -505,7 +505,7 @@ struct xfrm_policy {
60908 struct timer_list timer;
60909
60910 struct flow_cache_object flo;
60911- atomic_t genid;
60912+ atomic_unchecked_t genid;
60913 u32 priority;
60914 u32 index;
60915 struct xfrm_mark mark;
60916diff -urNp linux-3.0.7/include/rdma/iw_cm.h linux-3.0.7/include/rdma/iw_cm.h
60917--- linux-3.0.7/include/rdma/iw_cm.h 2011-07-21 22:17:23.000000000 -0400
60918+++ linux-3.0.7/include/rdma/iw_cm.h 2011-08-23 21:47:56.000000000 -0400
60919@@ -120,7 +120,7 @@ struct iw_cm_verbs {
60920 int backlog);
60921
60922 int (*destroy_listen)(struct iw_cm_id *cm_id);
60923-};
60924+} __no_const;
60925
60926 /**
60927 * iw_create_cm_id - Create an IW CM identifier.
60928diff -urNp linux-3.0.7/include/scsi/libfc.h linux-3.0.7/include/scsi/libfc.h
60929--- linux-3.0.7/include/scsi/libfc.h 2011-07-21 22:17:23.000000000 -0400
60930+++ linux-3.0.7/include/scsi/libfc.h 2011-08-23 21:47:56.000000000 -0400
60931@@ -750,6 +750,7 @@ struct libfc_function_template {
60932 */
60933 void (*disc_stop_final) (struct fc_lport *);
60934 };
60935+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
60936
60937 /**
60938 * struct fc_disc - Discovery context
60939@@ -853,7 +854,7 @@ struct fc_lport {
60940 struct fc_vport *vport;
60941
60942 /* Operational Information */
60943- struct libfc_function_template tt;
60944+ libfc_function_template_no_const tt;
60945 u8 link_up;
60946 u8 qfull;
60947 enum fc_lport_state state;
60948diff -urNp linux-3.0.7/include/scsi/scsi_device.h linux-3.0.7/include/scsi/scsi_device.h
60949--- linux-3.0.7/include/scsi/scsi_device.h 2011-07-21 22:17:23.000000000 -0400
60950+++ linux-3.0.7/include/scsi/scsi_device.h 2011-08-23 21:47:56.000000000 -0400
60951@@ -161,9 +161,9 @@ struct scsi_device {
60952 unsigned int max_device_blocked; /* what device_blocked counts down from */
60953 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
60954
60955- atomic_t iorequest_cnt;
60956- atomic_t iodone_cnt;
60957- atomic_t ioerr_cnt;
60958+ atomic_unchecked_t iorequest_cnt;
60959+ atomic_unchecked_t iodone_cnt;
60960+ atomic_unchecked_t ioerr_cnt;
60961
60962 struct device sdev_gendev,
60963 sdev_dev;
60964diff -urNp linux-3.0.7/include/scsi/scsi_transport_fc.h linux-3.0.7/include/scsi/scsi_transport_fc.h
60965--- linux-3.0.7/include/scsi/scsi_transport_fc.h 2011-07-21 22:17:23.000000000 -0400
60966+++ linux-3.0.7/include/scsi/scsi_transport_fc.h 2011-08-26 19:49:56.000000000 -0400
60967@@ -711,7 +711,7 @@ struct fc_function_template {
60968 unsigned long show_host_system_hostname:1;
60969
60970 unsigned long disable_target_scan:1;
60971-};
60972+} __do_const;
60973
60974
60975 /**
60976diff -urNp linux-3.0.7/include/sound/ak4xxx-adda.h linux-3.0.7/include/sound/ak4xxx-adda.h
60977--- linux-3.0.7/include/sound/ak4xxx-adda.h 2011-07-21 22:17:23.000000000 -0400
60978+++ linux-3.0.7/include/sound/ak4xxx-adda.h 2011-08-23 21:47:56.000000000 -0400
60979@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
60980 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
60981 unsigned char val);
60982 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
60983-};
60984+} __no_const;
60985
60986 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
60987
60988diff -urNp linux-3.0.7/include/sound/hwdep.h linux-3.0.7/include/sound/hwdep.h
60989--- linux-3.0.7/include/sound/hwdep.h 2011-07-21 22:17:23.000000000 -0400
60990+++ linux-3.0.7/include/sound/hwdep.h 2011-08-23 21:47:56.000000000 -0400
60991@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
60992 struct snd_hwdep_dsp_status *status);
60993 int (*dsp_load)(struct snd_hwdep *hw,
60994 struct snd_hwdep_dsp_image *image);
60995-};
60996+} __no_const;
60997
60998 struct snd_hwdep {
60999 struct snd_card *card;
61000diff -urNp linux-3.0.7/include/sound/info.h linux-3.0.7/include/sound/info.h
61001--- linux-3.0.7/include/sound/info.h 2011-07-21 22:17:23.000000000 -0400
61002+++ linux-3.0.7/include/sound/info.h 2011-08-23 21:47:56.000000000 -0400
61003@@ -44,7 +44,7 @@ struct snd_info_entry_text {
61004 struct snd_info_buffer *buffer);
61005 void (*write)(struct snd_info_entry *entry,
61006 struct snd_info_buffer *buffer);
61007-};
61008+} __no_const;
61009
61010 struct snd_info_entry_ops {
61011 int (*open)(struct snd_info_entry *entry,
61012diff -urNp linux-3.0.7/include/sound/pcm.h linux-3.0.7/include/sound/pcm.h
61013--- linux-3.0.7/include/sound/pcm.h 2011-07-21 22:17:23.000000000 -0400
61014+++ linux-3.0.7/include/sound/pcm.h 2011-08-23 21:47:56.000000000 -0400
61015@@ -81,6 +81,7 @@ struct snd_pcm_ops {
61016 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
61017 int (*ack)(struct snd_pcm_substream *substream);
61018 };
61019+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
61020
61021 /*
61022 *
61023diff -urNp linux-3.0.7/include/sound/sb16_csp.h linux-3.0.7/include/sound/sb16_csp.h
61024--- linux-3.0.7/include/sound/sb16_csp.h 2011-07-21 22:17:23.000000000 -0400
61025+++ linux-3.0.7/include/sound/sb16_csp.h 2011-08-23 21:47:56.000000000 -0400
61026@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
61027 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
61028 int (*csp_stop) (struct snd_sb_csp * p);
61029 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
61030-};
61031+} __no_const;
61032
61033 /*
61034 * CSP private data
61035diff -urNp linux-3.0.7/include/sound/soc.h linux-3.0.7/include/sound/soc.h
61036--- linux-3.0.7/include/sound/soc.h 2011-07-21 22:17:23.000000000 -0400
61037+++ linux-3.0.7/include/sound/soc.h 2011-08-26 19:49:56.000000000 -0400
61038@@ -636,7 +636,7 @@ struct snd_soc_platform_driver {
61039
61040 /* platform stream ops */
61041 struct snd_pcm_ops *ops;
61042-};
61043+} __do_const;
61044
61045 struct snd_soc_platform {
61046 const char *name;
61047diff -urNp linux-3.0.7/include/sound/ymfpci.h linux-3.0.7/include/sound/ymfpci.h
61048--- linux-3.0.7/include/sound/ymfpci.h 2011-07-21 22:17:23.000000000 -0400
61049+++ linux-3.0.7/include/sound/ymfpci.h 2011-08-23 21:47:56.000000000 -0400
61050@@ -358,7 +358,7 @@ struct snd_ymfpci {
61051 spinlock_t reg_lock;
61052 spinlock_t voice_lock;
61053 wait_queue_head_t interrupt_sleep;
61054- atomic_t interrupt_sleep_count;
61055+ atomic_unchecked_t interrupt_sleep_count;
61056 struct snd_info_entry *proc_entry;
61057 const struct firmware *dsp_microcode;
61058 const struct firmware *controller_microcode;
61059diff -urNp linux-3.0.7/include/target/target_core_base.h linux-3.0.7/include/target/target_core_base.h
61060--- linux-3.0.7/include/target/target_core_base.h 2011-07-21 22:17:23.000000000 -0400
61061+++ linux-3.0.7/include/target/target_core_base.h 2011-08-23 21:47:56.000000000 -0400
61062@@ -364,7 +364,7 @@ struct t10_reservation_ops {
61063 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
61064 int (*t10_pr_register)(struct se_cmd *);
61065 int (*t10_pr_clear)(struct se_cmd *);
61066-};
61067+} __no_const;
61068
61069 struct t10_reservation_template {
61070 /* Reservation effects all target ports */
61071@@ -432,8 +432,8 @@ struct se_transport_task {
61072 atomic_t t_task_cdbs_left;
61073 atomic_t t_task_cdbs_ex_left;
61074 atomic_t t_task_cdbs_timeout_left;
61075- atomic_t t_task_cdbs_sent;
61076- atomic_t t_transport_aborted;
61077+ atomic_unchecked_t t_task_cdbs_sent;
61078+ atomic_unchecked_t t_transport_aborted;
61079 atomic_t t_transport_active;
61080 atomic_t t_transport_complete;
61081 atomic_t t_transport_queue_active;
61082@@ -774,7 +774,7 @@ struct se_device {
61083 atomic_t active_cmds;
61084 atomic_t simple_cmds;
61085 atomic_t depth_left;
61086- atomic_t dev_ordered_id;
61087+ atomic_unchecked_t dev_ordered_id;
61088 atomic_t dev_tur_active;
61089 atomic_t execute_tasks;
61090 atomic_t dev_status_thr_count;
61091diff -urNp linux-3.0.7/include/trace/events/irq.h linux-3.0.7/include/trace/events/irq.h
61092--- linux-3.0.7/include/trace/events/irq.h 2011-07-21 22:17:23.000000000 -0400
61093+++ linux-3.0.7/include/trace/events/irq.h 2011-08-23 21:47:56.000000000 -0400
61094@@ -36,7 +36,7 @@ struct softirq_action;
61095 */
61096 TRACE_EVENT(irq_handler_entry,
61097
61098- TP_PROTO(int irq, struct irqaction *action),
61099+ TP_PROTO(int irq, const struct irqaction *action),
61100
61101 TP_ARGS(irq, action),
61102
61103@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
61104 */
61105 TRACE_EVENT(irq_handler_exit,
61106
61107- TP_PROTO(int irq, struct irqaction *action, int ret),
61108+ TP_PROTO(int irq, const struct irqaction *action, int ret),
61109
61110 TP_ARGS(irq, action, ret),
61111
61112diff -urNp linux-3.0.7/include/video/udlfb.h linux-3.0.7/include/video/udlfb.h
61113--- linux-3.0.7/include/video/udlfb.h 2011-07-21 22:17:23.000000000 -0400
61114+++ linux-3.0.7/include/video/udlfb.h 2011-08-23 21:47:56.000000000 -0400
61115@@ -51,10 +51,10 @@ struct dlfb_data {
61116 int base8;
61117 u32 pseudo_palette[256];
61118 /* blit-only rendering path metrics, exposed through sysfs */
61119- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61120- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
61121- atomic_t bytes_sent; /* to usb, after compression including overhead */
61122- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
61123+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61124+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
61125+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
61126+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
61127 };
61128
61129 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
61130diff -urNp linux-3.0.7/include/video/uvesafb.h linux-3.0.7/include/video/uvesafb.h
61131--- linux-3.0.7/include/video/uvesafb.h 2011-07-21 22:17:23.000000000 -0400
61132+++ linux-3.0.7/include/video/uvesafb.h 2011-08-23 21:47:56.000000000 -0400
61133@@ -177,6 +177,7 @@ struct uvesafb_par {
61134 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
61135 u8 pmi_setpal; /* PMI for palette changes */
61136 u16 *pmi_base; /* protected mode interface location */
61137+ u8 *pmi_code; /* protected mode code location */
61138 void *pmi_start;
61139 void *pmi_pal;
61140 u8 *vbe_state_orig; /*
61141diff -urNp linux-3.0.7/init/do_mounts.c linux-3.0.7/init/do_mounts.c
61142--- linux-3.0.7/init/do_mounts.c 2011-07-21 22:17:23.000000000 -0400
61143+++ linux-3.0.7/init/do_mounts.c 2011-10-06 04:17:55.000000000 -0400
61144@@ -287,11 +287,11 @@ static void __init get_fs_names(char *pa
61145
61146 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
61147 {
61148- int err = sys_mount(name, "/root", fs, flags, data);
61149+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
61150 if (err)
61151 return err;
61152
61153- sys_chdir((const char __user __force *)"/root");
61154+ sys_chdir((const char __force_user*)"/root");
61155 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
61156 printk(KERN_INFO
61157 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
61158@@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
61159 va_start(args, fmt);
61160 vsprintf(buf, fmt, args);
61161 va_end(args);
61162- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
61163+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
61164 if (fd >= 0) {
61165 sys_ioctl(fd, FDEJECT, 0);
61166 sys_close(fd);
61167 }
61168 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
61169- fd = sys_open("/dev/console", O_RDWR, 0);
61170+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
61171 if (fd >= 0) {
61172 sys_ioctl(fd, TCGETS, (long)&termios);
61173 termios.c_lflag &= ~ICANON;
61174 sys_ioctl(fd, TCSETSF, (long)&termios);
61175- sys_read(fd, &c, 1);
61176+ sys_read(fd, (char __user *)&c, 1);
61177 termios.c_lflag |= ICANON;
61178 sys_ioctl(fd, TCSETSF, (long)&termios);
61179 sys_close(fd);
61180@@ -488,6 +488,6 @@ void __init prepare_namespace(void)
61181 mount_root();
61182 out:
61183 devtmpfs_mount("dev");
61184- sys_mount(".", "/", NULL, MS_MOVE, NULL);
61185- sys_chroot((const char __user __force *)".");
61186+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
61187+ sys_chroot((const char __force_user *)".");
61188 }
61189diff -urNp linux-3.0.7/init/do_mounts.h linux-3.0.7/init/do_mounts.h
61190--- linux-3.0.7/init/do_mounts.h 2011-07-21 22:17:23.000000000 -0400
61191+++ linux-3.0.7/init/do_mounts.h 2011-10-06 04:17:55.000000000 -0400
61192@@ -15,15 +15,15 @@ extern int root_mountflags;
61193
61194 static inline int create_dev(char *name, dev_t dev)
61195 {
61196- sys_unlink(name);
61197- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
61198+ sys_unlink((char __force_user *)name);
61199+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
61200 }
61201
61202 #if BITS_PER_LONG == 32
61203 static inline u32 bstat(char *name)
61204 {
61205 struct stat64 stat;
61206- if (sys_stat64(name, &stat) != 0)
61207+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
61208 return 0;
61209 if (!S_ISBLK(stat.st_mode))
61210 return 0;
61211@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
61212 static inline u32 bstat(char *name)
61213 {
61214 struct stat stat;
61215- if (sys_newstat(name, &stat) != 0)
61216+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
61217 return 0;
61218 if (!S_ISBLK(stat.st_mode))
61219 return 0;
61220diff -urNp linux-3.0.7/init/do_mounts_initrd.c linux-3.0.7/init/do_mounts_initrd.c
61221--- linux-3.0.7/init/do_mounts_initrd.c 2011-07-21 22:17:23.000000000 -0400
61222+++ linux-3.0.7/init/do_mounts_initrd.c 2011-10-06 04:17:55.000000000 -0400
61223@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
61224 create_dev("/dev/root.old", Root_RAM0);
61225 /* mount initrd on rootfs' /root */
61226 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
61227- sys_mkdir("/old", 0700);
61228- root_fd = sys_open("/", 0, 0);
61229- old_fd = sys_open("/old", 0, 0);
61230+ sys_mkdir((const char __force_user *)"/old", 0700);
61231+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
61232+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
61233 /* move initrd over / and chdir/chroot in initrd root */
61234- sys_chdir("/root");
61235- sys_mount(".", "/", NULL, MS_MOVE, NULL);
61236- sys_chroot(".");
61237+ sys_chdir((const char __force_user *)"/root");
61238+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
61239+ sys_chroot((const char __force_user *)".");
61240
61241 /*
61242 * In case that a resume from disk is carried out by linuxrc or one of
61243@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
61244
61245 /* move initrd to rootfs' /old */
61246 sys_fchdir(old_fd);
61247- sys_mount("/", ".", NULL, MS_MOVE, NULL);
61248+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
61249 /* switch root and cwd back to / of rootfs */
61250 sys_fchdir(root_fd);
61251- sys_chroot(".");
61252+ sys_chroot((const char __force_user *)".");
61253 sys_close(old_fd);
61254 sys_close(root_fd);
61255
61256 if (new_decode_dev(real_root_dev) == Root_RAM0) {
61257- sys_chdir("/old");
61258+ sys_chdir((const char __force_user *)"/old");
61259 return;
61260 }
61261
61262@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
61263 mount_root();
61264
61265 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
61266- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
61267+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
61268 if (!error)
61269 printk("okay\n");
61270 else {
61271- int fd = sys_open("/dev/root.old", O_RDWR, 0);
61272+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
61273 if (error == -ENOENT)
61274 printk("/initrd does not exist. Ignored.\n");
61275 else
61276 printk("failed\n");
61277 printk(KERN_NOTICE "Unmounting old root\n");
61278- sys_umount("/old", MNT_DETACH);
61279+ sys_umount((char __force_user *)"/old", MNT_DETACH);
61280 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
61281 if (fd < 0) {
61282 error = fd;
61283@@ -116,11 +116,11 @@ int __init initrd_load(void)
61284 * mounted in the normal path.
61285 */
61286 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
61287- sys_unlink("/initrd.image");
61288+ sys_unlink((const char __force_user *)"/initrd.image");
61289 handle_initrd();
61290 return 1;
61291 }
61292 }
61293- sys_unlink("/initrd.image");
61294+ sys_unlink((const char __force_user *)"/initrd.image");
61295 return 0;
61296 }
61297diff -urNp linux-3.0.7/init/do_mounts_md.c linux-3.0.7/init/do_mounts_md.c
61298--- linux-3.0.7/init/do_mounts_md.c 2011-07-21 22:17:23.000000000 -0400
61299+++ linux-3.0.7/init/do_mounts_md.c 2011-10-06 04:17:55.000000000 -0400
61300@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
61301 partitioned ? "_d" : "", minor,
61302 md_setup_args[ent].device_names);
61303
61304- fd = sys_open(name, 0, 0);
61305+ fd = sys_open((char __force_user *)name, 0, 0);
61306 if (fd < 0) {
61307 printk(KERN_ERR "md: open failed - cannot start "
61308 "array %s\n", name);
61309@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
61310 * array without it
61311 */
61312 sys_close(fd);
61313- fd = sys_open(name, 0, 0);
61314+ fd = sys_open((char __force_user *)name, 0, 0);
61315 sys_ioctl(fd, BLKRRPART, 0);
61316 }
61317 sys_close(fd);
61318@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
61319
61320 wait_for_device_probe();
61321
61322- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
61323+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
61324 if (fd >= 0) {
61325 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
61326 sys_close(fd);
61327diff -urNp linux-3.0.7/init/initramfs.c linux-3.0.7/init/initramfs.c
61328--- linux-3.0.7/init/initramfs.c 2011-07-21 22:17:23.000000000 -0400
61329+++ linux-3.0.7/init/initramfs.c 2011-10-06 04:17:55.000000000 -0400
61330@@ -74,7 +74,7 @@ static void __init free_hash(void)
61331 }
61332 }
61333
61334-static long __init do_utime(char __user *filename, time_t mtime)
61335+static long __init do_utime(__force char __user *filename, time_t mtime)
61336 {
61337 struct timespec t[2];
61338
61339@@ -109,7 +109,7 @@ static void __init dir_utime(void)
61340 struct dir_entry *de, *tmp;
61341 list_for_each_entry_safe(de, tmp, &dir_list, list) {
61342 list_del(&de->list);
61343- do_utime(de->name, de->mtime);
61344+ do_utime((char __force_user *)de->name, de->mtime);
61345 kfree(de->name);
61346 kfree(de);
61347 }
61348@@ -271,7 +271,7 @@ static int __init maybe_link(void)
61349 if (nlink >= 2) {
61350 char *old = find_link(major, minor, ino, mode, collected);
61351 if (old)
61352- return (sys_link(old, collected) < 0) ? -1 : 1;
61353+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
61354 }
61355 return 0;
61356 }
61357@@ -280,11 +280,11 @@ static void __init clean_path(char *path
61358 {
61359 struct stat st;
61360
61361- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
61362+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
61363 if (S_ISDIR(st.st_mode))
61364- sys_rmdir(path);
61365+ sys_rmdir((char __force_user *)path);
61366 else
61367- sys_unlink(path);
61368+ sys_unlink((char __force_user *)path);
61369 }
61370 }
61371
61372@@ -305,7 +305,7 @@ static int __init do_name(void)
61373 int openflags = O_WRONLY|O_CREAT;
61374 if (ml != 1)
61375 openflags |= O_TRUNC;
61376- wfd = sys_open(collected, openflags, mode);
61377+ wfd = sys_open((char __force_user *)collected, openflags, mode);
61378
61379 if (wfd >= 0) {
61380 sys_fchown(wfd, uid, gid);
61381@@ -317,17 +317,17 @@ static int __init do_name(void)
61382 }
61383 }
61384 } else if (S_ISDIR(mode)) {
61385- sys_mkdir(collected, mode);
61386- sys_chown(collected, uid, gid);
61387- sys_chmod(collected, mode);
61388+ sys_mkdir((char __force_user *)collected, mode);
61389+ sys_chown((char __force_user *)collected, uid, gid);
61390+ sys_chmod((char __force_user *)collected, mode);
61391 dir_add(collected, mtime);
61392 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
61393 S_ISFIFO(mode) || S_ISSOCK(mode)) {
61394 if (maybe_link() == 0) {
61395- sys_mknod(collected, mode, rdev);
61396- sys_chown(collected, uid, gid);
61397- sys_chmod(collected, mode);
61398- do_utime(collected, mtime);
61399+ sys_mknod((char __force_user *)collected, mode, rdev);
61400+ sys_chown((char __force_user *)collected, uid, gid);
61401+ sys_chmod((char __force_user *)collected, mode);
61402+ do_utime((char __force_user *)collected, mtime);
61403 }
61404 }
61405 return 0;
61406@@ -336,15 +336,15 @@ static int __init do_name(void)
61407 static int __init do_copy(void)
61408 {
61409 if (count >= body_len) {
61410- sys_write(wfd, victim, body_len);
61411+ sys_write(wfd, (char __force_user *)victim, body_len);
61412 sys_close(wfd);
61413- do_utime(vcollected, mtime);
61414+ do_utime((char __force_user *)vcollected, mtime);
61415 kfree(vcollected);
61416 eat(body_len);
61417 state = SkipIt;
61418 return 0;
61419 } else {
61420- sys_write(wfd, victim, count);
61421+ sys_write(wfd, (char __force_user *)victim, count);
61422 body_len -= count;
61423 eat(count);
61424 return 1;
61425@@ -355,9 +355,9 @@ static int __init do_symlink(void)
61426 {
61427 collected[N_ALIGN(name_len) + body_len] = '\0';
61428 clean_path(collected, 0);
61429- sys_symlink(collected + N_ALIGN(name_len), collected);
61430- sys_lchown(collected, uid, gid);
61431- do_utime(collected, mtime);
61432+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
61433+ sys_lchown((char __force_user *)collected, uid, gid);
61434+ do_utime((char __force_user *)collected, mtime);
61435 state = SkipIt;
61436 next_state = Reset;
61437 return 0;
61438diff -urNp linux-3.0.7/init/Kconfig linux-3.0.7/init/Kconfig
61439--- linux-3.0.7/init/Kconfig 2011-07-21 22:17:23.000000000 -0400
61440+++ linux-3.0.7/init/Kconfig 2011-08-23 21:47:56.000000000 -0400
61441@@ -1195,7 +1195,7 @@ config SLUB_DEBUG
61442
61443 config COMPAT_BRK
61444 bool "Disable heap randomization"
61445- default y
61446+ default n
61447 help
61448 Randomizing heap placement makes heap exploits harder, but it
61449 also breaks ancient binaries (including anything libc5 based).
61450diff -urNp linux-3.0.7/init/main.c linux-3.0.7/init/main.c
61451--- linux-3.0.7/init/main.c 2011-07-21 22:17:23.000000000 -0400
61452+++ linux-3.0.7/init/main.c 2011-10-06 04:17:55.000000000 -0400
61453@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void)
61454 extern void tc_init(void);
61455 #endif
61456
61457+extern void grsecurity_init(void);
61458+
61459 /*
61460 * Debug helper: via this flag we know that we are in 'early bootup code'
61461 * where only the boot processor is running with IRQ disabled. This means
61462@@ -149,6 +151,49 @@ static int __init set_reset_devices(char
61463
61464 __setup("reset_devices", set_reset_devices);
61465
61466+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
61467+extern char pax_enter_kernel_user[];
61468+extern char pax_exit_kernel_user[];
61469+extern pgdval_t clone_pgd_mask;
61470+#endif
61471+
61472+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
61473+static int __init setup_pax_nouderef(char *str)
61474+{
61475+#ifdef CONFIG_X86_32
61476+ unsigned int cpu;
61477+ struct desc_struct *gdt;
61478+
61479+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
61480+ gdt = get_cpu_gdt_table(cpu);
61481+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
61482+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
61483+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
61484+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
61485+ }
61486+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
61487+#else
61488+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
61489+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
61490+ clone_pgd_mask = ~(pgdval_t)0UL;
61491+#endif
61492+
61493+ return 0;
61494+}
61495+early_param("pax_nouderef", setup_pax_nouderef);
61496+#endif
61497+
61498+#ifdef CONFIG_PAX_SOFTMODE
61499+int pax_softmode;
61500+
61501+static int __init setup_pax_softmode(char *str)
61502+{
61503+ get_option(&str, &pax_softmode);
61504+ return 1;
61505+}
61506+__setup("pax_softmode=", setup_pax_softmode);
61507+#endif
61508+
61509 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
61510 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
61511 static const char *panic_later, *panic_param;
61512@@ -667,6 +712,7 @@ int __init_or_module do_one_initcall(ini
61513 {
61514 int count = preempt_count();
61515 int ret;
61516+ const char *msg1 = "", *msg2 = "";
61517
61518 if (initcall_debug)
61519 ret = do_one_initcall_debug(fn);
61520@@ -679,15 +725,15 @@ int __init_or_module do_one_initcall(ini
61521 sprintf(msgbuf, "error code %d ", ret);
61522
61523 if (preempt_count() != count) {
61524- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
61525+ msg1 = " preemption imbalance";
61526 preempt_count() = count;
61527 }
61528 if (irqs_disabled()) {
61529- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
61530+ msg2 = " disabled interrupts";
61531 local_irq_enable();
61532 }
61533- if (msgbuf[0]) {
61534- printk("initcall %pF returned with %s\n", fn, msgbuf);
61535+ if (msgbuf[0] || *msg1 || *msg2) {
61536+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
61537 }
61538
61539 return ret;
61540@@ -805,7 +851,7 @@ static int __init kernel_init(void * unu
61541 do_basic_setup();
61542
61543 /* Open the /dev/console on the rootfs, this should never fail */
61544- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
61545+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
61546 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
61547
61548 (void) sys_dup(0);
61549@@ -818,11 +864,13 @@ static int __init kernel_init(void * unu
61550 if (!ramdisk_execute_command)
61551 ramdisk_execute_command = "/init";
61552
61553- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
61554+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
61555 ramdisk_execute_command = NULL;
61556 prepare_namespace();
61557 }
61558
61559+ grsecurity_init();
61560+
61561 /*
61562 * Ok, we have completed the initial bootup, and
61563 * we're essentially up and running. Get rid of the
61564diff -urNp linux-3.0.7/ipc/mqueue.c linux-3.0.7/ipc/mqueue.c
61565--- linux-3.0.7/ipc/mqueue.c 2011-10-16 21:54:54.000000000 -0400
61566+++ linux-3.0.7/ipc/mqueue.c 2011-10-16 21:59:31.000000000 -0400
61567@@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(st
61568 mq_bytes = (mq_msg_tblsz +
61569 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
61570
61571+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
61572 spin_lock(&mq_lock);
61573 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
61574 u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
61575diff -urNp linux-3.0.7/ipc/msg.c linux-3.0.7/ipc/msg.c
61576--- linux-3.0.7/ipc/msg.c 2011-07-21 22:17:23.000000000 -0400
61577+++ linux-3.0.7/ipc/msg.c 2011-08-23 21:47:56.000000000 -0400
61578@@ -309,18 +309,19 @@ static inline int msg_security(struct ke
61579 return security_msg_queue_associate(msq, msgflg);
61580 }
61581
61582+static struct ipc_ops msg_ops = {
61583+ .getnew = newque,
61584+ .associate = msg_security,
61585+ .more_checks = NULL
61586+};
61587+
61588 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
61589 {
61590 struct ipc_namespace *ns;
61591- struct ipc_ops msg_ops;
61592 struct ipc_params msg_params;
61593
61594 ns = current->nsproxy->ipc_ns;
61595
61596- msg_ops.getnew = newque;
61597- msg_ops.associate = msg_security;
61598- msg_ops.more_checks = NULL;
61599-
61600 msg_params.key = key;
61601 msg_params.flg = msgflg;
61602
61603diff -urNp linux-3.0.7/ipc/sem.c linux-3.0.7/ipc/sem.c
61604--- linux-3.0.7/ipc/sem.c 2011-09-02 18:11:21.000000000 -0400
61605+++ linux-3.0.7/ipc/sem.c 2011-08-23 21:48:14.000000000 -0400
61606@@ -318,10 +318,15 @@ static inline int sem_more_checks(struct
61607 return 0;
61608 }
61609
61610+static struct ipc_ops sem_ops = {
61611+ .getnew = newary,
61612+ .associate = sem_security,
61613+ .more_checks = sem_more_checks
61614+};
61615+
61616 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
61617 {
61618 struct ipc_namespace *ns;
61619- struct ipc_ops sem_ops;
61620 struct ipc_params sem_params;
61621
61622 ns = current->nsproxy->ipc_ns;
61623@@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
61624 if (nsems < 0 || nsems > ns->sc_semmsl)
61625 return -EINVAL;
61626
61627- sem_ops.getnew = newary;
61628- sem_ops.associate = sem_security;
61629- sem_ops.more_checks = sem_more_checks;
61630-
61631 sem_params.key = key;
61632 sem_params.flg = semflg;
61633 sem_params.u.nsems = nsems;
61634@@ -854,6 +855,8 @@ static int semctl_main(struct ipc_namesp
61635 int nsems;
61636 struct list_head tasks;
61637
61638+ pax_track_stack();
61639+
61640 sma = sem_lock_check(ns, semid);
61641 if (IS_ERR(sma))
61642 return PTR_ERR(sma);
61643@@ -1301,6 +1304,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
61644 struct ipc_namespace *ns;
61645 struct list_head tasks;
61646
61647+ pax_track_stack();
61648+
61649 ns = current->nsproxy->ipc_ns;
61650
61651 if (nsops < 1 || semid < 0)
61652diff -urNp linux-3.0.7/ipc/shm.c linux-3.0.7/ipc/shm.c
61653--- linux-3.0.7/ipc/shm.c 2011-07-21 22:17:23.000000000 -0400
61654+++ linux-3.0.7/ipc/shm.c 2011-08-23 21:48:14.000000000 -0400
61655@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
61656 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
61657 #endif
61658
61659+#ifdef CONFIG_GRKERNSEC
61660+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
61661+ const time_t shm_createtime, const uid_t cuid,
61662+ const int shmid);
61663+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
61664+ const time_t shm_createtime);
61665+#endif
61666+
61667 void shm_init_ns(struct ipc_namespace *ns)
61668 {
61669 ns->shm_ctlmax = SHMMAX;
61670@@ -401,6 +409,14 @@ static int newseg(struct ipc_namespace *
61671 shp->shm_lprid = 0;
61672 shp->shm_atim = shp->shm_dtim = 0;
61673 shp->shm_ctim = get_seconds();
61674+#ifdef CONFIG_GRKERNSEC
61675+ {
61676+ struct timespec timeval;
61677+ do_posix_clock_monotonic_gettime(&timeval);
61678+
61679+ shp->shm_createtime = timeval.tv_sec;
61680+ }
61681+#endif
61682 shp->shm_segsz = size;
61683 shp->shm_nattch = 0;
61684 shp->shm_file = file;
61685@@ -451,18 +467,19 @@ static inline int shm_more_checks(struct
61686 return 0;
61687 }
61688
61689+static struct ipc_ops shm_ops = {
61690+ .getnew = newseg,
61691+ .associate = shm_security,
61692+ .more_checks = shm_more_checks
61693+};
61694+
61695 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
61696 {
61697 struct ipc_namespace *ns;
61698- struct ipc_ops shm_ops;
61699 struct ipc_params shm_params;
61700
61701 ns = current->nsproxy->ipc_ns;
61702
61703- shm_ops.getnew = newseg;
61704- shm_ops.associate = shm_security;
61705- shm_ops.more_checks = shm_more_checks;
61706-
61707 shm_params.key = key;
61708 shm_params.flg = shmflg;
61709 shm_params.u.size = size;
61710@@ -762,8 +779,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
61711 case SHM_LOCK:
61712 case SHM_UNLOCK:
61713 {
61714- struct file *uninitialized_var(shm_file);
61715-
61716 lru_add_drain_all(); /* drain pagevecs to lru lists */
61717
61718 shp = shm_lock_check(ns, shmid);
61719@@ -896,9 +911,21 @@ long do_shmat(int shmid, char __user *sh
61720 if (err)
61721 goto out_unlock;
61722
61723+#ifdef CONFIG_GRKERNSEC
61724+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
61725+ shp->shm_perm.cuid, shmid) ||
61726+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
61727+ err = -EACCES;
61728+ goto out_unlock;
61729+ }
61730+#endif
61731+
61732 path = shp->shm_file->f_path;
61733 path_get(&path);
61734 shp->shm_nattch++;
61735+#ifdef CONFIG_GRKERNSEC
61736+ shp->shm_lapid = current->pid;
61737+#endif
61738 size = i_size_read(path.dentry->d_inode);
61739 shm_unlock(shp);
61740
61741diff -urNp linux-3.0.7/kernel/acct.c linux-3.0.7/kernel/acct.c
61742--- linux-3.0.7/kernel/acct.c 2011-07-21 22:17:23.000000000 -0400
61743+++ linux-3.0.7/kernel/acct.c 2011-10-06 04:17:55.000000000 -0400
61744@@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
61745 */
61746 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
61747 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
61748- file->f_op->write(file, (char *)&ac,
61749+ file->f_op->write(file, (char __force_user *)&ac,
61750 sizeof(acct_t), &file->f_pos);
61751 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
61752 set_fs(fs);
61753diff -urNp linux-3.0.7/kernel/audit.c linux-3.0.7/kernel/audit.c
61754--- linux-3.0.7/kernel/audit.c 2011-07-21 22:17:23.000000000 -0400
61755+++ linux-3.0.7/kernel/audit.c 2011-08-23 21:47:56.000000000 -0400
61756@@ -112,7 +112,7 @@ u32 audit_sig_sid = 0;
61757 3) suppressed due to audit_rate_limit
61758 4) suppressed due to audit_backlog_limit
61759 */
61760-static atomic_t audit_lost = ATOMIC_INIT(0);
61761+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
61762
61763 /* The netlink socket. */
61764 static struct sock *audit_sock;
61765@@ -234,7 +234,7 @@ void audit_log_lost(const char *message)
61766 unsigned long now;
61767 int print;
61768
61769- atomic_inc(&audit_lost);
61770+ atomic_inc_unchecked(&audit_lost);
61771
61772 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
61773
61774@@ -253,7 +253,7 @@ void audit_log_lost(const char *message)
61775 printk(KERN_WARNING
61776 "audit: audit_lost=%d audit_rate_limit=%d "
61777 "audit_backlog_limit=%d\n",
61778- atomic_read(&audit_lost),
61779+ atomic_read_unchecked(&audit_lost),
61780 audit_rate_limit,
61781 audit_backlog_limit);
61782 audit_panic(message);
61783@@ -686,7 +686,7 @@ static int audit_receive_msg(struct sk_b
61784 status_set.pid = audit_pid;
61785 status_set.rate_limit = audit_rate_limit;
61786 status_set.backlog_limit = audit_backlog_limit;
61787- status_set.lost = atomic_read(&audit_lost);
61788+ status_set.lost = atomic_read_unchecked(&audit_lost);
61789 status_set.backlog = skb_queue_len(&audit_skb_queue);
61790 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
61791 &status_set, sizeof(status_set));
61792diff -urNp linux-3.0.7/kernel/auditsc.c linux-3.0.7/kernel/auditsc.c
61793--- linux-3.0.7/kernel/auditsc.c 2011-07-21 22:17:23.000000000 -0400
61794+++ linux-3.0.7/kernel/auditsc.c 2011-08-23 21:47:56.000000000 -0400
61795@@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_conte
61796 }
61797
61798 /* global counter which is incremented every time something logs in */
61799-static atomic_t session_id = ATOMIC_INIT(0);
61800+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
61801
61802 /**
61803 * audit_set_loginuid - set a task's audit_context loginuid
61804@@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT
61805 */
61806 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
61807 {
61808- unsigned int sessionid = atomic_inc_return(&session_id);
61809+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
61810 struct audit_context *context = task->audit_context;
61811
61812 if (context && context->in_syscall) {
61813diff -urNp linux-3.0.7/kernel/capability.c linux-3.0.7/kernel/capability.c
61814--- linux-3.0.7/kernel/capability.c 2011-07-21 22:17:23.000000000 -0400
61815+++ linux-3.0.7/kernel/capability.c 2011-08-23 21:48:14.000000000 -0400
61816@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
61817 * before modification is attempted and the application
61818 * fails.
61819 */
61820+ if (tocopy > ARRAY_SIZE(kdata))
61821+ return -EFAULT;
61822+
61823 if (copy_to_user(dataptr, kdata, tocopy
61824 * sizeof(struct __user_cap_data_struct))) {
61825 return -EFAULT;
61826@@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *n
61827 BUG();
61828 }
61829
61830- if (security_capable(ns, current_cred(), cap) == 0) {
61831+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
61832 current->flags |= PF_SUPERPRIV;
61833 return true;
61834 }
61835@@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *n
61836 }
61837 EXPORT_SYMBOL(ns_capable);
61838
61839+bool ns_capable_nolog(struct user_namespace *ns, int cap)
61840+{
61841+ if (unlikely(!cap_valid(cap))) {
61842+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
61843+ BUG();
61844+ }
61845+
61846+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
61847+ current->flags |= PF_SUPERPRIV;
61848+ return true;
61849+ }
61850+ return false;
61851+}
61852+EXPORT_SYMBOL(ns_capable_nolog);
61853+
61854+bool capable_nolog(int cap)
61855+{
61856+ return ns_capable_nolog(&init_user_ns, cap);
61857+}
61858+EXPORT_SYMBOL(capable_nolog);
61859+
61860 /**
61861 * task_ns_capable - Determine whether current task has a superior
61862 * capability targeted at a specific task's user namespace.
61863@@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct
61864 }
61865 EXPORT_SYMBOL(task_ns_capable);
61866
61867+bool task_ns_capable_nolog(struct task_struct *t, int cap)
61868+{
61869+ return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
61870+}
61871+EXPORT_SYMBOL(task_ns_capable_nolog);
61872+
61873 /**
61874 * nsown_capable - Check superior capability to one's own user_ns
61875 * @cap: The capability in question
61876diff -urNp linux-3.0.7/kernel/cgroup.c linux-3.0.7/kernel/cgroup.c
61877--- linux-3.0.7/kernel/cgroup.c 2011-07-21 22:17:23.000000000 -0400
61878+++ linux-3.0.7/kernel/cgroup.c 2011-08-23 21:48:14.000000000 -0400
61879@@ -593,6 +593,8 @@ static struct css_set *find_css_set(
61880 struct hlist_head *hhead;
61881 struct cg_cgroup_link *link;
61882
61883+ pax_track_stack();
61884+
61885 /* First see if we already have a cgroup group that matches
61886 * the desired set */
61887 read_lock(&css_set_lock);
61888diff -urNp linux-3.0.7/kernel/compat.c linux-3.0.7/kernel/compat.c
61889--- linux-3.0.7/kernel/compat.c 2011-07-21 22:17:23.000000000 -0400
61890+++ linux-3.0.7/kernel/compat.c 2011-10-06 04:17:55.000000000 -0400
61891@@ -13,6 +13,7 @@
61892
61893 #include <linux/linkage.h>
61894 #include <linux/compat.h>
61895+#include <linux/module.h>
61896 #include <linux/errno.h>
61897 #include <linux/time.h>
61898 #include <linux/signal.h>
61899@@ -166,7 +167,7 @@ static long compat_nanosleep_restart(str
61900 mm_segment_t oldfs;
61901 long ret;
61902
61903- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
61904+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
61905 oldfs = get_fs();
61906 set_fs(KERNEL_DS);
61907 ret = hrtimer_nanosleep_restart(restart);
61908@@ -198,7 +199,7 @@ asmlinkage long compat_sys_nanosleep(str
61909 oldfs = get_fs();
61910 set_fs(KERNEL_DS);
61911 ret = hrtimer_nanosleep(&tu,
61912- rmtp ? (struct timespec __user *)&rmt : NULL,
61913+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
61914 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
61915 set_fs(oldfs);
61916
61917@@ -307,7 +308,7 @@ asmlinkage long compat_sys_sigpending(co
61918 mm_segment_t old_fs = get_fs();
61919
61920 set_fs(KERNEL_DS);
61921- ret = sys_sigpending((old_sigset_t __user *) &s);
61922+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
61923 set_fs(old_fs);
61924 if (ret == 0)
61925 ret = put_user(s, set);
61926@@ -330,8 +331,8 @@ asmlinkage long compat_sys_sigprocmask(i
61927 old_fs = get_fs();
61928 set_fs(KERNEL_DS);
61929 ret = sys_sigprocmask(how,
61930- set ? (old_sigset_t __user *) &s : NULL,
61931- oset ? (old_sigset_t __user *) &s : NULL);
61932+ set ? (old_sigset_t __force_user *) &s : NULL,
61933+ oset ? (old_sigset_t __force_user *) &s : NULL);
61934 set_fs(old_fs);
61935 if (ret == 0)
61936 if (oset)
61937@@ -368,7 +369,7 @@ asmlinkage long compat_sys_old_getrlimit
61938 mm_segment_t old_fs = get_fs();
61939
61940 set_fs(KERNEL_DS);
61941- ret = sys_old_getrlimit(resource, &r);
61942+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
61943 set_fs(old_fs);
61944
61945 if (!ret) {
61946@@ -440,7 +441,7 @@ asmlinkage long compat_sys_getrusage(int
61947 mm_segment_t old_fs = get_fs();
61948
61949 set_fs(KERNEL_DS);
61950- ret = sys_getrusage(who, (struct rusage __user *) &r);
61951+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
61952 set_fs(old_fs);
61953
61954 if (ret)
61955@@ -467,8 +468,8 @@ compat_sys_wait4(compat_pid_t pid, compa
61956 set_fs (KERNEL_DS);
61957 ret = sys_wait4(pid,
61958 (stat_addr ?
61959- (unsigned int __user *) &status : NULL),
61960- options, (struct rusage __user *) &r);
61961+ (unsigned int __force_user *) &status : NULL),
61962+ options, (struct rusage __force_user *) &r);
61963 set_fs (old_fs);
61964
61965 if (ret > 0) {
61966@@ -493,8 +494,8 @@ asmlinkage long compat_sys_waitid(int wh
61967 memset(&info, 0, sizeof(info));
61968
61969 set_fs(KERNEL_DS);
61970- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
61971- uru ? (struct rusage __user *)&ru : NULL);
61972+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
61973+ uru ? (struct rusage __force_user *)&ru : NULL);
61974 set_fs(old_fs);
61975
61976 if ((ret < 0) || (info.si_signo == 0))
61977@@ -624,8 +625,8 @@ long compat_sys_timer_settime(timer_t ti
61978 oldfs = get_fs();
61979 set_fs(KERNEL_DS);
61980 err = sys_timer_settime(timer_id, flags,
61981- (struct itimerspec __user *) &newts,
61982- (struct itimerspec __user *) &oldts);
61983+ (struct itimerspec __force_user *) &newts,
61984+ (struct itimerspec __force_user *) &oldts);
61985 set_fs(oldfs);
61986 if (!err && old && put_compat_itimerspec(old, &oldts))
61987 return -EFAULT;
61988@@ -642,7 +643,7 @@ long compat_sys_timer_gettime(timer_t ti
61989 oldfs = get_fs();
61990 set_fs(KERNEL_DS);
61991 err = sys_timer_gettime(timer_id,
61992- (struct itimerspec __user *) &ts);
61993+ (struct itimerspec __force_user *) &ts);
61994 set_fs(oldfs);
61995 if (!err && put_compat_itimerspec(setting, &ts))
61996 return -EFAULT;
61997@@ -661,7 +662,7 @@ long compat_sys_clock_settime(clockid_t
61998 oldfs = get_fs();
61999 set_fs(KERNEL_DS);
62000 err = sys_clock_settime(which_clock,
62001- (struct timespec __user *) &ts);
62002+ (struct timespec __force_user *) &ts);
62003 set_fs(oldfs);
62004 return err;
62005 }
62006@@ -676,7 +677,7 @@ long compat_sys_clock_gettime(clockid_t
62007 oldfs = get_fs();
62008 set_fs(KERNEL_DS);
62009 err = sys_clock_gettime(which_clock,
62010- (struct timespec __user *) &ts);
62011+ (struct timespec __force_user *) &ts);
62012 set_fs(oldfs);
62013 if (!err && put_compat_timespec(&ts, tp))
62014 return -EFAULT;
62015@@ -696,7 +697,7 @@ long compat_sys_clock_adjtime(clockid_t
62016
62017 oldfs = get_fs();
62018 set_fs(KERNEL_DS);
62019- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
62020+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
62021 set_fs(oldfs);
62022
62023 err = compat_put_timex(utp, &txc);
62024@@ -716,7 +717,7 @@ long compat_sys_clock_getres(clockid_t w
62025 oldfs = get_fs();
62026 set_fs(KERNEL_DS);
62027 err = sys_clock_getres(which_clock,
62028- (struct timespec __user *) &ts);
62029+ (struct timespec __force_user *) &ts);
62030 set_fs(oldfs);
62031 if (!err && tp && put_compat_timespec(&ts, tp))
62032 return -EFAULT;
62033@@ -728,9 +729,9 @@ static long compat_clock_nanosleep_resta
62034 long err;
62035 mm_segment_t oldfs;
62036 struct timespec tu;
62037- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
62038+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
62039
62040- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
62041+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
62042 oldfs = get_fs();
62043 set_fs(KERNEL_DS);
62044 err = clock_nanosleep_restart(restart);
62045@@ -762,8 +763,8 @@ long compat_sys_clock_nanosleep(clockid_
62046 oldfs = get_fs();
62047 set_fs(KERNEL_DS);
62048 err = sys_clock_nanosleep(which_clock, flags,
62049- (struct timespec __user *) &in,
62050- (struct timespec __user *) &out);
62051+ (struct timespec __force_user *) &in,
62052+ (struct timespec __force_user *) &out);
62053 set_fs(oldfs);
62054
62055 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
62056diff -urNp linux-3.0.7/kernel/configs.c linux-3.0.7/kernel/configs.c
62057--- linux-3.0.7/kernel/configs.c 2011-07-21 22:17:23.000000000 -0400
62058+++ linux-3.0.7/kernel/configs.c 2011-08-23 21:48:14.000000000 -0400
62059@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
62060 struct proc_dir_entry *entry;
62061
62062 /* create the current config file */
62063+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
62064+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
62065+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
62066+ &ikconfig_file_ops);
62067+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62068+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
62069+ &ikconfig_file_ops);
62070+#endif
62071+#else
62072 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
62073 &ikconfig_file_ops);
62074+#endif
62075+
62076 if (!entry)
62077 return -ENOMEM;
62078
62079diff -urNp linux-3.0.7/kernel/cred.c linux-3.0.7/kernel/cred.c
62080--- linux-3.0.7/kernel/cred.c 2011-07-21 22:17:23.000000000 -0400
62081+++ linux-3.0.7/kernel/cred.c 2011-08-25 17:23:03.000000000 -0400
62082@@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
62083 */
62084 void __put_cred(struct cred *cred)
62085 {
62086+ pax_track_stack();
62087+
62088 kdebug("__put_cred(%p{%d,%d})", cred,
62089 atomic_read(&cred->usage),
62090 read_cred_subscribers(cred));
62091@@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
62092 {
62093 struct cred *cred;
62094
62095+ pax_track_stack();
62096+
62097 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
62098 atomic_read(&tsk->cred->usage),
62099 read_cred_subscribers(tsk->cred));
62100@@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct
62101 {
62102 const struct cred *cred;
62103
62104+ pax_track_stack();
62105+
62106 rcu_read_lock();
62107
62108 do {
62109@@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
62110 {
62111 struct cred *new;
62112
62113+ pax_track_stack();
62114+
62115 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
62116 if (!new)
62117 return NULL;
62118@@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
62119 const struct cred *old;
62120 struct cred *new;
62121
62122+ pax_track_stack();
62123+
62124 validate_process_creds();
62125
62126 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62127@@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
62128 struct thread_group_cred *tgcred = NULL;
62129 struct cred *new;
62130
62131+ pax_track_stack();
62132+
62133 #ifdef CONFIG_KEYS
62134 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
62135 if (!tgcred)
62136@@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
62137 struct cred *new;
62138 int ret;
62139
62140+ pax_track_stack();
62141+
62142 if (
62143 #ifdef CONFIG_KEYS
62144 !p->cred->thread_keyring &&
62145@@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
62146 struct task_struct *task = current;
62147 const struct cred *old = task->real_cred;
62148
62149+ pax_track_stack();
62150+
62151 kdebug("commit_creds(%p{%d,%d})", new,
62152 atomic_read(&new->usage),
62153 read_cred_subscribers(new));
62154@@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
62155
62156 get_cred(new); /* we will require a ref for the subj creds too */
62157
62158+ gr_set_role_label(task, new->uid, new->gid);
62159+
62160 /* dumpability changes */
62161 if (old->euid != new->euid ||
62162 old->egid != new->egid ||
62163@@ -508,10 +526,8 @@ int commit_creds(struct cred *new)
62164 key_fsgid_changed(task);
62165
62166 /* do it
62167- * - What if a process setreuid()'s and this brings the
62168- * new uid over his NPROC rlimit? We can check this now
62169- * cheaply with the new uid cache, so if it matters
62170- * we should be checking for it. -DaveM
62171+ * RLIMIT_NPROC limits on user->processes have already been checked
62172+ * in set_user().
62173 */
62174 alter_cred_subscribers(new, 2);
62175 if (new->user != old->user)
62176@@ -551,6 +567,8 @@ EXPORT_SYMBOL(commit_creds);
62177 */
62178 void abort_creds(struct cred *new)
62179 {
62180+ pax_track_stack();
62181+
62182 kdebug("abort_creds(%p{%d,%d})", new,
62183 atomic_read(&new->usage),
62184 read_cred_subscribers(new));
62185@@ -574,6 +592,8 @@ const struct cred *override_creds(const
62186 {
62187 const struct cred *old = current->cred;
62188
62189+ pax_track_stack();
62190+
62191 kdebug("override_creds(%p{%d,%d})", new,
62192 atomic_read(&new->usage),
62193 read_cred_subscribers(new));
62194@@ -603,6 +623,8 @@ void revert_creds(const struct cred *old
62195 {
62196 const struct cred *override = current->cred;
62197
62198+ pax_track_stack();
62199+
62200 kdebug("revert_creds(%p{%d,%d})", old,
62201 atomic_read(&old->usage),
62202 read_cred_subscribers(old));
62203@@ -649,6 +671,8 @@ struct cred *prepare_kernel_cred(struct
62204 const struct cred *old;
62205 struct cred *new;
62206
62207+ pax_track_stack();
62208+
62209 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62210 if (!new)
62211 return NULL;
62212@@ -703,6 +727,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
62213 */
62214 int set_security_override(struct cred *new, u32 secid)
62215 {
62216+ pax_track_stack();
62217+
62218 return security_kernel_act_as(new, secid);
62219 }
62220 EXPORT_SYMBOL(set_security_override);
62221@@ -722,6 +748,8 @@ int set_security_override_from_ctx(struc
62222 u32 secid;
62223 int ret;
62224
62225+ pax_track_stack();
62226+
62227 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
62228 if (ret < 0)
62229 return ret;
62230diff -urNp linux-3.0.7/kernel/debug/debug_core.c linux-3.0.7/kernel/debug/debug_core.c
62231--- linux-3.0.7/kernel/debug/debug_core.c 2011-07-21 22:17:23.000000000 -0400
62232+++ linux-3.0.7/kernel/debug/debug_core.c 2011-08-23 21:47:56.000000000 -0400
62233@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
62234 */
62235 static atomic_t masters_in_kgdb;
62236 static atomic_t slaves_in_kgdb;
62237-static atomic_t kgdb_break_tasklet_var;
62238+static atomic_unchecked_t kgdb_break_tasklet_var;
62239 atomic_t kgdb_setting_breakpoint;
62240
62241 struct task_struct *kgdb_usethread;
62242@@ -129,7 +129,7 @@ int kgdb_single_step;
62243 static pid_t kgdb_sstep_pid;
62244
62245 /* to keep track of the CPU which is doing the single stepping*/
62246-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
62247+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
62248
62249 /*
62250 * If you are debugging a problem where roundup (the collection of
62251@@ -542,7 +542,7 @@ return_normal:
62252 * kernel will only try for the value of sstep_tries before
62253 * giving up and continuing on.
62254 */
62255- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
62256+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
62257 (kgdb_info[cpu].task &&
62258 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
62259 atomic_set(&kgdb_active, -1);
62260@@ -636,8 +636,8 @@ cpu_master_loop:
62261 }
62262
62263 kgdb_restore:
62264- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
62265- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
62266+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
62267+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
62268 if (kgdb_info[sstep_cpu].task)
62269 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
62270 else
62271@@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
62272 static void kgdb_tasklet_bpt(unsigned long ing)
62273 {
62274 kgdb_breakpoint();
62275- atomic_set(&kgdb_break_tasklet_var, 0);
62276+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
62277 }
62278
62279 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
62280
62281 void kgdb_schedule_breakpoint(void)
62282 {
62283- if (atomic_read(&kgdb_break_tasklet_var) ||
62284+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
62285 atomic_read(&kgdb_active) != -1 ||
62286 atomic_read(&kgdb_setting_breakpoint))
62287 return;
62288- atomic_inc(&kgdb_break_tasklet_var);
62289+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
62290 tasklet_schedule(&kgdb_tasklet_breakpoint);
62291 }
62292 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
62293diff -urNp linux-3.0.7/kernel/debug/kdb/kdb_main.c linux-3.0.7/kernel/debug/kdb/kdb_main.c
62294--- linux-3.0.7/kernel/debug/kdb/kdb_main.c 2011-07-21 22:17:23.000000000 -0400
62295+++ linux-3.0.7/kernel/debug/kdb/kdb_main.c 2011-08-23 21:47:56.000000000 -0400
62296@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
62297 list_for_each_entry(mod, kdb_modules, list) {
62298
62299 kdb_printf("%-20s%8u 0x%p ", mod->name,
62300- mod->core_size, (void *)mod);
62301+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
62302 #ifdef CONFIG_MODULE_UNLOAD
62303 kdb_printf("%4d ", module_refcount(mod));
62304 #endif
62305@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
62306 kdb_printf(" (Loading)");
62307 else
62308 kdb_printf(" (Live)");
62309- kdb_printf(" 0x%p", mod->module_core);
62310+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
62311
62312 #ifdef CONFIG_MODULE_UNLOAD
62313 {
62314diff -urNp linux-3.0.7/kernel/events/core.c linux-3.0.7/kernel/events/core.c
62315--- linux-3.0.7/kernel/events/core.c 2011-09-02 18:11:21.000000000 -0400
62316+++ linux-3.0.7/kernel/events/core.c 2011-09-14 09:08:05.000000000 -0400
62317@@ -170,7 +170,7 @@ int perf_proc_update_handler(struct ctl_
62318 return 0;
62319 }
62320
62321-static atomic64_t perf_event_id;
62322+static atomic64_unchecked_t perf_event_id;
62323
62324 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
62325 enum event_type_t event_type);
62326@@ -2488,7 +2488,7 @@ static void __perf_event_read(void *info
62327
62328 static inline u64 perf_event_count(struct perf_event *event)
62329 {
62330- return local64_read(&event->count) + atomic64_read(&event->child_count);
62331+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
62332 }
62333
62334 static u64 perf_event_read(struct perf_event *event)
62335@@ -3023,9 +3023,9 @@ u64 perf_event_read_value(struct perf_ev
62336 mutex_lock(&event->child_mutex);
62337 total += perf_event_read(event);
62338 *enabled += event->total_time_enabled +
62339- atomic64_read(&event->child_total_time_enabled);
62340+ atomic64_read_unchecked(&event->child_total_time_enabled);
62341 *running += event->total_time_running +
62342- atomic64_read(&event->child_total_time_running);
62343+ atomic64_read_unchecked(&event->child_total_time_running);
62344
62345 list_for_each_entry(child, &event->child_list, child_list) {
62346 total += perf_event_read(child);
62347@@ -3388,10 +3388,10 @@ void perf_event_update_userpage(struct p
62348 userpg->offset -= local64_read(&event->hw.prev_count);
62349
62350 userpg->time_enabled = event->total_time_enabled +
62351- atomic64_read(&event->child_total_time_enabled);
62352+ atomic64_read_unchecked(&event->child_total_time_enabled);
62353
62354 userpg->time_running = event->total_time_running +
62355- atomic64_read(&event->child_total_time_running);
62356+ atomic64_read_unchecked(&event->child_total_time_running);
62357
62358 barrier();
62359 ++userpg->lock;
62360@@ -4188,11 +4188,11 @@ static void perf_output_read_one(struct
62361 values[n++] = perf_event_count(event);
62362 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
62363 values[n++] = enabled +
62364- atomic64_read(&event->child_total_time_enabled);
62365+ atomic64_read_unchecked(&event->child_total_time_enabled);
62366 }
62367 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
62368 values[n++] = running +
62369- atomic64_read(&event->child_total_time_running);
62370+ atomic64_read_unchecked(&event->child_total_time_running);
62371 }
62372 if (read_format & PERF_FORMAT_ID)
62373 values[n++] = primary_event_id(event);
62374@@ -4833,12 +4833,12 @@ static void perf_event_mmap_event(struct
62375 * need to add enough zero bytes after the string to handle
62376 * the 64bit alignment we do later.
62377 */
62378- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
62379+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
62380 if (!buf) {
62381 name = strncpy(tmp, "//enomem", sizeof(tmp));
62382 goto got_name;
62383 }
62384- name = d_path(&file->f_path, buf, PATH_MAX);
62385+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
62386 if (IS_ERR(name)) {
62387 name = strncpy(tmp, "//toolong", sizeof(tmp));
62388 goto got_name;
62389@@ -6190,7 +6190,7 @@ perf_event_alloc(struct perf_event_attr
62390 event->parent = parent_event;
62391
62392 event->ns = get_pid_ns(current->nsproxy->pid_ns);
62393- event->id = atomic64_inc_return(&perf_event_id);
62394+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
62395
62396 event->state = PERF_EVENT_STATE_INACTIVE;
62397
62398@@ -6713,10 +6713,10 @@ static void sync_child_event(struct perf
62399 /*
62400 * Add back the child's count to the parent's count:
62401 */
62402- atomic64_add(child_val, &parent_event->child_count);
62403- atomic64_add(child_event->total_time_enabled,
62404+ atomic64_add_unchecked(child_val, &parent_event->child_count);
62405+ atomic64_add_unchecked(child_event->total_time_enabled,
62406 &parent_event->child_total_time_enabled);
62407- atomic64_add(child_event->total_time_running,
62408+ atomic64_add_unchecked(child_event->total_time_running,
62409 &parent_event->child_total_time_running);
62410
62411 /*
62412diff -urNp linux-3.0.7/kernel/exit.c linux-3.0.7/kernel/exit.c
62413--- linux-3.0.7/kernel/exit.c 2011-07-21 22:17:23.000000000 -0400
62414+++ linux-3.0.7/kernel/exit.c 2011-08-23 21:48:14.000000000 -0400
62415@@ -57,6 +57,10 @@
62416 #include <asm/pgtable.h>
62417 #include <asm/mmu_context.h>
62418
62419+#ifdef CONFIG_GRKERNSEC
62420+extern rwlock_t grsec_exec_file_lock;
62421+#endif
62422+
62423 static void exit_mm(struct task_struct * tsk);
62424
62425 static void __unhash_process(struct task_struct *p, bool group_dead)
62426@@ -169,6 +173,10 @@ void release_task(struct task_struct * p
62427 struct task_struct *leader;
62428 int zap_leader;
62429 repeat:
62430+#ifdef CONFIG_NET
62431+ gr_del_task_from_ip_table(p);
62432+#endif
62433+
62434 tracehook_prepare_release_task(p);
62435 /* don't need to get the RCU readlock here - the process is dead and
62436 * can't be modifying its own credentials. But shut RCU-lockdep up */
62437@@ -338,11 +346,22 @@ static void reparent_to_kthreadd(void)
62438 {
62439 write_lock_irq(&tasklist_lock);
62440
62441+#ifdef CONFIG_GRKERNSEC
62442+ write_lock(&grsec_exec_file_lock);
62443+ if (current->exec_file) {
62444+ fput(current->exec_file);
62445+ current->exec_file = NULL;
62446+ }
62447+ write_unlock(&grsec_exec_file_lock);
62448+#endif
62449+
62450 ptrace_unlink(current);
62451 /* Reparent to init */
62452 current->real_parent = current->parent = kthreadd_task;
62453 list_move_tail(&current->sibling, &current->real_parent->children);
62454
62455+ gr_set_kernel_label(current);
62456+
62457 /* Set the exit signal to SIGCHLD so we signal init on exit */
62458 current->exit_signal = SIGCHLD;
62459
62460@@ -394,7 +413,7 @@ int allow_signal(int sig)
62461 * know it'll be handled, so that they don't get converted to
62462 * SIGKILL or just silently dropped.
62463 */
62464- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
62465+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
62466 recalc_sigpending();
62467 spin_unlock_irq(&current->sighand->siglock);
62468 return 0;
62469@@ -430,6 +449,17 @@ void daemonize(const char *name, ...)
62470 vsnprintf(current->comm, sizeof(current->comm), name, args);
62471 va_end(args);
62472
62473+#ifdef CONFIG_GRKERNSEC
62474+ write_lock(&grsec_exec_file_lock);
62475+ if (current->exec_file) {
62476+ fput(current->exec_file);
62477+ current->exec_file = NULL;
62478+ }
62479+ write_unlock(&grsec_exec_file_lock);
62480+#endif
62481+
62482+ gr_set_kernel_label(current);
62483+
62484 /*
62485 * If we were started as result of loading a module, close all of the
62486 * user space pages. We don't need them, and if we didn't close them
62487@@ -904,15 +934,8 @@ NORET_TYPE void do_exit(long code)
62488 struct task_struct *tsk = current;
62489 int group_dead;
62490
62491- profile_task_exit(tsk);
62492-
62493- WARN_ON(atomic_read(&tsk->fs_excl));
62494- WARN_ON(blk_needs_flush_plug(tsk));
62495-
62496 if (unlikely(in_interrupt()))
62497 panic("Aiee, killing interrupt handler!");
62498- if (unlikely(!tsk->pid))
62499- panic("Attempted to kill the idle task!");
62500
62501 /*
62502 * If do_exit is called because this processes oopsed, it's possible
62503@@ -923,6 +946,14 @@ NORET_TYPE void do_exit(long code)
62504 */
62505 set_fs(USER_DS);
62506
62507+ profile_task_exit(tsk);
62508+
62509+ WARN_ON(atomic_read(&tsk->fs_excl));
62510+ WARN_ON(blk_needs_flush_plug(tsk));
62511+
62512+ if (unlikely(!tsk->pid))
62513+ panic("Attempted to kill the idle task!");
62514+
62515 tracehook_report_exit(&code);
62516
62517 validate_creds_for_do_exit(tsk);
62518@@ -983,6 +1014,9 @@ NORET_TYPE void do_exit(long code)
62519 tsk->exit_code = code;
62520 taskstats_exit(tsk, group_dead);
62521
62522+ gr_acl_handle_psacct(tsk, code);
62523+ gr_acl_handle_exit();
62524+
62525 exit_mm(tsk);
62526
62527 if (group_dead)
62528diff -urNp linux-3.0.7/kernel/fork.c linux-3.0.7/kernel/fork.c
62529--- linux-3.0.7/kernel/fork.c 2011-07-21 22:17:23.000000000 -0400
62530+++ linux-3.0.7/kernel/fork.c 2011-08-25 17:23:36.000000000 -0400
62531@@ -286,7 +286,7 @@ static struct task_struct *dup_task_stru
62532 *stackend = STACK_END_MAGIC; /* for overflow detection */
62533
62534 #ifdef CONFIG_CC_STACKPROTECTOR
62535- tsk->stack_canary = get_random_int();
62536+ tsk->stack_canary = pax_get_random_long();
62537 #endif
62538
62539 /* One for us, one for whoever does the "release_task()" (usually parent) */
62540@@ -308,13 +308,77 @@ out:
62541 }
62542
62543 #ifdef CONFIG_MMU
62544+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
62545+{
62546+ struct vm_area_struct *tmp;
62547+ unsigned long charge;
62548+ struct mempolicy *pol;
62549+ struct file *file;
62550+
62551+ charge = 0;
62552+ if (mpnt->vm_flags & VM_ACCOUNT) {
62553+ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
62554+ if (security_vm_enough_memory(len))
62555+ goto fail_nomem;
62556+ charge = len;
62557+ }
62558+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62559+ if (!tmp)
62560+ goto fail_nomem;
62561+ *tmp = *mpnt;
62562+ tmp->vm_mm = mm;
62563+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
62564+ pol = mpol_dup(vma_policy(mpnt));
62565+ if (IS_ERR(pol))
62566+ goto fail_nomem_policy;
62567+ vma_set_policy(tmp, pol);
62568+ if (anon_vma_fork(tmp, mpnt))
62569+ goto fail_nomem_anon_vma_fork;
62570+ tmp->vm_flags &= ~VM_LOCKED;
62571+ tmp->vm_next = tmp->vm_prev = NULL;
62572+ tmp->vm_mirror = NULL;
62573+ file = tmp->vm_file;
62574+ if (file) {
62575+ struct inode *inode = file->f_path.dentry->d_inode;
62576+ struct address_space *mapping = file->f_mapping;
62577+
62578+ get_file(file);
62579+ if (tmp->vm_flags & VM_DENYWRITE)
62580+ atomic_dec(&inode->i_writecount);
62581+ mutex_lock(&mapping->i_mmap_mutex);
62582+ if (tmp->vm_flags & VM_SHARED)
62583+ mapping->i_mmap_writable++;
62584+ flush_dcache_mmap_lock(mapping);
62585+ /* insert tmp into the share list, just after mpnt */
62586+ vma_prio_tree_add(tmp, mpnt);
62587+ flush_dcache_mmap_unlock(mapping);
62588+ mutex_unlock(&mapping->i_mmap_mutex);
62589+ }
62590+
62591+ /*
62592+ * Clear hugetlb-related page reserves for children. This only
62593+ * affects MAP_PRIVATE mappings. Faults generated by the child
62594+ * are not guaranteed to succeed, even if read-only
62595+ */
62596+ if (is_vm_hugetlb_page(tmp))
62597+ reset_vma_resv_huge_pages(tmp);
62598+
62599+ return tmp;
62600+
62601+fail_nomem_anon_vma_fork:
62602+ mpol_put(pol);
62603+fail_nomem_policy:
62604+ kmem_cache_free(vm_area_cachep, tmp);
62605+fail_nomem:
62606+ vm_unacct_memory(charge);
62607+ return NULL;
62608+}
62609+
62610 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
62611 {
62612 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
62613 struct rb_node **rb_link, *rb_parent;
62614 int retval;
62615- unsigned long charge;
62616- struct mempolicy *pol;
62617
62618 down_write(&oldmm->mmap_sem);
62619 flush_cache_dup_mm(oldmm);
62620@@ -326,8 +390,8 @@ static int dup_mmap(struct mm_struct *mm
62621 mm->locked_vm = 0;
62622 mm->mmap = NULL;
62623 mm->mmap_cache = NULL;
62624- mm->free_area_cache = oldmm->mmap_base;
62625- mm->cached_hole_size = ~0UL;
62626+ mm->free_area_cache = oldmm->free_area_cache;
62627+ mm->cached_hole_size = oldmm->cached_hole_size;
62628 mm->map_count = 0;
62629 cpumask_clear(mm_cpumask(mm));
62630 mm->mm_rb = RB_ROOT;
62631@@ -343,8 +407,6 @@ static int dup_mmap(struct mm_struct *mm
62632
62633 prev = NULL;
62634 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
62635- struct file *file;
62636-
62637 if (mpnt->vm_flags & VM_DONTCOPY) {
62638 long pages = vma_pages(mpnt);
62639 mm->total_vm -= pages;
62640@@ -352,55 +414,13 @@ static int dup_mmap(struct mm_struct *mm
62641 -pages);
62642 continue;
62643 }
62644- charge = 0;
62645- if (mpnt->vm_flags & VM_ACCOUNT) {
62646- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
62647- if (security_vm_enough_memory(len))
62648- goto fail_nomem;
62649- charge = len;
62650- }
62651- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62652- if (!tmp)
62653- goto fail_nomem;
62654- *tmp = *mpnt;
62655- INIT_LIST_HEAD(&tmp->anon_vma_chain);
62656- pol = mpol_dup(vma_policy(mpnt));
62657- retval = PTR_ERR(pol);
62658- if (IS_ERR(pol))
62659- goto fail_nomem_policy;
62660- vma_set_policy(tmp, pol);
62661- tmp->vm_mm = mm;
62662- if (anon_vma_fork(tmp, mpnt))
62663- goto fail_nomem_anon_vma_fork;
62664- tmp->vm_flags &= ~VM_LOCKED;
62665- tmp->vm_next = tmp->vm_prev = NULL;
62666- file = tmp->vm_file;
62667- if (file) {
62668- struct inode *inode = file->f_path.dentry->d_inode;
62669- struct address_space *mapping = file->f_mapping;
62670-
62671- get_file(file);
62672- if (tmp->vm_flags & VM_DENYWRITE)
62673- atomic_dec(&inode->i_writecount);
62674- mutex_lock(&mapping->i_mmap_mutex);
62675- if (tmp->vm_flags & VM_SHARED)
62676- mapping->i_mmap_writable++;
62677- flush_dcache_mmap_lock(mapping);
62678- /* insert tmp into the share list, just after mpnt */
62679- vma_prio_tree_add(tmp, mpnt);
62680- flush_dcache_mmap_unlock(mapping);
62681- mutex_unlock(&mapping->i_mmap_mutex);
62682+ tmp = dup_vma(mm, mpnt);
62683+ if (!tmp) {
62684+ retval = -ENOMEM;
62685+ goto out;
62686 }
62687
62688 /*
62689- * Clear hugetlb-related page reserves for children. This only
62690- * affects MAP_PRIVATE mappings. Faults generated by the child
62691- * are not guaranteed to succeed, even if read-only
62692- */
62693- if (is_vm_hugetlb_page(tmp))
62694- reset_vma_resv_huge_pages(tmp);
62695-
62696- /*
62697 * Link in the new vma and copy the page table entries.
62698 */
62699 *pprev = tmp;
62700@@ -421,6 +441,31 @@ static int dup_mmap(struct mm_struct *mm
62701 if (retval)
62702 goto out;
62703 }
62704+
62705+#ifdef CONFIG_PAX_SEGMEXEC
62706+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
62707+ struct vm_area_struct *mpnt_m;
62708+
62709+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
62710+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
62711+
62712+ if (!mpnt->vm_mirror)
62713+ continue;
62714+
62715+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
62716+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
62717+ mpnt->vm_mirror = mpnt_m;
62718+ } else {
62719+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
62720+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
62721+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
62722+ mpnt->vm_mirror->vm_mirror = mpnt;
62723+ }
62724+ }
62725+ BUG_ON(mpnt_m);
62726+ }
62727+#endif
62728+
62729 /* a new mm has just been created */
62730 arch_dup_mmap(oldmm, mm);
62731 retval = 0;
62732@@ -429,14 +474,6 @@ out:
62733 flush_tlb_mm(oldmm);
62734 up_write(&oldmm->mmap_sem);
62735 return retval;
62736-fail_nomem_anon_vma_fork:
62737- mpol_put(pol);
62738-fail_nomem_policy:
62739- kmem_cache_free(vm_area_cachep, tmp);
62740-fail_nomem:
62741- retval = -ENOMEM;
62742- vm_unacct_memory(charge);
62743- goto out;
62744 }
62745
62746 static inline int mm_alloc_pgd(struct mm_struct * mm)
62747@@ -836,13 +873,14 @@ static int copy_fs(unsigned long clone_f
62748 spin_unlock(&fs->lock);
62749 return -EAGAIN;
62750 }
62751- fs->users++;
62752+ atomic_inc(&fs->users);
62753 spin_unlock(&fs->lock);
62754 return 0;
62755 }
62756 tsk->fs = copy_fs_struct(fs);
62757 if (!tsk->fs)
62758 return -ENOMEM;
62759+ gr_set_chroot_entries(tsk, &tsk->fs->root);
62760 return 0;
62761 }
62762
62763@@ -1104,12 +1142,16 @@ static struct task_struct *copy_process(
62764 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
62765 #endif
62766 retval = -EAGAIN;
62767+
62768+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
62769+
62770 if (atomic_read(&p->real_cred->user->processes) >=
62771 task_rlimit(p, RLIMIT_NPROC)) {
62772- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
62773- p->real_cred->user != INIT_USER)
62774+ if (p->real_cred->user != INIT_USER &&
62775+ !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
62776 goto bad_fork_free;
62777 }
62778+ current->flags &= ~PF_NPROC_EXCEEDED;
62779
62780 retval = copy_creds(p, clone_flags);
62781 if (retval < 0)
62782@@ -1250,6 +1292,8 @@ static struct task_struct *copy_process(
62783 if (clone_flags & CLONE_THREAD)
62784 p->tgid = current->tgid;
62785
62786+ gr_copy_label(p);
62787+
62788 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
62789 /*
62790 * Clear TID on mm_release()?
62791@@ -1414,6 +1458,8 @@ bad_fork_cleanup_count:
62792 bad_fork_free:
62793 free_task(p);
62794 fork_out:
62795+ gr_log_forkfail(retval);
62796+
62797 return ERR_PTR(retval);
62798 }
62799
62800@@ -1502,6 +1548,8 @@ long do_fork(unsigned long clone_flags,
62801 if (clone_flags & CLONE_PARENT_SETTID)
62802 put_user(nr, parent_tidptr);
62803
62804+ gr_handle_brute_check();
62805+
62806 if (clone_flags & CLONE_VFORK) {
62807 p->vfork_done = &vfork;
62808 init_completion(&vfork);
62809@@ -1610,7 +1658,7 @@ static int unshare_fs(unsigned long unsh
62810 return 0;
62811
62812 /* don't need lock here; in the worst case we'll do useless copy */
62813- if (fs->users == 1)
62814+ if (atomic_read(&fs->users) == 1)
62815 return 0;
62816
62817 *new_fsp = copy_fs_struct(fs);
62818@@ -1697,7 +1745,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
62819 fs = current->fs;
62820 spin_lock(&fs->lock);
62821 current->fs = new_fs;
62822- if (--fs->users)
62823+ gr_set_chroot_entries(current, &current->fs->root);
62824+ if (atomic_dec_return(&fs->users))
62825 new_fs = NULL;
62826 else
62827 new_fs = fs;
62828diff -urNp linux-3.0.7/kernel/futex.c linux-3.0.7/kernel/futex.c
62829--- linux-3.0.7/kernel/futex.c 2011-09-02 18:11:21.000000000 -0400
62830+++ linux-3.0.7/kernel/futex.c 2011-08-23 21:48:14.000000000 -0400
62831@@ -54,6 +54,7 @@
62832 #include <linux/mount.h>
62833 #include <linux/pagemap.h>
62834 #include <linux/syscalls.h>
62835+#include <linux/ptrace.h>
62836 #include <linux/signal.h>
62837 #include <linux/module.h>
62838 #include <linux/magic.h>
62839@@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fsh
62840 struct page *page, *page_head;
62841 int err, ro = 0;
62842
62843+#ifdef CONFIG_PAX_SEGMEXEC
62844+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
62845+ return -EFAULT;
62846+#endif
62847+
62848 /*
62849 * The futex address must be "naturally" aligned.
62850 */
62851@@ -1863,6 +1869,8 @@ static int futex_wait(u32 __user *uaddr,
62852 struct futex_q q = futex_q_init;
62853 int ret;
62854
62855+ pax_track_stack();
62856+
62857 if (!bitset)
62858 return -EINVAL;
62859 q.bitset = bitset;
62860@@ -2259,6 +2267,8 @@ static int futex_wait_requeue_pi(u32 __u
62861 struct futex_q q = futex_q_init;
62862 int res, ret;
62863
62864+ pax_track_stack();
62865+
62866 if (!bitset)
62867 return -EINVAL;
62868
62869@@ -2431,7 +2441,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
62870 {
62871 struct robust_list_head __user *head;
62872 unsigned long ret;
62873+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
62874 const struct cred *cred = current_cred(), *pcred;
62875+#endif
62876
62877 if (!futex_cmpxchg_enabled)
62878 return -ENOSYS;
62879@@ -2447,6 +2459,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
62880 if (!p)
62881 goto err_unlock;
62882 ret = -EPERM;
62883+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62884+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
62885+ goto err_unlock;
62886+#else
62887 pcred = __task_cred(p);
62888 /* If victim is in different user_ns, then uids are not
62889 comparable, so we must have CAP_SYS_PTRACE */
62890@@ -2461,6 +2477,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
62891 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
62892 goto err_unlock;
62893 ok:
62894+#endif
62895 head = p->robust_list;
62896 rcu_read_unlock();
62897 }
62898@@ -2712,6 +2729,7 @@ static int __init futex_init(void)
62899 {
62900 u32 curval;
62901 int i;
62902+ mm_segment_t oldfs;
62903
62904 /*
62905 * This will fail and we want it. Some arch implementations do
62906@@ -2723,8 +2741,11 @@ static int __init futex_init(void)
62907 * implementation, the non-functional ones will return
62908 * -ENOSYS.
62909 */
62910+ oldfs = get_fs();
62911+ set_fs(USER_DS);
62912 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
62913 futex_cmpxchg_enabled = 1;
62914+ set_fs(oldfs);
62915
62916 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
62917 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
62918diff -urNp linux-3.0.7/kernel/futex_compat.c linux-3.0.7/kernel/futex_compat.c
62919--- linux-3.0.7/kernel/futex_compat.c 2011-07-21 22:17:23.000000000 -0400
62920+++ linux-3.0.7/kernel/futex_compat.c 2011-08-23 21:48:14.000000000 -0400
62921@@ -10,6 +10,7 @@
62922 #include <linux/compat.h>
62923 #include <linux/nsproxy.h>
62924 #include <linux/futex.h>
62925+#include <linux/ptrace.h>
62926
62927 #include <asm/uaccess.h>
62928
62929@@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
62930 {
62931 struct compat_robust_list_head __user *head;
62932 unsigned long ret;
62933- const struct cred *cred = current_cred(), *pcred;
62934+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
62935+ const struct cred *cred = current_cred();
62936+ const struct cred *pcred;
62937+#endif
62938
62939 if (!futex_cmpxchg_enabled)
62940 return -ENOSYS;
62941@@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
62942 if (!p)
62943 goto err_unlock;
62944 ret = -EPERM;
62945+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62946+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
62947+ goto err_unlock;
62948+#else
62949 pcred = __task_cred(p);
62950 /* If victim is in different user_ns, then uids are not
62951 comparable, so we must have CAP_SYS_PTRACE */
62952@@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
62953 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
62954 goto err_unlock;
62955 ok:
62956+#endif
62957 head = p->compat_robust_list;
62958 rcu_read_unlock();
62959 }
62960diff -urNp linux-3.0.7/kernel/gcov/base.c linux-3.0.7/kernel/gcov/base.c
62961--- linux-3.0.7/kernel/gcov/base.c 2011-07-21 22:17:23.000000000 -0400
62962+++ linux-3.0.7/kernel/gcov/base.c 2011-08-23 21:47:56.000000000 -0400
62963@@ -102,11 +102,6 @@ void gcov_enable_events(void)
62964 }
62965
62966 #ifdef CONFIG_MODULES
62967-static inline int within(void *addr, void *start, unsigned long size)
62968-{
62969- return ((addr >= start) && (addr < start + size));
62970-}
62971-
62972 /* Update list and generate events when modules are unloaded. */
62973 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
62974 void *data)
62975@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
62976 prev = NULL;
62977 /* Remove entries located in module from linked list. */
62978 for (info = gcov_info_head; info; info = info->next) {
62979- if (within(info, mod->module_core, mod->core_size)) {
62980+ if (within_module_core_rw((unsigned long)info, mod)) {
62981 if (prev)
62982 prev->next = info->next;
62983 else
62984diff -urNp linux-3.0.7/kernel/hrtimer.c linux-3.0.7/kernel/hrtimer.c
62985--- linux-3.0.7/kernel/hrtimer.c 2011-07-21 22:17:23.000000000 -0400
62986+++ linux-3.0.7/kernel/hrtimer.c 2011-08-23 21:47:56.000000000 -0400
62987@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
62988 local_irq_restore(flags);
62989 }
62990
62991-static void run_hrtimer_softirq(struct softirq_action *h)
62992+static void run_hrtimer_softirq(void)
62993 {
62994 hrtimer_peek_ahead_timers();
62995 }
62996diff -urNp linux-3.0.7/kernel/jump_label.c linux-3.0.7/kernel/jump_label.c
62997--- linux-3.0.7/kernel/jump_label.c 2011-07-21 22:17:23.000000000 -0400
62998+++ linux-3.0.7/kernel/jump_label.c 2011-08-23 21:47:56.000000000 -0400
62999@@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entr
63000
63001 size = (((unsigned long)stop - (unsigned long)start)
63002 / sizeof(struct jump_entry));
63003+ pax_open_kernel();
63004 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
63005+ pax_close_kernel();
63006 }
63007
63008 static void jump_label_update(struct jump_label_key *key, int enable);
63009@@ -297,10 +299,12 @@ static void jump_label_invalidate_module
63010 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
63011 struct jump_entry *iter;
63012
63013+ pax_open_kernel();
63014 for (iter = iter_start; iter < iter_stop; iter++) {
63015 if (within_module_init(iter->code, mod))
63016 iter->code = 0;
63017 }
63018+ pax_close_kernel();
63019 }
63020
63021 static int
63022diff -urNp linux-3.0.7/kernel/kallsyms.c linux-3.0.7/kernel/kallsyms.c
63023--- linux-3.0.7/kernel/kallsyms.c 2011-07-21 22:17:23.000000000 -0400
63024+++ linux-3.0.7/kernel/kallsyms.c 2011-08-23 21:48:14.000000000 -0400
63025@@ -11,6 +11,9 @@
63026 * Changed the compression method from stem compression to "table lookup"
63027 * compression (see scripts/kallsyms.c for a more complete description)
63028 */
63029+#ifdef CONFIG_GRKERNSEC_HIDESYM
63030+#define __INCLUDED_BY_HIDESYM 1
63031+#endif
63032 #include <linux/kallsyms.h>
63033 #include <linux/module.h>
63034 #include <linux/init.h>
63035@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
63036
63037 static inline int is_kernel_inittext(unsigned long addr)
63038 {
63039+ if (system_state != SYSTEM_BOOTING)
63040+ return 0;
63041+
63042 if (addr >= (unsigned long)_sinittext
63043 && addr <= (unsigned long)_einittext)
63044 return 1;
63045 return 0;
63046 }
63047
63048+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63049+#ifdef CONFIG_MODULES
63050+static inline int is_module_text(unsigned long addr)
63051+{
63052+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
63053+ return 1;
63054+
63055+ addr = ktla_ktva(addr);
63056+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
63057+}
63058+#else
63059+static inline int is_module_text(unsigned long addr)
63060+{
63061+ return 0;
63062+}
63063+#endif
63064+#endif
63065+
63066 static inline int is_kernel_text(unsigned long addr)
63067 {
63068 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
63069@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
63070
63071 static inline int is_kernel(unsigned long addr)
63072 {
63073+
63074+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63075+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
63076+ return 1;
63077+
63078+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
63079+#else
63080 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
63081+#endif
63082+
63083 return 1;
63084 return in_gate_area_no_mm(addr);
63085 }
63086
63087 static int is_ksym_addr(unsigned long addr)
63088 {
63089+
63090+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63091+ if (is_module_text(addr))
63092+ return 0;
63093+#endif
63094+
63095 if (all_var)
63096 return is_kernel(addr);
63097
63098@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
63099
63100 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
63101 {
63102- iter->name[0] = '\0';
63103 iter->nameoff = get_symbol_offset(new_pos);
63104 iter->pos = new_pos;
63105 }
63106@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
63107 {
63108 struct kallsym_iter *iter = m->private;
63109
63110+#ifdef CONFIG_GRKERNSEC_HIDESYM
63111+ if (current_uid())
63112+ return 0;
63113+#endif
63114+
63115 /* Some debugging symbols have no name. Ignore them. */
63116 if (!iter->name[0])
63117 return 0;
63118@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
63119 struct kallsym_iter *iter;
63120 int ret;
63121
63122- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
63123+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
63124 if (!iter)
63125 return -ENOMEM;
63126 reset_iter(iter, 0);
63127diff -urNp linux-3.0.7/kernel/kexec.c linux-3.0.7/kernel/kexec.c
63128--- linux-3.0.7/kernel/kexec.c 2011-07-21 22:17:23.000000000 -0400
63129+++ linux-3.0.7/kernel/kexec.c 2011-10-06 04:17:55.000000000 -0400
63130@@ -1033,7 +1033,8 @@ asmlinkage long compat_sys_kexec_load(un
63131 unsigned long flags)
63132 {
63133 struct compat_kexec_segment in;
63134- struct kexec_segment out, __user *ksegments;
63135+ struct kexec_segment out;
63136+ struct kexec_segment __user *ksegments;
63137 unsigned long i, result;
63138
63139 /* Don't allow clients that don't understand the native
63140diff -urNp linux-3.0.7/kernel/kmod.c linux-3.0.7/kernel/kmod.c
63141--- linux-3.0.7/kernel/kmod.c 2011-07-21 22:17:23.000000000 -0400
63142+++ linux-3.0.7/kernel/kmod.c 2011-10-06 04:17:55.000000000 -0400
63143@@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
63144 * If module auto-loading support is disabled then this function
63145 * becomes a no-operation.
63146 */
63147-int __request_module(bool wait, const char *fmt, ...)
63148+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
63149 {
63150- va_list args;
63151 char module_name[MODULE_NAME_LEN];
63152 unsigned int max_modprobes;
63153 int ret;
63154- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
63155+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
63156 static char *envp[] = { "HOME=/",
63157 "TERM=linux",
63158 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
63159@@ -88,9 +87,7 @@ int __request_module(bool wait, const ch
63160 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
63161 static int kmod_loop_msg;
63162
63163- va_start(args, fmt);
63164- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
63165- va_end(args);
63166+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
63167 if (ret >= MODULE_NAME_LEN)
63168 return -ENAMETOOLONG;
63169
63170@@ -98,6 +95,20 @@ int __request_module(bool wait, const ch
63171 if (ret)
63172 return ret;
63173
63174+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63175+ if (!current_uid()) {
63176+ /* hack to workaround consolekit/udisks stupidity */
63177+ read_lock(&tasklist_lock);
63178+ if (!strcmp(current->comm, "mount") &&
63179+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
63180+ read_unlock(&tasklist_lock);
63181+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
63182+ return -EPERM;
63183+ }
63184+ read_unlock(&tasklist_lock);
63185+ }
63186+#endif
63187+
63188 /* If modprobe needs a service that is in a module, we get a recursive
63189 * loop. Limit the number of running kmod threads to max_threads/2 or
63190 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
63191@@ -131,6 +142,47 @@ int __request_module(bool wait, const ch
63192 atomic_dec(&kmod_concurrent);
63193 return ret;
63194 }
63195+
63196+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
63197+{
63198+ va_list args;
63199+ int ret;
63200+
63201+ va_start(args, fmt);
63202+ ret = ____request_module(wait, module_param, fmt, args);
63203+ va_end(args);
63204+
63205+ return ret;
63206+}
63207+
63208+int __request_module(bool wait, const char *fmt, ...)
63209+{
63210+ va_list args;
63211+ int ret;
63212+
63213+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63214+ if (current_uid()) {
63215+ char module_param[MODULE_NAME_LEN];
63216+
63217+ memset(module_param, 0, sizeof(module_param));
63218+
63219+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
63220+
63221+ va_start(args, fmt);
63222+ ret = ____request_module(wait, module_param, fmt, args);
63223+ va_end(args);
63224+
63225+ return ret;
63226+ }
63227+#endif
63228+
63229+ va_start(args, fmt);
63230+ ret = ____request_module(wait, NULL, fmt, args);
63231+ va_end(args);
63232+
63233+ return ret;
63234+}
63235+
63236 EXPORT_SYMBOL(__request_module);
63237 #endif /* CONFIG_MODULES */
63238
63239@@ -220,7 +272,7 @@ static int wait_for_helper(void *data)
63240 *
63241 * Thus the __user pointer cast is valid here.
63242 */
63243- sys_wait4(pid, (int __user *)&ret, 0, NULL);
63244+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
63245
63246 /*
63247 * If ret is 0, either ____call_usermodehelper failed and the
63248diff -urNp linux-3.0.7/kernel/kprobes.c linux-3.0.7/kernel/kprobes.c
63249--- linux-3.0.7/kernel/kprobes.c 2011-07-21 22:17:23.000000000 -0400
63250+++ linux-3.0.7/kernel/kprobes.c 2011-08-23 21:47:56.000000000 -0400
63251@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
63252 * kernel image and loaded module images reside. This is required
63253 * so x86_64 can correctly handle the %rip-relative fixups.
63254 */
63255- kip->insns = module_alloc(PAGE_SIZE);
63256+ kip->insns = module_alloc_exec(PAGE_SIZE);
63257 if (!kip->insns) {
63258 kfree(kip);
63259 return NULL;
63260@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
63261 */
63262 if (!list_is_singular(&kip->list)) {
63263 list_del(&kip->list);
63264- module_free(NULL, kip->insns);
63265+ module_free_exec(NULL, kip->insns);
63266 kfree(kip);
63267 }
63268 return 1;
63269@@ -1936,7 +1936,7 @@ static int __init init_kprobes(void)
63270 {
63271 int i, err = 0;
63272 unsigned long offset = 0, size = 0;
63273- char *modname, namebuf[128];
63274+ char *modname, namebuf[KSYM_NAME_LEN];
63275 const char *symbol_name;
63276 void *addr;
63277 struct kprobe_blackpoint *kb;
63278@@ -2062,7 +2062,7 @@ static int __kprobes show_kprobe_addr(st
63279 const char *sym = NULL;
63280 unsigned int i = *(loff_t *) v;
63281 unsigned long offset = 0;
63282- char *modname, namebuf[128];
63283+ char *modname, namebuf[KSYM_NAME_LEN];
63284
63285 head = &kprobe_table[i];
63286 preempt_disable();
63287diff -urNp linux-3.0.7/kernel/lockdep.c linux-3.0.7/kernel/lockdep.c
63288--- linux-3.0.7/kernel/lockdep.c 2011-07-21 22:17:23.000000000 -0400
63289+++ linux-3.0.7/kernel/lockdep.c 2011-08-23 21:47:56.000000000 -0400
63290@@ -583,6 +583,10 @@ static int static_obj(void *obj)
63291 end = (unsigned long) &_end,
63292 addr = (unsigned long) obj;
63293
63294+#ifdef CONFIG_PAX_KERNEXEC
63295+ start = ktla_ktva(start);
63296+#endif
63297+
63298 /*
63299 * static variable?
63300 */
63301@@ -718,6 +722,7 @@ register_lock_class(struct lockdep_map *
63302 if (!static_obj(lock->key)) {
63303 debug_locks_off();
63304 printk("INFO: trying to register non-static key.\n");
63305+ printk("lock:%pS key:%pS.\n", lock, lock->key);
63306 printk("the code is fine but needs lockdep annotation.\n");
63307 printk("turning off the locking correctness validator.\n");
63308 dump_stack();
63309@@ -2936,7 +2941,7 @@ static int __lock_acquire(struct lockdep
63310 if (!class)
63311 return 0;
63312 }
63313- atomic_inc((atomic_t *)&class->ops);
63314+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
63315 if (very_verbose(class)) {
63316 printk("\nacquire class [%p] %s", class->key, class->name);
63317 if (class->name_version > 1)
63318diff -urNp linux-3.0.7/kernel/lockdep_proc.c linux-3.0.7/kernel/lockdep_proc.c
63319--- linux-3.0.7/kernel/lockdep_proc.c 2011-07-21 22:17:23.000000000 -0400
63320+++ linux-3.0.7/kernel/lockdep_proc.c 2011-08-23 21:47:56.000000000 -0400
63321@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
63322
63323 static void print_name(struct seq_file *m, struct lock_class *class)
63324 {
63325- char str[128];
63326+ char str[KSYM_NAME_LEN];
63327 const char *name = class->name;
63328
63329 if (!name) {
63330diff -urNp linux-3.0.7/kernel/module.c linux-3.0.7/kernel/module.c
63331--- linux-3.0.7/kernel/module.c 2011-07-21 22:17:23.000000000 -0400
63332+++ linux-3.0.7/kernel/module.c 2011-08-23 21:48:14.000000000 -0400
63333@@ -58,6 +58,7 @@
63334 #include <linux/jump_label.h>
63335 #include <linux/pfn.h>
63336 #include <linux/bsearch.h>
63337+#include <linux/grsecurity.h>
63338
63339 #define CREATE_TRACE_POINTS
63340 #include <trace/events/module.h>
63341@@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
63342
63343 /* Bounds of module allocation, for speeding __module_address.
63344 * Protected by module_mutex. */
63345-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
63346+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
63347+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
63348
63349 int register_module_notifier(struct notifier_block * nb)
63350 {
63351@@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(cons
63352 return true;
63353
63354 list_for_each_entry_rcu(mod, &modules, list) {
63355- struct symsearch arr[] = {
63356+ struct symsearch modarr[] = {
63357 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
63358 NOT_GPL_ONLY, false },
63359 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
63360@@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(cons
63361 #endif
63362 };
63363
63364- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
63365+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
63366 return true;
63367 }
63368 return false;
63369@@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(
63370 static int percpu_modalloc(struct module *mod,
63371 unsigned long size, unsigned long align)
63372 {
63373- if (align > PAGE_SIZE) {
63374+ if (align-1 >= PAGE_SIZE) {
63375 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
63376 mod->name, align, PAGE_SIZE);
63377 align = PAGE_SIZE;
63378@@ -1166,7 +1168,7 @@ resolve_symbol_wait(struct module *mod,
63379 */
63380 #ifdef CONFIG_SYSFS
63381
63382-#ifdef CONFIG_KALLSYMS
63383+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63384 static inline bool sect_empty(const Elf_Shdr *sect)
63385 {
63386 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
63387@@ -1632,21 +1634,21 @@ static void set_section_ro_nx(void *base
63388
63389 static void unset_module_core_ro_nx(struct module *mod)
63390 {
63391- set_page_attributes(mod->module_core + mod->core_text_size,
63392- mod->module_core + mod->core_size,
63393+ set_page_attributes(mod->module_core_rw,
63394+ mod->module_core_rw + mod->core_size_rw,
63395 set_memory_x);
63396- set_page_attributes(mod->module_core,
63397- mod->module_core + mod->core_ro_size,
63398+ set_page_attributes(mod->module_core_rx,
63399+ mod->module_core_rx + mod->core_size_rx,
63400 set_memory_rw);
63401 }
63402
63403 static void unset_module_init_ro_nx(struct module *mod)
63404 {
63405- set_page_attributes(mod->module_init + mod->init_text_size,
63406- mod->module_init + mod->init_size,
63407+ set_page_attributes(mod->module_init_rw,
63408+ mod->module_init_rw + mod->init_size_rw,
63409 set_memory_x);
63410- set_page_attributes(mod->module_init,
63411- mod->module_init + mod->init_ro_size,
63412+ set_page_attributes(mod->module_init_rx,
63413+ mod->module_init_rx + mod->init_size_rx,
63414 set_memory_rw);
63415 }
63416
63417@@ -1657,14 +1659,14 @@ void set_all_modules_text_rw(void)
63418
63419 mutex_lock(&module_mutex);
63420 list_for_each_entry_rcu(mod, &modules, list) {
63421- if ((mod->module_core) && (mod->core_text_size)) {
63422- set_page_attributes(mod->module_core,
63423- mod->module_core + mod->core_text_size,
63424+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
63425+ set_page_attributes(mod->module_core_rx,
63426+ mod->module_core_rx + mod->core_size_rx,
63427 set_memory_rw);
63428 }
63429- if ((mod->module_init) && (mod->init_text_size)) {
63430- set_page_attributes(mod->module_init,
63431- mod->module_init + mod->init_text_size,
63432+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
63433+ set_page_attributes(mod->module_init_rx,
63434+ mod->module_init_rx + mod->init_size_rx,
63435 set_memory_rw);
63436 }
63437 }
63438@@ -1678,14 +1680,14 @@ void set_all_modules_text_ro(void)
63439
63440 mutex_lock(&module_mutex);
63441 list_for_each_entry_rcu(mod, &modules, list) {
63442- if ((mod->module_core) && (mod->core_text_size)) {
63443- set_page_attributes(mod->module_core,
63444- mod->module_core + mod->core_text_size,
63445+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
63446+ set_page_attributes(mod->module_core_rx,
63447+ mod->module_core_rx + mod->core_size_rx,
63448 set_memory_ro);
63449 }
63450- if ((mod->module_init) && (mod->init_text_size)) {
63451- set_page_attributes(mod->module_init,
63452- mod->module_init + mod->init_text_size,
63453+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
63454+ set_page_attributes(mod->module_init_rx,
63455+ mod->module_init_rx + mod->init_size_rx,
63456 set_memory_ro);
63457 }
63458 }
63459@@ -1722,16 +1724,19 @@ static void free_module(struct module *m
63460
63461 /* This may be NULL, but that's OK */
63462 unset_module_init_ro_nx(mod);
63463- module_free(mod, mod->module_init);
63464+ module_free(mod, mod->module_init_rw);
63465+ module_free_exec(mod, mod->module_init_rx);
63466 kfree(mod->args);
63467 percpu_modfree(mod);
63468
63469 /* Free lock-classes: */
63470- lockdep_free_key_range(mod->module_core, mod->core_size);
63471+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
63472+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
63473
63474 /* Finally, free the core (containing the module structure) */
63475 unset_module_core_ro_nx(mod);
63476- module_free(mod, mod->module_core);
63477+ module_free_exec(mod, mod->module_core_rx);
63478+ module_free(mod, mod->module_core_rw);
63479
63480 #ifdef CONFIG_MPU
63481 update_protections(current->mm);
63482@@ -1800,10 +1805,31 @@ static int simplify_symbols(struct modul
63483 unsigned int i;
63484 int ret = 0;
63485 const struct kernel_symbol *ksym;
63486+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63487+ int is_fs_load = 0;
63488+ int register_filesystem_found = 0;
63489+ char *p;
63490+
63491+ p = strstr(mod->args, "grsec_modharden_fs");
63492+ if (p) {
63493+ char *endptr = p + strlen("grsec_modharden_fs");
63494+ /* copy \0 as well */
63495+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
63496+ is_fs_load = 1;
63497+ }
63498+#endif
63499
63500 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
63501 const char *name = info->strtab + sym[i].st_name;
63502
63503+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63504+ /* it's a real shame this will never get ripped and copied
63505+ upstream! ;(
63506+ */
63507+ if (is_fs_load && !strcmp(name, "register_filesystem"))
63508+ register_filesystem_found = 1;
63509+#endif
63510+
63511 switch (sym[i].st_shndx) {
63512 case SHN_COMMON:
63513 /* We compiled with -fno-common. These are not
63514@@ -1824,7 +1850,9 @@ static int simplify_symbols(struct modul
63515 ksym = resolve_symbol_wait(mod, info, name);
63516 /* Ok if resolved. */
63517 if (ksym && !IS_ERR(ksym)) {
63518+ pax_open_kernel();
63519 sym[i].st_value = ksym->value;
63520+ pax_close_kernel();
63521 break;
63522 }
63523
63524@@ -1843,11 +1871,20 @@ static int simplify_symbols(struct modul
63525 secbase = (unsigned long)mod_percpu(mod);
63526 else
63527 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
63528+ pax_open_kernel();
63529 sym[i].st_value += secbase;
63530+ pax_close_kernel();
63531 break;
63532 }
63533 }
63534
63535+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63536+ if (is_fs_load && !register_filesystem_found) {
63537+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
63538+ ret = -EPERM;
63539+ }
63540+#endif
63541+
63542 return ret;
63543 }
63544
63545@@ -1931,22 +1968,12 @@ static void layout_sections(struct modul
63546 || s->sh_entsize != ~0UL
63547 || strstarts(sname, ".init"))
63548 continue;
63549- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
63550+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63551+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
63552+ else
63553+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
63554 DEBUGP("\t%s\n", name);
63555 }
63556- switch (m) {
63557- case 0: /* executable */
63558- mod->core_size = debug_align(mod->core_size);
63559- mod->core_text_size = mod->core_size;
63560- break;
63561- case 1: /* RO: text and ro-data */
63562- mod->core_size = debug_align(mod->core_size);
63563- mod->core_ro_size = mod->core_size;
63564- break;
63565- case 3: /* whole core */
63566- mod->core_size = debug_align(mod->core_size);
63567- break;
63568- }
63569 }
63570
63571 DEBUGP("Init section allocation order:\n");
63572@@ -1960,23 +1987,13 @@ static void layout_sections(struct modul
63573 || s->sh_entsize != ~0UL
63574 || !strstarts(sname, ".init"))
63575 continue;
63576- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
63577- | INIT_OFFSET_MASK);
63578+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63579+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
63580+ else
63581+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
63582+ s->sh_entsize |= INIT_OFFSET_MASK;
63583 DEBUGP("\t%s\n", sname);
63584 }
63585- switch (m) {
63586- case 0: /* executable */
63587- mod->init_size = debug_align(mod->init_size);
63588- mod->init_text_size = mod->init_size;
63589- break;
63590- case 1: /* RO: text and ro-data */
63591- mod->init_size = debug_align(mod->init_size);
63592- mod->init_ro_size = mod->init_size;
63593- break;
63594- case 3: /* whole init */
63595- mod->init_size = debug_align(mod->init_size);
63596- break;
63597- }
63598 }
63599 }
63600
63601@@ -2141,7 +2158,7 @@ static void layout_symtab(struct module
63602
63603 /* Put symbol section at end of init part of module. */
63604 symsect->sh_flags |= SHF_ALLOC;
63605- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
63606+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
63607 info->index.sym) | INIT_OFFSET_MASK;
63608 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
63609
63610@@ -2158,19 +2175,19 @@ static void layout_symtab(struct module
63611 }
63612
63613 /* Append room for core symbols at end of core part. */
63614- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
63615- mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
63616+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
63617+ mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
63618
63619 /* Put string table section at end of init part of module. */
63620 strsect->sh_flags |= SHF_ALLOC;
63621- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
63622+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
63623 info->index.str) | INIT_OFFSET_MASK;
63624 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
63625
63626 /* Append room for core symbols' strings at end of core part. */
63627- info->stroffs = mod->core_size;
63628+ info->stroffs = mod->core_size_rx;
63629 __set_bit(0, info->strmap);
63630- mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
63631+ mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
63632 }
63633
63634 static void add_kallsyms(struct module *mod, const struct load_info *info)
63635@@ -2186,11 +2203,13 @@ static void add_kallsyms(struct module *
63636 /* Make sure we get permanent strtab: don't use info->strtab. */
63637 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
63638
63639+ pax_open_kernel();
63640+
63641 /* Set types up while we still have access to sections. */
63642 for (i = 0; i < mod->num_symtab; i++)
63643 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
63644
63645- mod->core_symtab = dst = mod->module_core + info->symoffs;
63646+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
63647 src = mod->symtab;
63648 *dst = *src;
63649 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
63650@@ -2203,10 +2222,12 @@ static void add_kallsyms(struct module *
63651 }
63652 mod->core_num_syms = ndst;
63653
63654- mod->core_strtab = s = mod->module_core + info->stroffs;
63655+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
63656 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
63657 if (test_bit(i, info->strmap))
63658 *++s = mod->strtab[i];
63659+
63660+ pax_close_kernel();
63661 }
63662 #else
63663 static inline void layout_symtab(struct module *mod, struct load_info *info)
63664@@ -2235,17 +2256,33 @@ static void dynamic_debug_remove(struct
63665 ddebug_remove_module(debug->modname);
63666 }
63667
63668-static void *module_alloc_update_bounds(unsigned long size)
63669+static void *module_alloc_update_bounds_rw(unsigned long size)
63670 {
63671 void *ret = module_alloc(size);
63672
63673 if (ret) {
63674 mutex_lock(&module_mutex);
63675 /* Update module bounds. */
63676- if ((unsigned long)ret < module_addr_min)
63677- module_addr_min = (unsigned long)ret;
63678- if ((unsigned long)ret + size > module_addr_max)
63679- module_addr_max = (unsigned long)ret + size;
63680+ if ((unsigned long)ret < module_addr_min_rw)
63681+ module_addr_min_rw = (unsigned long)ret;
63682+ if ((unsigned long)ret + size > module_addr_max_rw)
63683+ module_addr_max_rw = (unsigned long)ret + size;
63684+ mutex_unlock(&module_mutex);
63685+ }
63686+ return ret;
63687+}
63688+
63689+static void *module_alloc_update_bounds_rx(unsigned long size)
63690+{
63691+ void *ret = module_alloc_exec(size);
63692+
63693+ if (ret) {
63694+ mutex_lock(&module_mutex);
63695+ /* Update module bounds. */
63696+ if ((unsigned long)ret < module_addr_min_rx)
63697+ module_addr_min_rx = (unsigned long)ret;
63698+ if ((unsigned long)ret + size > module_addr_max_rx)
63699+ module_addr_max_rx = (unsigned long)ret + size;
63700 mutex_unlock(&module_mutex);
63701 }
63702 return ret;
63703@@ -2538,7 +2575,7 @@ static int move_module(struct module *mo
63704 void *ptr;
63705
63706 /* Do the allocs. */
63707- ptr = module_alloc_update_bounds(mod->core_size);
63708+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
63709 /*
63710 * The pointer to this block is stored in the module structure
63711 * which is inside the block. Just mark it as not being a
63712@@ -2548,23 +2585,50 @@ static int move_module(struct module *mo
63713 if (!ptr)
63714 return -ENOMEM;
63715
63716- memset(ptr, 0, mod->core_size);
63717- mod->module_core = ptr;
63718+ memset(ptr, 0, mod->core_size_rw);
63719+ mod->module_core_rw = ptr;
63720
63721- ptr = module_alloc_update_bounds(mod->init_size);
63722+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
63723 /*
63724 * The pointer to this block is stored in the module structure
63725 * which is inside the block. This block doesn't need to be
63726 * scanned as it contains data and code that will be freed
63727 * after the module is initialized.
63728 */
63729- kmemleak_ignore(ptr);
63730- if (!ptr && mod->init_size) {
63731- module_free(mod, mod->module_core);
63732+ kmemleak_not_leak(ptr);
63733+ if (!ptr && mod->init_size_rw) {
63734+ module_free(mod, mod->module_core_rw);
63735 return -ENOMEM;
63736 }
63737- memset(ptr, 0, mod->init_size);
63738- mod->module_init = ptr;
63739+ memset(ptr, 0, mod->init_size_rw);
63740+ mod->module_init_rw = ptr;
63741+
63742+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
63743+ kmemleak_not_leak(ptr);
63744+ if (!ptr) {
63745+ module_free(mod, mod->module_init_rw);
63746+ module_free(mod, mod->module_core_rw);
63747+ return -ENOMEM;
63748+ }
63749+
63750+ pax_open_kernel();
63751+ memset(ptr, 0, mod->core_size_rx);
63752+ pax_close_kernel();
63753+ mod->module_core_rx = ptr;
63754+
63755+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
63756+ kmemleak_not_leak(ptr);
63757+ if (!ptr && mod->init_size_rx) {
63758+ module_free_exec(mod, mod->module_core_rx);
63759+ module_free(mod, mod->module_init_rw);
63760+ module_free(mod, mod->module_core_rw);
63761+ return -ENOMEM;
63762+ }
63763+
63764+ pax_open_kernel();
63765+ memset(ptr, 0, mod->init_size_rx);
63766+ pax_close_kernel();
63767+ mod->module_init_rx = ptr;
63768
63769 /* Transfer each section which specifies SHF_ALLOC */
63770 DEBUGP("final section addresses:\n");
63771@@ -2575,16 +2639,45 @@ static int move_module(struct module *mo
63772 if (!(shdr->sh_flags & SHF_ALLOC))
63773 continue;
63774
63775- if (shdr->sh_entsize & INIT_OFFSET_MASK)
63776- dest = mod->module_init
63777- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
63778- else
63779- dest = mod->module_core + shdr->sh_entsize;
63780+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
63781+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
63782+ dest = mod->module_init_rw
63783+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
63784+ else
63785+ dest = mod->module_init_rx
63786+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
63787+ } else {
63788+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
63789+ dest = mod->module_core_rw + shdr->sh_entsize;
63790+ else
63791+ dest = mod->module_core_rx + shdr->sh_entsize;
63792+ }
63793+
63794+ if (shdr->sh_type != SHT_NOBITS) {
63795+
63796+#ifdef CONFIG_PAX_KERNEXEC
63797+#ifdef CONFIG_X86_64
63798+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
63799+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
63800+#endif
63801+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
63802+ pax_open_kernel();
63803+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
63804+ pax_close_kernel();
63805+ } else
63806+#endif
63807
63808- if (shdr->sh_type != SHT_NOBITS)
63809 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
63810+ }
63811 /* Update sh_addr to point to copy in image. */
63812- shdr->sh_addr = (unsigned long)dest;
63813+
63814+#ifdef CONFIG_PAX_KERNEXEC
63815+ if (shdr->sh_flags & SHF_EXECINSTR)
63816+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
63817+ else
63818+#endif
63819+
63820+ shdr->sh_addr = (unsigned long)dest;
63821 DEBUGP("\t0x%lx %s\n",
63822 shdr->sh_addr, info->secstrings + shdr->sh_name);
63823 }
63824@@ -2635,12 +2728,12 @@ static void flush_module_icache(const st
63825 * Do it before processing of module parameters, so the module
63826 * can provide parameter accessor functions of its own.
63827 */
63828- if (mod->module_init)
63829- flush_icache_range((unsigned long)mod->module_init,
63830- (unsigned long)mod->module_init
63831- + mod->init_size);
63832- flush_icache_range((unsigned long)mod->module_core,
63833- (unsigned long)mod->module_core + mod->core_size);
63834+ if (mod->module_init_rx)
63835+ flush_icache_range((unsigned long)mod->module_init_rx,
63836+ (unsigned long)mod->module_init_rx
63837+ + mod->init_size_rx);
63838+ flush_icache_range((unsigned long)mod->module_core_rx,
63839+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
63840
63841 set_fs(old_fs);
63842 }
63843@@ -2712,8 +2805,10 @@ static void module_deallocate(struct mod
63844 {
63845 kfree(info->strmap);
63846 percpu_modfree(mod);
63847- module_free(mod, mod->module_init);
63848- module_free(mod, mod->module_core);
63849+ module_free_exec(mod, mod->module_init_rx);
63850+ module_free_exec(mod, mod->module_core_rx);
63851+ module_free(mod, mod->module_init_rw);
63852+ module_free(mod, mod->module_core_rw);
63853 }
63854
63855 static int post_relocation(struct module *mod, const struct load_info *info)
63856@@ -2770,9 +2865,38 @@ static struct module *load_module(void _
63857 if (err)
63858 goto free_unload;
63859
63860+ /* Now copy in args */
63861+ mod->args = strndup_user(uargs, ~0UL >> 1);
63862+ if (IS_ERR(mod->args)) {
63863+ err = PTR_ERR(mod->args);
63864+ goto free_unload;
63865+ }
63866+
63867 /* Set up MODINFO_ATTR fields */
63868 setup_modinfo(mod, &info);
63869
63870+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63871+ {
63872+ char *p, *p2;
63873+
63874+ if (strstr(mod->args, "grsec_modharden_netdev")) {
63875+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
63876+ err = -EPERM;
63877+ goto free_modinfo;
63878+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
63879+ p += strlen("grsec_modharden_normal");
63880+ p2 = strstr(p, "_");
63881+ if (p2) {
63882+ *p2 = '\0';
63883+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
63884+ *p2 = '_';
63885+ }
63886+ err = -EPERM;
63887+ goto free_modinfo;
63888+ }
63889+ }
63890+#endif
63891+
63892 /* Fix up syms, so that st_value is a pointer to location. */
63893 err = simplify_symbols(mod, &info);
63894 if (err < 0)
63895@@ -2788,13 +2912,6 @@ static struct module *load_module(void _
63896
63897 flush_module_icache(mod);
63898
63899- /* Now copy in args */
63900- mod->args = strndup_user(uargs, ~0UL >> 1);
63901- if (IS_ERR(mod->args)) {
63902- err = PTR_ERR(mod->args);
63903- goto free_arch_cleanup;
63904- }
63905-
63906 /* Mark state as coming so strong_try_module_get() ignores us. */
63907 mod->state = MODULE_STATE_COMING;
63908
63909@@ -2854,11 +2971,10 @@ static struct module *load_module(void _
63910 unlock:
63911 mutex_unlock(&module_mutex);
63912 synchronize_sched();
63913- kfree(mod->args);
63914- free_arch_cleanup:
63915 module_arch_cleanup(mod);
63916 free_modinfo:
63917 free_modinfo(mod);
63918+ kfree(mod->args);
63919 free_unload:
63920 module_unload_free(mod);
63921 free_module:
63922@@ -2899,16 +3015,16 @@ SYSCALL_DEFINE3(init_module, void __user
63923 MODULE_STATE_COMING, mod);
63924
63925 /* Set RO and NX regions for core */
63926- set_section_ro_nx(mod->module_core,
63927- mod->core_text_size,
63928- mod->core_ro_size,
63929- mod->core_size);
63930+ set_section_ro_nx(mod->module_core_rx,
63931+ mod->core_size_rx,
63932+ mod->core_size_rx,
63933+ mod->core_size_rx);
63934
63935 /* Set RO and NX regions for init */
63936- set_section_ro_nx(mod->module_init,
63937- mod->init_text_size,
63938- mod->init_ro_size,
63939- mod->init_size);
63940+ set_section_ro_nx(mod->module_init_rx,
63941+ mod->init_size_rx,
63942+ mod->init_size_rx,
63943+ mod->init_size_rx);
63944
63945 do_mod_ctors(mod);
63946 /* Start the module */
63947@@ -2954,11 +3070,12 @@ SYSCALL_DEFINE3(init_module, void __user
63948 mod->strtab = mod->core_strtab;
63949 #endif
63950 unset_module_init_ro_nx(mod);
63951- module_free(mod, mod->module_init);
63952- mod->module_init = NULL;
63953- mod->init_size = 0;
63954- mod->init_ro_size = 0;
63955- mod->init_text_size = 0;
63956+ module_free(mod, mod->module_init_rw);
63957+ module_free_exec(mod, mod->module_init_rx);
63958+ mod->module_init_rw = NULL;
63959+ mod->module_init_rx = NULL;
63960+ mod->init_size_rw = 0;
63961+ mod->init_size_rx = 0;
63962 mutex_unlock(&module_mutex);
63963
63964 return 0;
63965@@ -2989,10 +3106,16 @@ static const char *get_ksymbol(struct mo
63966 unsigned long nextval;
63967
63968 /* At worse, next value is at end of module */
63969- if (within_module_init(addr, mod))
63970- nextval = (unsigned long)mod->module_init+mod->init_text_size;
63971+ if (within_module_init_rx(addr, mod))
63972+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
63973+ else if (within_module_init_rw(addr, mod))
63974+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
63975+ else if (within_module_core_rx(addr, mod))
63976+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
63977+ else if (within_module_core_rw(addr, mod))
63978+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
63979 else
63980- nextval = (unsigned long)mod->module_core+mod->core_text_size;
63981+ return NULL;
63982
63983 /* Scan for closest preceding symbol, and next symbol. (ELF
63984 starts real symbols at 1). */
63985@@ -3238,7 +3361,7 @@ static int m_show(struct seq_file *m, vo
63986 char buf[8];
63987
63988 seq_printf(m, "%s %u",
63989- mod->name, mod->init_size + mod->core_size);
63990+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
63991 print_unload_info(m, mod);
63992
63993 /* Informative for users. */
63994@@ -3247,7 +3370,7 @@ static int m_show(struct seq_file *m, vo
63995 mod->state == MODULE_STATE_COMING ? "Loading":
63996 "Live");
63997 /* Used by oprofile and other similar tools. */
63998- seq_printf(m, " 0x%pK", mod->module_core);
63999+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
64000
64001 /* Taints info */
64002 if (mod->taints)
64003@@ -3283,7 +3406,17 @@ static const struct file_operations proc
64004
64005 static int __init proc_modules_init(void)
64006 {
64007+#ifndef CONFIG_GRKERNSEC_HIDESYM
64008+#ifdef CONFIG_GRKERNSEC_PROC_USER
64009+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64010+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64011+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
64012+#else
64013 proc_create("modules", 0, NULL, &proc_modules_operations);
64014+#endif
64015+#else
64016+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64017+#endif
64018 return 0;
64019 }
64020 module_init(proc_modules_init);
64021@@ -3342,12 +3475,12 @@ struct module *__module_address(unsigned
64022 {
64023 struct module *mod;
64024
64025- if (addr < module_addr_min || addr > module_addr_max)
64026+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
64027+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
64028 return NULL;
64029
64030 list_for_each_entry_rcu(mod, &modules, list)
64031- if (within_module_core(addr, mod)
64032- || within_module_init(addr, mod))
64033+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
64034 return mod;
64035 return NULL;
64036 }
64037@@ -3381,11 +3514,20 @@ bool is_module_text_address(unsigned lon
64038 */
64039 struct module *__module_text_address(unsigned long addr)
64040 {
64041- struct module *mod = __module_address(addr);
64042+ struct module *mod;
64043+
64044+#ifdef CONFIG_X86_32
64045+ addr = ktla_ktva(addr);
64046+#endif
64047+
64048+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
64049+ return NULL;
64050+
64051+ mod = __module_address(addr);
64052+
64053 if (mod) {
64054 /* Make sure it's within the text section. */
64055- if (!within(addr, mod->module_init, mod->init_text_size)
64056- && !within(addr, mod->module_core, mod->core_text_size))
64057+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
64058 mod = NULL;
64059 }
64060 return mod;
64061diff -urNp linux-3.0.7/kernel/mutex.c linux-3.0.7/kernel/mutex.c
64062--- linux-3.0.7/kernel/mutex.c 2011-07-21 22:17:23.000000000 -0400
64063+++ linux-3.0.7/kernel/mutex.c 2011-08-23 21:47:56.000000000 -0400
64064@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock,
64065 spin_lock_mutex(&lock->wait_lock, flags);
64066
64067 debug_mutex_lock_common(lock, &waiter);
64068- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
64069+ debug_mutex_add_waiter(lock, &waiter, task);
64070
64071 /* add waiting tasks to the end of the waitqueue (FIFO): */
64072 list_add_tail(&waiter.list, &lock->wait_list);
64073@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock,
64074 * TASK_UNINTERRUPTIBLE case.)
64075 */
64076 if (unlikely(signal_pending_state(state, task))) {
64077- mutex_remove_waiter(lock, &waiter,
64078- task_thread_info(task));
64079+ mutex_remove_waiter(lock, &waiter, task);
64080 mutex_release(&lock->dep_map, 1, ip);
64081 spin_unlock_mutex(&lock->wait_lock, flags);
64082
64083@@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock,
64084 done:
64085 lock_acquired(&lock->dep_map, ip);
64086 /* got the lock - rejoice! */
64087- mutex_remove_waiter(lock, &waiter, current_thread_info());
64088+ mutex_remove_waiter(lock, &waiter, task);
64089 mutex_set_owner(lock);
64090
64091 /* set it to 0 if there are no waiters left: */
64092diff -urNp linux-3.0.7/kernel/mutex-debug.c linux-3.0.7/kernel/mutex-debug.c
64093--- linux-3.0.7/kernel/mutex-debug.c 2011-07-21 22:17:23.000000000 -0400
64094+++ linux-3.0.7/kernel/mutex-debug.c 2011-08-23 21:47:56.000000000 -0400
64095@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
64096 }
64097
64098 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64099- struct thread_info *ti)
64100+ struct task_struct *task)
64101 {
64102 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
64103
64104 /* Mark the current thread as blocked on the lock: */
64105- ti->task->blocked_on = waiter;
64106+ task->blocked_on = waiter;
64107 }
64108
64109 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64110- struct thread_info *ti)
64111+ struct task_struct *task)
64112 {
64113 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
64114- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
64115- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
64116- ti->task->blocked_on = NULL;
64117+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
64118+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
64119+ task->blocked_on = NULL;
64120
64121 list_del_init(&waiter->list);
64122 waiter->task = NULL;
64123diff -urNp linux-3.0.7/kernel/mutex-debug.h linux-3.0.7/kernel/mutex-debug.h
64124--- linux-3.0.7/kernel/mutex-debug.h 2011-07-21 22:17:23.000000000 -0400
64125+++ linux-3.0.7/kernel/mutex-debug.h 2011-08-23 21:47:56.000000000 -0400
64126@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(stru
64127 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
64128 extern void debug_mutex_add_waiter(struct mutex *lock,
64129 struct mutex_waiter *waiter,
64130- struct thread_info *ti);
64131+ struct task_struct *task);
64132 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64133- struct thread_info *ti);
64134+ struct task_struct *task);
64135 extern void debug_mutex_unlock(struct mutex *lock);
64136 extern void debug_mutex_init(struct mutex *lock, const char *name,
64137 struct lock_class_key *key);
64138diff -urNp linux-3.0.7/kernel/padata.c linux-3.0.7/kernel/padata.c
64139--- linux-3.0.7/kernel/padata.c 2011-07-21 22:17:23.000000000 -0400
64140+++ linux-3.0.7/kernel/padata.c 2011-08-23 21:47:56.000000000 -0400
64141@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
64142 padata->pd = pd;
64143 padata->cb_cpu = cb_cpu;
64144
64145- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
64146- atomic_set(&pd->seq_nr, -1);
64147+ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
64148+ atomic_set_unchecked(&pd->seq_nr, -1);
64149
64150- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
64151+ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
64152
64153 target_cpu = padata_cpu_hash(padata);
64154 queue = per_cpu_ptr(pd->pqueue, target_cpu);
64155@@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
64156 padata_init_pqueues(pd);
64157 padata_init_squeues(pd);
64158 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
64159- atomic_set(&pd->seq_nr, -1);
64160+ atomic_set_unchecked(&pd->seq_nr, -1);
64161 atomic_set(&pd->reorder_objects, 0);
64162 atomic_set(&pd->refcnt, 0);
64163 pd->pinst = pinst;
64164diff -urNp linux-3.0.7/kernel/panic.c linux-3.0.7/kernel/panic.c
64165--- linux-3.0.7/kernel/panic.c 2011-07-21 22:17:23.000000000 -0400
64166+++ linux-3.0.7/kernel/panic.c 2011-08-23 21:48:14.000000000 -0400
64167@@ -369,7 +369,7 @@ static void warn_slowpath_common(const c
64168 const char *board;
64169
64170 printk(KERN_WARNING "------------[ cut here ]------------\n");
64171- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
64172+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
64173 board = dmi_get_system_info(DMI_PRODUCT_NAME);
64174 if (board)
64175 printk(KERN_WARNING "Hardware name: %s\n", board);
64176@@ -424,7 +424,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
64177 */
64178 void __stack_chk_fail(void)
64179 {
64180- panic("stack-protector: Kernel stack is corrupted in: %p\n",
64181+ dump_stack();
64182+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
64183 __builtin_return_address(0));
64184 }
64185 EXPORT_SYMBOL(__stack_chk_fail);
64186diff -urNp linux-3.0.7/kernel/pid.c linux-3.0.7/kernel/pid.c
64187--- linux-3.0.7/kernel/pid.c 2011-07-21 22:17:23.000000000 -0400
64188+++ linux-3.0.7/kernel/pid.c 2011-08-23 21:48:14.000000000 -0400
64189@@ -33,6 +33,7 @@
64190 #include <linux/rculist.h>
64191 #include <linux/bootmem.h>
64192 #include <linux/hash.h>
64193+#include <linux/security.h>
64194 #include <linux/pid_namespace.h>
64195 #include <linux/init_task.h>
64196 #include <linux/syscalls.h>
64197@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
64198
64199 int pid_max = PID_MAX_DEFAULT;
64200
64201-#define RESERVED_PIDS 300
64202+#define RESERVED_PIDS 500
64203
64204 int pid_max_min = RESERVED_PIDS + 1;
64205 int pid_max_max = PID_MAX_LIMIT;
64206@@ -419,8 +420,15 @@ EXPORT_SYMBOL(pid_task);
64207 */
64208 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
64209 {
64210+ struct task_struct *task;
64211+
64212 rcu_lockdep_assert(rcu_read_lock_held());
64213- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64214+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64215+
64216+ if (gr_pid_is_chrooted(task))
64217+ return NULL;
64218+
64219+ return task;
64220 }
64221
64222 struct task_struct *find_task_by_vpid(pid_t vnr)
64223@@ -428,6 +436,12 @@ struct task_struct *find_task_by_vpid(pi
64224 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
64225 }
64226
64227+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
64228+{
64229+ rcu_lockdep_assert(rcu_read_lock_held());
64230+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
64231+}
64232+
64233 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
64234 {
64235 struct pid *pid;
64236diff -urNp linux-3.0.7/kernel/posix-cpu-timers.c linux-3.0.7/kernel/posix-cpu-timers.c
64237--- linux-3.0.7/kernel/posix-cpu-timers.c 2011-10-17 23:17:09.000000000 -0400
64238+++ linux-3.0.7/kernel/posix-cpu-timers.c 2011-10-17 23:17:19.000000000 -0400
64239@@ -6,6 +6,7 @@
64240 #include <linux/posix-timers.h>
64241 #include <linux/errno.h>
64242 #include <linux/math64.h>
64243+#include <linux/security.h>
64244 #include <asm/uaccess.h>
64245 #include <linux/kernel_stat.h>
64246 #include <trace/events/timer.h>
64247@@ -1605,14 +1606,14 @@ struct k_clock clock_posix_cpu = {
64248
64249 static __init int init_posix_cpu_timers(void)
64250 {
64251- struct k_clock process = {
64252+ static struct k_clock process = {
64253 .clock_getres = process_cpu_clock_getres,
64254 .clock_get = process_cpu_clock_get,
64255 .timer_create = process_cpu_timer_create,
64256 .nsleep = process_cpu_nsleep,
64257 .nsleep_restart = process_cpu_nsleep_restart,
64258 };
64259- struct k_clock thread = {
64260+ static struct k_clock thread = {
64261 .clock_getres = thread_cpu_clock_getres,
64262 .clock_get = thread_cpu_clock_get,
64263 .timer_create = thread_cpu_timer_create,
64264diff -urNp linux-3.0.7/kernel/posix-timers.c linux-3.0.7/kernel/posix-timers.c
64265--- linux-3.0.7/kernel/posix-timers.c 2011-07-21 22:17:23.000000000 -0400
64266+++ linux-3.0.7/kernel/posix-timers.c 2011-08-23 21:48:14.000000000 -0400
64267@@ -43,6 +43,7 @@
64268 #include <linux/idr.h>
64269 #include <linux/posix-clock.h>
64270 #include <linux/posix-timers.h>
64271+#include <linux/grsecurity.h>
64272 #include <linux/syscalls.h>
64273 #include <linux/wait.h>
64274 #include <linux/workqueue.h>
64275@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
64276 * which we beg off on and pass to do_sys_settimeofday().
64277 */
64278
64279-static struct k_clock posix_clocks[MAX_CLOCKS];
64280+static struct k_clock *posix_clocks[MAX_CLOCKS];
64281
64282 /*
64283 * These ones are defined below.
64284@@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
64285 */
64286 static __init int init_posix_timers(void)
64287 {
64288- struct k_clock clock_realtime = {
64289+ static struct k_clock clock_realtime = {
64290 .clock_getres = hrtimer_get_res,
64291 .clock_get = posix_clock_realtime_get,
64292 .clock_set = posix_clock_realtime_set,
64293@@ -239,7 +240,7 @@ static __init int init_posix_timers(void
64294 .timer_get = common_timer_get,
64295 .timer_del = common_timer_del,
64296 };
64297- struct k_clock clock_monotonic = {
64298+ static struct k_clock clock_monotonic = {
64299 .clock_getres = hrtimer_get_res,
64300 .clock_get = posix_ktime_get_ts,
64301 .nsleep = common_nsleep,
64302@@ -249,19 +250,19 @@ static __init int init_posix_timers(void
64303 .timer_get = common_timer_get,
64304 .timer_del = common_timer_del,
64305 };
64306- struct k_clock clock_monotonic_raw = {
64307+ static struct k_clock clock_monotonic_raw = {
64308 .clock_getres = hrtimer_get_res,
64309 .clock_get = posix_get_monotonic_raw,
64310 };
64311- struct k_clock clock_realtime_coarse = {
64312+ static struct k_clock clock_realtime_coarse = {
64313 .clock_getres = posix_get_coarse_res,
64314 .clock_get = posix_get_realtime_coarse,
64315 };
64316- struct k_clock clock_monotonic_coarse = {
64317+ static struct k_clock clock_monotonic_coarse = {
64318 .clock_getres = posix_get_coarse_res,
64319 .clock_get = posix_get_monotonic_coarse,
64320 };
64321- struct k_clock clock_boottime = {
64322+ static struct k_clock clock_boottime = {
64323 .clock_getres = hrtimer_get_res,
64324 .clock_get = posix_get_boottime,
64325 .nsleep = common_nsleep,
64326@@ -272,6 +273,8 @@ static __init int init_posix_timers(void
64327 .timer_del = common_timer_del,
64328 };
64329
64330+ pax_track_stack();
64331+
64332 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
64333 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
64334 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
64335@@ -473,7 +476,7 @@ void posix_timers_register_clock(const c
64336 return;
64337 }
64338
64339- posix_clocks[clock_id] = *new_clock;
64340+ posix_clocks[clock_id] = new_clock;
64341 }
64342 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
64343
64344@@ -519,9 +522,9 @@ static struct k_clock *clockid_to_kclock
64345 return (id & CLOCKFD_MASK) == CLOCKFD ?
64346 &clock_posix_dynamic : &clock_posix_cpu;
64347
64348- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
64349+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
64350 return NULL;
64351- return &posix_clocks[id];
64352+ return posix_clocks[id];
64353 }
64354
64355 static int common_timer_create(struct k_itimer *new_timer)
64356@@ -959,6 +962,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
64357 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
64358 return -EFAULT;
64359
64360+ /* only the CLOCK_REALTIME clock can be set, all other clocks
64361+ have their clock_set fptr set to a nosettime dummy function
64362+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
64363+ call common_clock_set, which calls do_sys_settimeofday, which
64364+ we hook
64365+ */
64366+
64367 return kc->clock_set(which_clock, &new_tp);
64368 }
64369
64370diff -urNp linux-3.0.7/kernel/power/poweroff.c linux-3.0.7/kernel/power/poweroff.c
64371--- linux-3.0.7/kernel/power/poweroff.c 2011-07-21 22:17:23.000000000 -0400
64372+++ linux-3.0.7/kernel/power/poweroff.c 2011-08-23 21:47:56.000000000 -0400
64373@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
64374 .enable_mask = SYSRQ_ENABLE_BOOT,
64375 };
64376
64377-static int pm_sysrq_init(void)
64378+static int __init pm_sysrq_init(void)
64379 {
64380 register_sysrq_key('o', &sysrq_poweroff_op);
64381 return 0;
64382diff -urNp linux-3.0.7/kernel/power/process.c linux-3.0.7/kernel/power/process.c
64383--- linux-3.0.7/kernel/power/process.c 2011-07-21 22:17:23.000000000 -0400
64384+++ linux-3.0.7/kernel/power/process.c 2011-08-23 21:47:56.000000000 -0400
64385@@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
64386 u64 elapsed_csecs64;
64387 unsigned int elapsed_csecs;
64388 bool wakeup = false;
64389+ bool timedout = false;
64390
64391 do_gettimeofday(&start);
64392
64393@@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
64394
64395 while (true) {
64396 todo = 0;
64397+ if (time_after(jiffies, end_time))
64398+ timedout = true;
64399 read_lock(&tasklist_lock);
64400 do_each_thread(g, p) {
64401 if (frozen(p) || !freezable(p))
64402@@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
64403 * try_to_stop() after schedule() in ptrace/signal
64404 * stop sees TIF_FREEZE.
64405 */
64406- if (!task_is_stopped_or_traced(p) &&
64407- !freezer_should_skip(p))
64408+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
64409 todo++;
64410+ if (timedout) {
64411+ printk(KERN_ERR "Task refusing to freeze:\n");
64412+ sched_show_task(p);
64413+ }
64414+ }
64415 } while_each_thread(g, p);
64416 read_unlock(&tasklist_lock);
64417
64418@@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
64419 todo += wq_busy;
64420 }
64421
64422- if (!todo || time_after(jiffies, end_time))
64423+ if (!todo || timedout)
64424 break;
64425
64426 if (pm_wakeup_pending()) {
64427diff -urNp linux-3.0.7/kernel/printk.c linux-3.0.7/kernel/printk.c
64428--- linux-3.0.7/kernel/printk.c 2011-10-16 21:54:54.000000000 -0400
64429+++ linux-3.0.7/kernel/printk.c 2011-10-16 21:55:28.000000000 -0400
64430@@ -313,12 +313,17 @@ static int check_syslog_permissions(int
64431 if (from_file && type != SYSLOG_ACTION_OPEN)
64432 return 0;
64433
64434+#ifdef CONFIG_GRKERNSEC_DMESG
64435+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
64436+ return -EPERM;
64437+#endif
64438+
64439 if (syslog_action_restricted(type)) {
64440 if (capable(CAP_SYSLOG))
64441 return 0;
64442 /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
64443 if (capable(CAP_SYS_ADMIN)) {
64444- WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
64445+ printk_once(KERN_WARNING "Attempt to access syslog with CAP_SYS_ADMIN "
64446 "but no CAP_SYSLOG (deprecated).\n");
64447 return 0;
64448 }
64449diff -urNp linux-3.0.7/kernel/profile.c linux-3.0.7/kernel/profile.c
64450--- linux-3.0.7/kernel/profile.c 2011-07-21 22:17:23.000000000 -0400
64451+++ linux-3.0.7/kernel/profile.c 2011-08-23 21:47:56.000000000 -0400
64452@@ -39,7 +39,7 @@ struct profile_hit {
64453 /* Oprofile timer tick hook */
64454 static int (*timer_hook)(struct pt_regs *) __read_mostly;
64455
64456-static atomic_t *prof_buffer;
64457+static atomic_unchecked_t *prof_buffer;
64458 static unsigned long prof_len, prof_shift;
64459
64460 int prof_on __read_mostly;
64461@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
64462 hits[i].pc = 0;
64463 continue;
64464 }
64465- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64466+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64467 hits[i].hits = hits[i].pc = 0;
64468 }
64469 }
64470@@ -342,9 +342,9 @@ static void do_profile_hits(int type, vo
64471 * Add the current hit(s) and flush the write-queue out
64472 * to the global buffer:
64473 */
64474- atomic_add(nr_hits, &prof_buffer[pc]);
64475+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
64476 for (i = 0; i < NR_PROFILE_HIT; ++i) {
64477- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64478+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64479 hits[i].pc = hits[i].hits = 0;
64480 }
64481 out:
64482@@ -419,7 +419,7 @@ static void do_profile_hits(int type, vo
64483 {
64484 unsigned long pc;
64485 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
64486- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64487+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64488 }
64489 #endif /* !CONFIG_SMP */
64490
64491@@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
64492 return -EFAULT;
64493 buf++; p++; count--; read++;
64494 }
64495- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
64496+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
64497 if (copy_to_user(buf, (void *)pnt, count))
64498 return -EFAULT;
64499 read += count;
64500@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
64501 }
64502 #endif
64503 profile_discard_flip_buffers();
64504- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
64505+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
64506 return count;
64507 }
64508
64509diff -urNp linux-3.0.7/kernel/ptrace.c linux-3.0.7/kernel/ptrace.c
64510--- linux-3.0.7/kernel/ptrace.c 2011-07-21 22:17:23.000000000 -0400
64511+++ linux-3.0.7/kernel/ptrace.c 2011-08-23 21:48:14.000000000 -0400
64512@@ -132,7 +132,8 @@ int ptrace_check_attach(struct task_stru
64513 return ret;
64514 }
64515
64516-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
64517+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
64518+ unsigned int log)
64519 {
64520 const struct cred *cred = current_cred(), *tcred;
64521
64522@@ -158,7 +159,8 @@ int __ptrace_may_access(struct task_stru
64523 cred->gid == tcred->sgid &&
64524 cred->gid == tcred->gid))
64525 goto ok;
64526- if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
64527+ if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
64528+ (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
64529 goto ok;
64530 rcu_read_unlock();
64531 return -EPERM;
64532@@ -167,7 +169,9 @@ ok:
64533 smp_rmb();
64534 if (task->mm)
64535 dumpable = get_dumpable(task->mm);
64536- if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
64537+ if (!dumpable &&
64538+ ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
64539+ (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
64540 return -EPERM;
64541
64542 return security_ptrace_access_check(task, mode);
64543@@ -177,7 +181,16 @@ bool ptrace_may_access(struct task_struc
64544 {
64545 int err;
64546 task_lock(task);
64547- err = __ptrace_may_access(task, mode);
64548+ err = __ptrace_may_access(task, mode, 0);
64549+ task_unlock(task);
64550+ return !err;
64551+}
64552+
64553+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
64554+{
64555+ int err;
64556+ task_lock(task);
64557+ err = __ptrace_may_access(task, mode, 1);
64558 task_unlock(task);
64559 return !err;
64560 }
64561@@ -205,7 +218,7 @@ static int ptrace_attach(struct task_str
64562 goto out;
64563
64564 task_lock(task);
64565- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
64566+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
64567 task_unlock(task);
64568 if (retval)
64569 goto unlock_creds;
64570@@ -218,7 +231,7 @@ static int ptrace_attach(struct task_str
64571 goto unlock_tasklist;
64572
64573 task->ptrace = PT_PTRACED;
64574- if (task_ns_capable(task, CAP_SYS_PTRACE))
64575+ if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
64576 task->ptrace |= PT_PTRACE_CAP;
64577
64578 __ptrace_link(task, current);
64579@@ -406,6 +419,8 @@ int ptrace_readdata(struct task_struct *
64580 {
64581 int copied = 0;
64582
64583+ pax_track_stack();
64584+
64585 while (len > 0) {
64586 char buf[128];
64587 int this_len, retval;
64588@@ -417,7 +432,7 @@ int ptrace_readdata(struct task_struct *
64589 break;
64590 return -EIO;
64591 }
64592- if (copy_to_user(dst, buf, retval))
64593+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
64594 return -EFAULT;
64595 copied += retval;
64596 src += retval;
64597@@ -431,6 +446,8 @@ int ptrace_writedata(struct task_struct
64598 {
64599 int copied = 0;
64600
64601+ pax_track_stack();
64602+
64603 while (len > 0) {
64604 char buf[128];
64605 int this_len, retval;
64606@@ -613,9 +630,11 @@ int ptrace_request(struct task_struct *c
64607 {
64608 int ret = -EIO;
64609 siginfo_t siginfo;
64610- void __user *datavp = (void __user *) data;
64611+ void __user *datavp = (__force void __user *) data;
64612 unsigned long __user *datalp = datavp;
64613
64614+ pax_track_stack();
64615+
64616 switch (request) {
64617 case PTRACE_PEEKTEXT:
64618 case PTRACE_PEEKDATA:
64619@@ -761,14 +780,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
64620 goto out;
64621 }
64622
64623+ if (gr_handle_ptrace(child, request)) {
64624+ ret = -EPERM;
64625+ goto out_put_task_struct;
64626+ }
64627+
64628 if (request == PTRACE_ATTACH) {
64629 ret = ptrace_attach(child);
64630 /*
64631 * Some architectures need to do book-keeping after
64632 * a ptrace attach.
64633 */
64634- if (!ret)
64635+ if (!ret) {
64636 arch_ptrace_attach(child);
64637+ gr_audit_ptrace(child);
64638+ }
64639 goto out_put_task_struct;
64640 }
64641
64642@@ -793,7 +819,7 @@ int generic_ptrace_peekdata(struct task_
64643 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
64644 if (copied != sizeof(tmp))
64645 return -EIO;
64646- return put_user(tmp, (unsigned long __user *)data);
64647+ return put_user(tmp, (__force unsigned long __user *)data);
64648 }
64649
64650 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
64651@@ -816,6 +842,8 @@ int compat_ptrace_request(struct task_st
64652 siginfo_t siginfo;
64653 int ret;
64654
64655+ pax_track_stack();
64656+
64657 switch (request) {
64658 case PTRACE_PEEKTEXT:
64659 case PTRACE_PEEKDATA:
64660@@ -903,14 +931,21 @@ asmlinkage long compat_sys_ptrace(compat
64661 goto out;
64662 }
64663
64664+ if (gr_handle_ptrace(child, request)) {
64665+ ret = -EPERM;
64666+ goto out_put_task_struct;
64667+ }
64668+
64669 if (request == PTRACE_ATTACH) {
64670 ret = ptrace_attach(child);
64671 /*
64672 * Some architectures need to do book-keeping after
64673 * a ptrace attach.
64674 */
64675- if (!ret)
64676+ if (!ret) {
64677 arch_ptrace_attach(child);
64678+ gr_audit_ptrace(child);
64679+ }
64680 goto out_put_task_struct;
64681 }
64682
64683diff -urNp linux-3.0.7/kernel/rcutorture.c linux-3.0.7/kernel/rcutorture.c
64684--- linux-3.0.7/kernel/rcutorture.c 2011-07-21 22:17:23.000000000 -0400
64685+++ linux-3.0.7/kernel/rcutorture.c 2011-08-23 21:47:56.000000000 -0400
64686@@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
64687 { 0 };
64688 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
64689 { 0 };
64690-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
64691-static atomic_t n_rcu_torture_alloc;
64692-static atomic_t n_rcu_torture_alloc_fail;
64693-static atomic_t n_rcu_torture_free;
64694-static atomic_t n_rcu_torture_mberror;
64695-static atomic_t n_rcu_torture_error;
64696+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
64697+static atomic_unchecked_t n_rcu_torture_alloc;
64698+static atomic_unchecked_t n_rcu_torture_alloc_fail;
64699+static atomic_unchecked_t n_rcu_torture_free;
64700+static atomic_unchecked_t n_rcu_torture_mberror;
64701+static atomic_unchecked_t n_rcu_torture_error;
64702 static long n_rcu_torture_boost_ktrerror;
64703 static long n_rcu_torture_boost_rterror;
64704 static long n_rcu_torture_boost_failure;
64705@@ -223,11 +223,11 @@ rcu_torture_alloc(void)
64706
64707 spin_lock_bh(&rcu_torture_lock);
64708 if (list_empty(&rcu_torture_freelist)) {
64709- atomic_inc(&n_rcu_torture_alloc_fail);
64710+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
64711 spin_unlock_bh(&rcu_torture_lock);
64712 return NULL;
64713 }
64714- atomic_inc(&n_rcu_torture_alloc);
64715+ atomic_inc_unchecked(&n_rcu_torture_alloc);
64716 p = rcu_torture_freelist.next;
64717 list_del_init(p);
64718 spin_unlock_bh(&rcu_torture_lock);
64719@@ -240,7 +240,7 @@ rcu_torture_alloc(void)
64720 static void
64721 rcu_torture_free(struct rcu_torture *p)
64722 {
64723- atomic_inc(&n_rcu_torture_free);
64724+ atomic_inc_unchecked(&n_rcu_torture_free);
64725 spin_lock_bh(&rcu_torture_lock);
64726 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
64727 spin_unlock_bh(&rcu_torture_lock);
64728@@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
64729 i = rp->rtort_pipe_count;
64730 if (i > RCU_TORTURE_PIPE_LEN)
64731 i = RCU_TORTURE_PIPE_LEN;
64732- atomic_inc(&rcu_torture_wcount[i]);
64733+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
64734 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
64735 rp->rtort_mbtest = 0;
64736 rcu_torture_free(rp);
64737@@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_fr
64738 i = rp->rtort_pipe_count;
64739 if (i > RCU_TORTURE_PIPE_LEN)
64740 i = RCU_TORTURE_PIPE_LEN;
64741- atomic_inc(&rcu_torture_wcount[i]);
64742+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
64743 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
64744 rp->rtort_mbtest = 0;
64745 list_del(&rp->rtort_free);
64746@@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
64747 i = old_rp->rtort_pipe_count;
64748 if (i > RCU_TORTURE_PIPE_LEN)
64749 i = RCU_TORTURE_PIPE_LEN;
64750- atomic_inc(&rcu_torture_wcount[i]);
64751+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
64752 old_rp->rtort_pipe_count++;
64753 cur_ops->deferred_free(old_rp);
64754 }
64755@@ -951,7 +951,7 @@ static void rcu_torture_timer(unsigned l
64756 return;
64757 }
64758 if (p->rtort_mbtest == 0)
64759- atomic_inc(&n_rcu_torture_mberror);
64760+ atomic_inc_unchecked(&n_rcu_torture_mberror);
64761 spin_lock(&rand_lock);
64762 cur_ops->read_delay(&rand);
64763 n_rcu_torture_timers++;
64764@@ -1013,7 +1013,7 @@ rcu_torture_reader(void *arg)
64765 continue;
64766 }
64767 if (p->rtort_mbtest == 0)
64768- atomic_inc(&n_rcu_torture_mberror);
64769+ atomic_inc_unchecked(&n_rcu_torture_mberror);
64770 cur_ops->read_delay(&rand);
64771 preempt_disable();
64772 pipe_count = p->rtort_pipe_count;
64773@@ -1072,16 +1072,16 @@ rcu_torture_printk(char *page)
64774 rcu_torture_current,
64775 rcu_torture_current_version,
64776 list_empty(&rcu_torture_freelist),
64777- atomic_read(&n_rcu_torture_alloc),
64778- atomic_read(&n_rcu_torture_alloc_fail),
64779- atomic_read(&n_rcu_torture_free),
64780- atomic_read(&n_rcu_torture_mberror),
64781+ atomic_read_unchecked(&n_rcu_torture_alloc),
64782+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
64783+ atomic_read_unchecked(&n_rcu_torture_free),
64784+ atomic_read_unchecked(&n_rcu_torture_mberror),
64785 n_rcu_torture_boost_ktrerror,
64786 n_rcu_torture_boost_rterror,
64787 n_rcu_torture_boost_failure,
64788 n_rcu_torture_boosts,
64789 n_rcu_torture_timers);
64790- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
64791+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
64792 n_rcu_torture_boost_ktrerror != 0 ||
64793 n_rcu_torture_boost_rterror != 0 ||
64794 n_rcu_torture_boost_failure != 0)
64795@@ -1089,7 +1089,7 @@ rcu_torture_printk(char *page)
64796 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
64797 if (i > 1) {
64798 cnt += sprintf(&page[cnt], "!!! ");
64799- atomic_inc(&n_rcu_torture_error);
64800+ atomic_inc_unchecked(&n_rcu_torture_error);
64801 WARN_ON_ONCE(1);
64802 }
64803 cnt += sprintf(&page[cnt], "Reader Pipe: ");
64804@@ -1103,7 +1103,7 @@ rcu_torture_printk(char *page)
64805 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
64806 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
64807 cnt += sprintf(&page[cnt], " %d",
64808- atomic_read(&rcu_torture_wcount[i]));
64809+ atomic_read_unchecked(&rcu_torture_wcount[i]));
64810 }
64811 cnt += sprintf(&page[cnt], "\n");
64812 if (cur_ops->stats)
64813@@ -1412,7 +1412,7 @@ rcu_torture_cleanup(void)
64814
64815 if (cur_ops->cleanup)
64816 cur_ops->cleanup();
64817- if (atomic_read(&n_rcu_torture_error))
64818+ if (atomic_read_unchecked(&n_rcu_torture_error))
64819 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
64820 else
64821 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
64822@@ -1476,17 +1476,17 @@ rcu_torture_init(void)
64823
64824 rcu_torture_current = NULL;
64825 rcu_torture_current_version = 0;
64826- atomic_set(&n_rcu_torture_alloc, 0);
64827- atomic_set(&n_rcu_torture_alloc_fail, 0);
64828- atomic_set(&n_rcu_torture_free, 0);
64829- atomic_set(&n_rcu_torture_mberror, 0);
64830- atomic_set(&n_rcu_torture_error, 0);
64831+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
64832+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
64833+ atomic_set_unchecked(&n_rcu_torture_free, 0);
64834+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
64835+ atomic_set_unchecked(&n_rcu_torture_error, 0);
64836 n_rcu_torture_boost_ktrerror = 0;
64837 n_rcu_torture_boost_rterror = 0;
64838 n_rcu_torture_boost_failure = 0;
64839 n_rcu_torture_boosts = 0;
64840 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
64841- atomic_set(&rcu_torture_wcount[i], 0);
64842+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
64843 for_each_possible_cpu(cpu) {
64844 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
64845 per_cpu(rcu_torture_count, cpu)[i] = 0;
64846diff -urNp linux-3.0.7/kernel/rcutree.c linux-3.0.7/kernel/rcutree.c
64847--- linux-3.0.7/kernel/rcutree.c 2011-07-21 22:17:23.000000000 -0400
64848+++ linux-3.0.7/kernel/rcutree.c 2011-09-14 09:08:05.000000000 -0400
64849@@ -356,9 +356,9 @@ void rcu_enter_nohz(void)
64850 }
64851 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
64852 smp_mb__before_atomic_inc(); /* See above. */
64853- atomic_inc(&rdtp->dynticks);
64854+ atomic_inc_unchecked(&rdtp->dynticks);
64855 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
64856- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
64857+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
64858 local_irq_restore(flags);
64859
64860 /* If the interrupt queued a callback, get out of dyntick mode. */
64861@@ -387,10 +387,10 @@ void rcu_exit_nohz(void)
64862 return;
64863 }
64864 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
64865- atomic_inc(&rdtp->dynticks);
64866+ atomic_inc_unchecked(&rdtp->dynticks);
64867 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
64868 smp_mb__after_atomic_inc(); /* See above. */
64869- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
64870+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
64871 local_irq_restore(flags);
64872 }
64873
64874@@ -406,14 +406,14 @@ void rcu_nmi_enter(void)
64875 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
64876
64877 if (rdtp->dynticks_nmi_nesting == 0 &&
64878- (atomic_read(&rdtp->dynticks) & 0x1))
64879+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
64880 return;
64881 rdtp->dynticks_nmi_nesting++;
64882 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
64883- atomic_inc(&rdtp->dynticks);
64884+ atomic_inc_unchecked(&rdtp->dynticks);
64885 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
64886 smp_mb__after_atomic_inc(); /* See above. */
64887- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
64888+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
64889 }
64890
64891 /**
64892@@ -432,9 +432,9 @@ void rcu_nmi_exit(void)
64893 return;
64894 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
64895 smp_mb__before_atomic_inc(); /* See above. */
64896- atomic_inc(&rdtp->dynticks);
64897+ atomic_inc_unchecked(&rdtp->dynticks);
64898 smp_mb__after_atomic_inc(); /* Force delay to next write. */
64899- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
64900+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
64901 }
64902
64903 /**
64904@@ -469,7 +469,7 @@ void rcu_irq_exit(void)
64905 */
64906 static int dyntick_save_progress_counter(struct rcu_data *rdp)
64907 {
64908- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
64909+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
64910 return 0;
64911 }
64912
64913@@ -484,7 +484,7 @@ static int rcu_implicit_dynticks_qs(stru
64914 unsigned long curr;
64915 unsigned long snap;
64916
64917- curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
64918+ curr = (unsigned long)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
64919 snap = (unsigned long)rdp->dynticks_snap;
64920
64921 /*
64922@@ -1470,7 +1470,7 @@ __rcu_process_callbacks(struct rcu_state
64923 /*
64924 * Do softirq processing for the current CPU.
64925 */
64926-static void rcu_process_callbacks(struct softirq_action *unused)
64927+static void rcu_process_callbacks(void)
64928 {
64929 __rcu_process_callbacks(&rcu_sched_state,
64930 &__get_cpu_var(rcu_sched_data));
64931diff -urNp linux-3.0.7/kernel/rcutree.h linux-3.0.7/kernel/rcutree.h
64932--- linux-3.0.7/kernel/rcutree.h 2011-07-21 22:17:23.000000000 -0400
64933+++ linux-3.0.7/kernel/rcutree.h 2011-09-14 09:08:05.000000000 -0400
64934@@ -86,7 +86,7 @@
64935 struct rcu_dynticks {
64936 int dynticks_nesting; /* Track irq/process nesting level. */
64937 int dynticks_nmi_nesting; /* Track NMI nesting level. */
64938- atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
64939+ atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
64940 };
64941
64942 /* RCU's kthread states for tracing. */
64943diff -urNp linux-3.0.7/kernel/rcutree_plugin.h linux-3.0.7/kernel/rcutree_plugin.h
64944--- linux-3.0.7/kernel/rcutree_plugin.h 2011-07-21 22:17:23.000000000 -0400
64945+++ linux-3.0.7/kernel/rcutree_plugin.h 2011-08-23 21:47:56.000000000 -0400
64946@@ -822,7 +822,7 @@ void synchronize_rcu_expedited(void)
64947
64948 /* Clean up and exit. */
64949 smp_mb(); /* ensure expedited GP seen before counter increment. */
64950- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
64951+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
64952 unlock_mb_ret:
64953 mutex_unlock(&sync_rcu_preempt_exp_mutex);
64954 mb_ret:
64955@@ -1774,8 +1774,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
64956
64957 #else /* #ifndef CONFIG_SMP */
64958
64959-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
64960-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
64961+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
64962+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
64963
64964 static int synchronize_sched_expedited_cpu_stop(void *data)
64965 {
64966@@ -1830,7 +1830,7 @@ void synchronize_sched_expedited(void)
64967 int firstsnap, s, snap, trycount = 0;
64968
64969 /* Note that atomic_inc_return() implies full memory barrier. */
64970- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
64971+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
64972 get_online_cpus();
64973
64974 /*
64975@@ -1851,7 +1851,7 @@ void synchronize_sched_expedited(void)
64976 }
64977
64978 /* Check to see if someone else did our work for us. */
64979- s = atomic_read(&sync_sched_expedited_done);
64980+ s = atomic_read_unchecked(&sync_sched_expedited_done);
64981 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
64982 smp_mb(); /* ensure test happens before caller kfree */
64983 return;
64984@@ -1866,7 +1866,7 @@ void synchronize_sched_expedited(void)
64985 * grace period works for us.
64986 */
64987 get_online_cpus();
64988- snap = atomic_read(&sync_sched_expedited_started) - 1;
64989+ snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
64990 smp_mb(); /* ensure read is before try_stop_cpus(). */
64991 }
64992
64993@@ -1877,12 +1877,12 @@ void synchronize_sched_expedited(void)
64994 * than we did beat us to the punch.
64995 */
64996 do {
64997- s = atomic_read(&sync_sched_expedited_done);
64998+ s = atomic_read_unchecked(&sync_sched_expedited_done);
64999 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
65000 smp_mb(); /* ensure test happens before caller kfree */
65001 break;
65002 }
65003- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
65004+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
65005
65006 put_online_cpus();
65007 }
65008diff -urNp linux-3.0.7/kernel/relay.c linux-3.0.7/kernel/relay.c
65009--- linux-3.0.7/kernel/relay.c 2011-07-21 22:17:23.000000000 -0400
65010+++ linux-3.0.7/kernel/relay.c 2011-08-23 21:48:14.000000000 -0400
65011@@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
65012 };
65013 ssize_t ret;
65014
65015+ pax_track_stack();
65016+
65017 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
65018 return 0;
65019 if (splice_grow_spd(pipe, &spd))
65020diff -urNp linux-3.0.7/kernel/resource.c linux-3.0.7/kernel/resource.c
65021--- linux-3.0.7/kernel/resource.c 2011-07-21 22:17:23.000000000 -0400
65022+++ linux-3.0.7/kernel/resource.c 2011-08-23 21:48:14.000000000 -0400
65023@@ -141,8 +141,18 @@ static const struct file_operations proc
65024
65025 static int __init ioresources_init(void)
65026 {
65027+#ifdef CONFIG_GRKERNSEC_PROC_ADD
65028+#ifdef CONFIG_GRKERNSEC_PROC_USER
65029+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
65030+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
65031+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65032+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
65033+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
65034+#endif
65035+#else
65036 proc_create("ioports", 0, NULL, &proc_ioports_operations);
65037 proc_create("iomem", 0, NULL, &proc_iomem_operations);
65038+#endif
65039 return 0;
65040 }
65041 __initcall(ioresources_init);
65042diff -urNp linux-3.0.7/kernel/rtmutex-tester.c linux-3.0.7/kernel/rtmutex-tester.c
65043--- linux-3.0.7/kernel/rtmutex-tester.c 2011-07-21 22:17:23.000000000 -0400
65044+++ linux-3.0.7/kernel/rtmutex-tester.c 2011-08-23 21:47:56.000000000 -0400
65045@@ -20,7 +20,7 @@
65046 #define MAX_RT_TEST_MUTEXES 8
65047
65048 static spinlock_t rttest_lock;
65049-static atomic_t rttest_event;
65050+static atomic_unchecked_t rttest_event;
65051
65052 struct test_thread_data {
65053 int opcode;
65054@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
65055
65056 case RTTEST_LOCKCONT:
65057 td->mutexes[td->opdata] = 1;
65058- td->event = atomic_add_return(1, &rttest_event);
65059+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65060 return 0;
65061
65062 case RTTEST_RESET:
65063@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
65064 return 0;
65065
65066 case RTTEST_RESETEVENT:
65067- atomic_set(&rttest_event, 0);
65068+ atomic_set_unchecked(&rttest_event, 0);
65069 return 0;
65070
65071 default:
65072@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
65073 return ret;
65074
65075 td->mutexes[id] = 1;
65076- td->event = atomic_add_return(1, &rttest_event);
65077+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65078 rt_mutex_lock(&mutexes[id]);
65079- td->event = atomic_add_return(1, &rttest_event);
65080+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65081 td->mutexes[id] = 4;
65082 return 0;
65083
65084@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
65085 return ret;
65086
65087 td->mutexes[id] = 1;
65088- td->event = atomic_add_return(1, &rttest_event);
65089+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65090 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
65091- td->event = atomic_add_return(1, &rttest_event);
65092+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65093 td->mutexes[id] = ret ? 0 : 4;
65094 return ret ? -EINTR : 0;
65095
65096@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
65097 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
65098 return ret;
65099
65100- td->event = atomic_add_return(1, &rttest_event);
65101+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65102 rt_mutex_unlock(&mutexes[id]);
65103- td->event = atomic_add_return(1, &rttest_event);
65104+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65105 td->mutexes[id] = 0;
65106 return 0;
65107
65108@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
65109 break;
65110
65111 td->mutexes[dat] = 2;
65112- td->event = atomic_add_return(1, &rttest_event);
65113+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65114 break;
65115
65116 default:
65117@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
65118 return;
65119
65120 td->mutexes[dat] = 3;
65121- td->event = atomic_add_return(1, &rttest_event);
65122+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65123 break;
65124
65125 case RTTEST_LOCKNOWAIT:
65126@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
65127 return;
65128
65129 td->mutexes[dat] = 1;
65130- td->event = atomic_add_return(1, &rttest_event);
65131+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65132 return;
65133
65134 default:
65135diff -urNp linux-3.0.7/kernel/sched_autogroup.c linux-3.0.7/kernel/sched_autogroup.c
65136--- linux-3.0.7/kernel/sched_autogroup.c 2011-07-21 22:17:23.000000000 -0400
65137+++ linux-3.0.7/kernel/sched_autogroup.c 2011-08-23 21:47:56.000000000 -0400
65138@@ -7,7 +7,7 @@
65139
65140 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
65141 static struct autogroup autogroup_default;
65142-static atomic_t autogroup_seq_nr;
65143+static atomic_unchecked_t autogroup_seq_nr;
65144
65145 static void __init autogroup_init(struct task_struct *init_task)
65146 {
65147@@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
65148
65149 kref_init(&ag->kref);
65150 init_rwsem(&ag->lock);
65151- ag->id = atomic_inc_return(&autogroup_seq_nr);
65152+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
65153 ag->tg = tg;
65154 #ifdef CONFIG_RT_GROUP_SCHED
65155 /*
65156diff -urNp linux-3.0.7/kernel/sched.c linux-3.0.7/kernel/sched.c
65157--- linux-3.0.7/kernel/sched.c 2011-10-17 23:17:09.000000000 -0400
65158+++ linux-3.0.7/kernel/sched.c 2011-10-17 23:17:19.000000000 -0400
65159@@ -4227,6 +4227,8 @@ static void __sched __schedule(void)
65160 struct rq *rq;
65161 int cpu;
65162
65163+ pax_track_stack();
65164+
65165 need_resched:
65166 preempt_disable();
65167 cpu = smp_processor_id();
65168@@ -4920,6 +4922,8 @@ int can_nice(const struct task_struct *p
65169 /* convert nice value [19,-20] to rlimit style value [1,40] */
65170 int nice_rlim = 20 - nice;
65171
65172+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
65173+
65174 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
65175 capable(CAP_SYS_NICE));
65176 }
65177@@ -4953,7 +4957,8 @@ SYSCALL_DEFINE1(nice, int, increment)
65178 if (nice > 19)
65179 nice = 19;
65180
65181- if (increment < 0 && !can_nice(current, nice))
65182+ if (increment < 0 && (!can_nice(current, nice) ||
65183+ gr_handle_chroot_nice()))
65184 return -EPERM;
65185
65186 retval = security_task_setnice(current, nice);
65187@@ -5097,6 +5102,7 @@ recheck:
65188 unsigned long rlim_rtprio =
65189 task_rlimit(p, RLIMIT_RTPRIO);
65190
65191+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
65192 /* can't set/change the rt policy */
65193 if (policy != p->policy && !rlim_rtprio)
65194 return -EPERM;
65195diff -urNp linux-3.0.7/kernel/sched_fair.c linux-3.0.7/kernel/sched_fair.c
65196--- linux-3.0.7/kernel/sched_fair.c 2011-07-21 22:17:23.000000000 -0400
65197+++ linux-3.0.7/kernel/sched_fair.c 2011-08-23 21:47:56.000000000 -0400
65198@@ -4050,7 +4050,7 @@ static void nohz_idle_balance(int this_c
65199 * run_rebalance_domains is triggered when needed from the scheduler tick.
65200 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
65201 */
65202-static void run_rebalance_domains(struct softirq_action *h)
65203+static void run_rebalance_domains(void)
65204 {
65205 int this_cpu = smp_processor_id();
65206 struct rq *this_rq = cpu_rq(this_cpu);
65207diff -urNp linux-3.0.7/kernel/signal.c linux-3.0.7/kernel/signal.c
65208--- linux-3.0.7/kernel/signal.c 2011-07-21 22:17:23.000000000 -0400
65209+++ linux-3.0.7/kernel/signal.c 2011-08-23 21:48:14.000000000 -0400
65210@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
65211
65212 int print_fatal_signals __read_mostly;
65213
65214-static void __user *sig_handler(struct task_struct *t, int sig)
65215+static __sighandler_t sig_handler(struct task_struct *t, int sig)
65216 {
65217 return t->sighand->action[sig - 1].sa.sa_handler;
65218 }
65219
65220-static int sig_handler_ignored(void __user *handler, int sig)
65221+static int sig_handler_ignored(__sighandler_t handler, int sig)
65222 {
65223 /* Is it explicitly or implicitly ignored? */
65224 return handler == SIG_IGN ||
65225@@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
65226 static int sig_task_ignored(struct task_struct *t, int sig,
65227 int from_ancestor_ns)
65228 {
65229- void __user *handler;
65230+ __sighandler_t handler;
65231
65232 handler = sig_handler(t, sig);
65233
65234@@ -320,6 +320,9 @@ __sigqueue_alloc(int sig, struct task_st
65235 atomic_inc(&user->sigpending);
65236 rcu_read_unlock();
65237
65238+ if (!override_rlimit)
65239+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
65240+
65241 if (override_rlimit ||
65242 atomic_read(&user->sigpending) <=
65243 task_rlimit(t, RLIMIT_SIGPENDING)) {
65244@@ -444,7 +447,7 @@ flush_signal_handlers(struct task_struct
65245
65246 int unhandled_signal(struct task_struct *tsk, int sig)
65247 {
65248- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
65249+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
65250 if (is_global_init(tsk))
65251 return 1;
65252 if (handler != SIG_IGN && handler != SIG_DFL)
65253@@ -770,6 +773,13 @@ static int check_kill_permission(int sig
65254 }
65255 }
65256
65257+ /* allow glibc communication via tgkill to other threads in our
65258+ thread group */
65259+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
65260+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
65261+ && gr_handle_signal(t, sig))
65262+ return -EPERM;
65263+
65264 return security_task_kill(t, info, sig, 0);
65265 }
65266
65267@@ -1092,7 +1102,7 @@ __group_send_sig_info(int sig, struct si
65268 return send_signal(sig, info, p, 1);
65269 }
65270
65271-static int
65272+int
65273 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
65274 {
65275 return send_signal(sig, info, t, 0);
65276@@ -1129,6 +1139,7 @@ force_sig_info(int sig, struct siginfo *
65277 unsigned long int flags;
65278 int ret, blocked, ignored;
65279 struct k_sigaction *action;
65280+ int is_unhandled = 0;
65281
65282 spin_lock_irqsave(&t->sighand->siglock, flags);
65283 action = &t->sighand->action[sig-1];
65284@@ -1143,9 +1154,18 @@ force_sig_info(int sig, struct siginfo *
65285 }
65286 if (action->sa.sa_handler == SIG_DFL)
65287 t->signal->flags &= ~SIGNAL_UNKILLABLE;
65288+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
65289+ is_unhandled = 1;
65290 ret = specific_send_sig_info(sig, info, t);
65291 spin_unlock_irqrestore(&t->sighand->siglock, flags);
65292
65293+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
65294+ normal operation */
65295+ if (is_unhandled) {
65296+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
65297+ gr_handle_crash(t, sig);
65298+ }
65299+
65300 return ret;
65301 }
65302
65303@@ -1212,8 +1232,11 @@ int group_send_sig_info(int sig, struct
65304 ret = check_kill_permission(sig, info, p);
65305 rcu_read_unlock();
65306
65307- if (!ret && sig)
65308+ if (!ret && sig) {
65309 ret = do_send_sig_info(sig, info, p, true);
65310+ if (!ret)
65311+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
65312+ }
65313
65314 return ret;
65315 }
65316@@ -1839,6 +1862,8 @@ void ptrace_notify(int exit_code)
65317 {
65318 siginfo_t info;
65319
65320+ pax_track_stack();
65321+
65322 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
65323
65324 memset(&info, 0, sizeof info);
65325@@ -2639,7 +2664,15 @@ do_send_specific(pid_t tgid, pid_t pid,
65326 int error = -ESRCH;
65327
65328 rcu_read_lock();
65329- p = find_task_by_vpid(pid);
65330+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65331+ /* allow glibc communication via tgkill to other threads in our
65332+ thread group */
65333+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
65334+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
65335+ p = find_task_by_vpid_unrestricted(pid);
65336+ else
65337+#endif
65338+ p = find_task_by_vpid(pid);
65339 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
65340 error = check_kill_permission(sig, info, p);
65341 /*
65342diff -urNp linux-3.0.7/kernel/smp.c linux-3.0.7/kernel/smp.c
65343--- linux-3.0.7/kernel/smp.c 2011-07-21 22:17:23.000000000 -0400
65344+++ linux-3.0.7/kernel/smp.c 2011-08-23 21:47:56.000000000 -0400
65345@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t fu
65346 }
65347 EXPORT_SYMBOL(smp_call_function);
65348
65349-void ipi_call_lock(void)
65350+void ipi_call_lock(void) __acquires(call_function.lock)
65351 {
65352 raw_spin_lock(&call_function.lock);
65353 }
65354
65355-void ipi_call_unlock(void)
65356+void ipi_call_unlock(void) __releases(call_function.lock)
65357 {
65358 raw_spin_unlock(&call_function.lock);
65359 }
65360
65361-void ipi_call_lock_irq(void)
65362+void ipi_call_lock_irq(void) __acquires(call_function.lock)
65363 {
65364 raw_spin_lock_irq(&call_function.lock);
65365 }
65366
65367-void ipi_call_unlock_irq(void)
65368+void ipi_call_unlock_irq(void) __releases(call_function.lock)
65369 {
65370 raw_spin_unlock_irq(&call_function.lock);
65371 }
65372diff -urNp linux-3.0.7/kernel/softirq.c linux-3.0.7/kernel/softirq.c
65373--- linux-3.0.7/kernel/softirq.c 2011-07-21 22:17:23.000000000 -0400
65374+++ linux-3.0.7/kernel/softirq.c 2011-08-23 21:47:56.000000000 -0400
65375@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
65376
65377 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
65378
65379-char *softirq_to_name[NR_SOFTIRQS] = {
65380+const char * const softirq_to_name[NR_SOFTIRQS] = {
65381 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
65382 "TASKLET", "SCHED", "HRTIMER", "RCU"
65383 };
65384@@ -235,7 +235,7 @@ restart:
65385 kstat_incr_softirqs_this_cpu(vec_nr);
65386
65387 trace_softirq_entry(vec_nr);
65388- h->action(h);
65389+ h->action();
65390 trace_softirq_exit(vec_nr);
65391 if (unlikely(prev_count != preempt_count())) {
65392 printk(KERN_ERR "huh, entered softirq %u %s %p"
65393@@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
65394 local_irq_restore(flags);
65395 }
65396
65397-void open_softirq(int nr, void (*action)(struct softirq_action *))
65398+void open_softirq(int nr, void (*action)(void))
65399 {
65400- softirq_vec[nr].action = action;
65401+ pax_open_kernel();
65402+ *(void **)&softirq_vec[nr].action = action;
65403+ pax_close_kernel();
65404 }
65405
65406 /*
65407@@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct
65408
65409 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
65410
65411-static void tasklet_action(struct softirq_action *a)
65412+static void tasklet_action(void)
65413 {
65414 struct tasklet_struct *list;
65415
65416@@ -476,7 +478,7 @@ static void tasklet_action(struct softir
65417 }
65418 }
65419
65420-static void tasklet_hi_action(struct softirq_action *a)
65421+static void tasklet_hi_action(void)
65422 {
65423 struct tasklet_struct *list;
65424
65425diff -urNp linux-3.0.7/kernel/sys.c linux-3.0.7/kernel/sys.c
65426--- linux-3.0.7/kernel/sys.c 2011-09-02 18:11:26.000000000 -0400
65427+++ linux-3.0.7/kernel/sys.c 2011-10-06 04:17:55.000000000 -0400
65428@@ -158,6 +158,12 @@ static int set_one_prio(struct task_stru
65429 error = -EACCES;
65430 goto out;
65431 }
65432+
65433+ if (gr_handle_chroot_setpriority(p, niceval)) {
65434+ error = -EACCES;
65435+ goto out;
65436+ }
65437+
65438 no_nice = security_task_setnice(p, niceval);
65439 if (no_nice) {
65440 error = no_nice;
65441@@ -541,6 +547,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
65442 goto error;
65443 }
65444
65445+ if (gr_check_group_change(new->gid, new->egid, -1))
65446+ goto error;
65447+
65448 if (rgid != (gid_t) -1 ||
65449 (egid != (gid_t) -1 && egid != old->gid))
65450 new->sgid = new->egid;
65451@@ -570,6 +579,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
65452 old = current_cred();
65453
65454 retval = -EPERM;
65455+
65456+ if (gr_check_group_change(gid, gid, gid))
65457+ goto error;
65458+
65459 if (nsown_capable(CAP_SETGID))
65460 new->gid = new->egid = new->sgid = new->fsgid = gid;
65461 else if (gid == old->gid || gid == old->sgid)
65462@@ -595,11 +608,18 @@ static int set_user(struct cred *new)
65463 if (!new_user)
65464 return -EAGAIN;
65465
65466+ /*
65467+ * We don't fail in case of NPROC limit excess here because too many
65468+ * poorly written programs don't check set*uid() return code, assuming
65469+ * it never fails if called by root. We may still enforce NPROC limit
65470+ * for programs doing set*uid()+execve() by harmlessly deferring the
65471+ * failure to the execve() stage.
65472+ */
65473 if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
65474- new_user != INIT_USER) {
65475- free_uid(new_user);
65476- return -EAGAIN;
65477- }
65478+ new_user != INIT_USER)
65479+ current->flags |= PF_NPROC_EXCEEDED;
65480+ else
65481+ current->flags &= ~PF_NPROC_EXCEEDED;
65482
65483 free_uid(new->user);
65484 new->user = new_user;
65485@@ -650,6 +670,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
65486 goto error;
65487 }
65488
65489+ if (gr_check_user_change(new->uid, new->euid, -1))
65490+ goto error;
65491+
65492 if (new->uid != old->uid) {
65493 retval = set_user(new);
65494 if (retval < 0)
65495@@ -694,6 +717,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
65496 old = current_cred();
65497
65498 retval = -EPERM;
65499+
65500+ if (gr_check_crash_uid(uid))
65501+ goto error;
65502+ if (gr_check_user_change(uid, uid, uid))
65503+ goto error;
65504+
65505 if (nsown_capable(CAP_SETUID)) {
65506 new->suid = new->uid = uid;
65507 if (uid != old->uid) {
65508@@ -748,6 +777,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
65509 goto error;
65510 }
65511
65512+ if (gr_check_user_change(ruid, euid, -1))
65513+ goto error;
65514+
65515 if (ruid != (uid_t) -1) {
65516 new->uid = ruid;
65517 if (ruid != old->uid) {
65518@@ -812,6 +844,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
65519 goto error;
65520 }
65521
65522+ if (gr_check_group_change(rgid, egid, -1))
65523+ goto error;
65524+
65525 if (rgid != (gid_t) -1)
65526 new->gid = rgid;
65527 if (egid != (gid_t) -1)
65528@@ -858,6 +893,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
65529 old = current_cred();
65530 old_fsuid = old->fsuid;
65531
65532+ if (gr_check_user_change(-1, -1, uid))
65533+ goto error;
65534+
65535 if (uid == old->uid || uid == old->euid ||
65536 uid == old->suid || uid == old->fsuid ||
65537 nsown_capable(CAP_SETUID)) {
65538@@ -868,6 +906,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
65539 }
65540 }
65541
65542+error:
65543 abort_creds(new);
65544 return old_fsuid;
65545
65546@@ -894,12 +933,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
65547 if (gid == old->gid || gid == old->egid ||
65548 gid == old->sgid || gid == old->fsgid ||
65549 nsown_capable(CAP_SETGID)) {
65550+ if (gr_check_group_change(-1, -1, gid))
65551+ goto error;
65552+
65553 if (gid != old_fsgid) {
65554 new->fsgid = gid;
65555 goto change_okay;
65556 }
65557 }
65558
65559+error:
65560 abort_creds(new);
65561 return old_fsgid;
65562
65563@@ -1205,19 +1248,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_
65564 return -EFAULT;
65565
65566 down_read(&uts_sem);
65567- error = __copy_to_user(&name->sysname, &utsname()->sysname,
65568+ error = __copy_to_user(name->sysname, &utsname()->sysname,
65569 __OLD_UTS_LEN);
65570 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
65571- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
65572+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
65573 __OLD_UTS_LEN);
65574 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
65575- error |= __copy_to_user(&name->release, &utsname()->release,
65576+ error |= __copy_to_user(name->release, &utsname()->release,
65577 __OLD_UTS_LEN);
65578 error |= __put_user(0, name->release + __OLD_UTS_LEN);
65579- error |= __copy_to_user(&name->version, &utsname()->version,
65580+ error |= __copy_to_user(name->version, &utsname()->version,
65581 __OLD_UTS_LEN);
65582 error |= __put_user(0, name->version + __OLD_UTS_LEN);
65583- error |= __copy_to_user(&name->machine, &utsname()->machine,
65584+ error |= __copy_to_user(name->machine, &utsname()->machine,
65585 __OLD_UTS_LEN);
65586 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
65587 up_read(&uts_sem);
65588@@ -1680,7 +1723,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
65589 error = get_dumpable(me->mm);
65590 break;
65591 case PR_SET_DUMPABLE:
65592- if (arg2 < 0 || arg2 > 1) {
65593+ if (arg2 > 1) {
65594 error = -EINVAL;
65595 break;
65596 }
65597diff -urNp linux-3.0.7/kernel/sysctl_binary.c linux-3.0.7/kernel/sysctl_binary.c
65598--- linux-3.0.7/kernel/sysctl_binary.c 2011-07-21 22:17:23.000000000 -0400
65599+++ linux-3.0.7/kernel/sysctl_binary.c 2011-10-06 04:17:55.000000000 -0400
65600@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *f
65601 int i;
65602
65603 set_fs(KERNEL_DS);
65604- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
65605+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
65606 set_fs(old_fs);
65607 if (result < 0)
65608 goto out_kfree;
65609@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *f
65610 }
65611
65612 set_fs(KERNEL_DS);
65613- result = vfs_write(file, buffer, str - buffer, &pos);
65614+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
65615 set_fs(old_fs);
65616 if (result < 0)
65617 goto out_kfree;
65618@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file
65619 int i;
65620
65621 set_fs(KERNEL_DS);
65622- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
65623+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
65624 set_fs(old_fs);
65625 if (result < 0)
65626 goto out_kfree;
65627@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file
65628 }
65629
65630 set_fs(KERNEL_DS);
65631- result = vfs_write(file, buffer, str - buffer, &pos);
65632+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
65633 set_fs(old_fs);
65634 if (result < 0)
65635 goto out_kfree;
65636@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *fil
65637 int i;
65638
65639 set_fs(KERNEL_DS);
65640- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
65641+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
65642 set_fs(old_fs);
65643 if (result < 0)
65644 goto out;
65645@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struc
65646 __le16 dnaddr;
65647
65648 set_fs(KERNEL_DS);
65649- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
65650+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
65651 set_fs(old_fs);
65652 if (result < 0)
65653 goto out;
65654@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struc
65655 le16_to_cpu(dnaddr) & 0x3ff);
65656
65657 set_fs(KERNEL_DS);
65658- result = vfs_write(file, buf, len, &pos);
65659+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
65660 set_fs(old_fs);
65661 if (result < 0)
65662 goto out;
65663diff -urNp linux-3.0.7/kernel/sysctl.c linux-3.0.7/kernel/sysctl.c
65664--- linux-3.0.7/kernel/sysctl.c 2011-07-21 22:17:23.000000000 -0400
65665+++ linux-3.0.7/kernel/sysctl.c 2011-08-23 21:48:14.000000000 -0400
65666@@ -85,6 +85,13 @@
65667
65668
65669 #if defined(CONFIG_SYSCTL)
65670+#include <linux/grsecurity.h>
65671+#include <linux/grinternal.h>
65672+
65673+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
65674+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
65675+ const int op);
65676+extern int gr_handle_chroot_sysctl(const int op);
65677
65678 /* External variables not in a header file. */
65679 extern int sysctl_overcommit_memory;
65680@@ -197,6 +204,7 @@ static int sysrq_sysctl_handler(ctl_tabl
65681 }
65682
65683 #endif
65684+extern struct ctl_table grsecurity_table[];
65685
65686 static struct ctl_table root_table[];
65687 static struct ctl_table_root sysctl_table_root;
65688@@ -226,6 +234,20 @@ extern struct ctl_table epoll_table[];
65689 int sysctl_legacy_va_layout;
65690 #endif
65691
65692+#ifdef CONFIG_PAX_SOFTMODE
65693+static ctl_table pax_table[] = {
65694+ {
65695+ .procname = "softmode",
65696+ .data = &pax_softmode,
65697+ .maxlen = sizeof(unsigned int),
65698+ .mode = 0600,
65699+ .proc_handler = &proc_dointvec,
65700+ },
65701+
65702+ { }
65703+};
65704+#endif
65705+
65706 /* The default sysctl tables: */
65707
65708 static struct ctl_table root_table[] = {
65709@@ -272,6 +294,22 @@ static int max_extfrag_threshold = 1000;
65710 #endif
65711
65712 static struct ctl_table kern_table[] = {
65713+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
65714+ {
65715+ .procname = "grsecurity",
65716+ .mode = 0500,
65717+ .child = grsecurity_table,
65718+ },
65719+#endif
65720+
65721+#ifdef CONFIG_PAX_SOFTMODE
65722+ {
65723+ .procname = "pax",
65724+ .mode = 0500,
65725+ .child = pax_table,
65726+ },
65727+#endif
65728+
65729 {
65730 .procname = "sched_child_runs_first",
65731 .data = &sysctl_sched_child_runs_first,
65732@@ -546,7 +584,7 @@ static struct ctl_table kern_table[] = {
65733 .data = &modprobe_path,
65734 .maxlen = KMOD_PATH_LEN,
65735 .mode = 0644,
65736- .proc_handler = proc_dostring,
65737+ .proc_handler = proc_dostring_modpriv,
65738 },
65739 {
65740 .procname = "modules_disabled",
65741@@ -713,16 +751,20 @@ static struct ctl_table kern_table[] = {
65742 .extra1 = &zero,
65743 .extra2 = &one,
65744 },
65745+#endif
65746 {
65747 .procname = "kptr_restrict",
65748 .data = &kptr_restrict,
65749 .maxlen = sizeof(int),
65750 .mode = 0644,
65751 .proc_handler = proc_dmesg_restrict,
65752+#ifdef CONFIG_GRKERNSEC_HIDESYM
65753+ .extra1 = &two,
65754+#else
65755 .extra1 = &zero,
65756+#endif
65757 .extra2 = &two,
65758 },
65759-#endif
65760 {
65761 .procname = "ngroups_max",
65762 .data = &ngroups_max,
65763@@ -1205,6 +1247,13 @@ static struct ctl_table vm_table[] = {
65764 .proc_handler = proc_dointvec_minmax,
65765 .extra1 = &zero,
65766 },
65767+ {
65768+ .procname = "heap_stack_gap",
65769+ .data = &sysctl_heap_stack_gap,
65770+ .maxlen = sizeof(sysctl_heap_stack_gap),
65771+ .mode = 0644,
65772+ .proc_handler = proc_doulongvec_minmax,
65773+ },
65774 #else
65775 {
65776 .procname = "nr_trim_pages",
65777@@ -1714,6 +1763,17 @@ static int test_perm(int mode, int op)
65778 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
65779 {
65780 int mode;
65781+ int error;
65782+
65783+ if (table->parent != NULL && table->parent->procname != NULL &&
65784+ table->procname != NULL &&
65785+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
65786+ return -EACCES;
65787+ if (gr_handle_chroot_sysctl(op))
65788+ return -EACCES;
65789+ error = gr_handle_sysctl(table, op);
65790+ if (error)
65791+ return error;
65792
65793 if (root->permissions)
65794 mode = root->permissions(root, current->nsproxy, table);
65795@@ -2118,6 +2178,16 @@ int proc_dostring(struct ctl_table *tabl
65796 buffer, lenp, ppos);
65797 }
65798
65799+int proc_dostring_modpriv(struct ctl_table *table, int write,
65800+ void __user *buffer, size_t *lenp, loff_t *ppos)
65801+{
65802+ if (write && !capable(CAP_SYS_MODULE))
65803+ return -EPERM;
65804+
65805+ return _proc_do_string(table->data, table->maxlen, write,
65806+ buffer, lenp, ppos);
65807+}
65808+
65809 static size_t proc_skip_spaces(char **buf)
65810 {
65811 size_t ret;
65812@@ -2223,6 +2293,8 @@ static int proc_put_long(void __user **b
65813 len = strlen(tmp);
65814 if (len > *size)
65815 len = *size;
65816+ if (len > sizeof(tmp))
65817+ len = sizeof(tmp);
65818 if (copy_to_user(*buf, tmp, len))
65819 return -EFAULT;
65820 *size -= len;
65821@@ -2539,8 +2611,11 @@ static int __do_proc_doulongvec_minmax(v
65822 *i = val;
65823 } else {
65824 val = convdiv * (*i) / convmul;
65825- if (!first)
65826+ if (!first) {
65827 err = proc_put_char(&buffer, &left, '\t');
65828+ if (err)
65829+ break;
65830+ }
65831 err = proc_put_long(&buffer, &left, val, false);
65832 if (err)
65833 break;
65834@@ -2935,6 +3010,12 @@ int proc_dostring(struct ctl_table *tabl
65835 return -ENOSYS;
65836 }
65837
65838+int proc_dostring_modpriv(struct ctl_table *table, int write,
65839+ void __user *buffer, size_t *lenp, loff_t *ppos)
65840+{
65841+ return -ENOSYS;
65842+}
65843+
65844 int proc_dointvec(struct ctl_table *table, int write,
65845 void __user *buffer, size_t *lenp, loff_t *ppos)
65846 {
65847@@ -2991,6 +3072,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
65848 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
65849 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
65850 EXPORT_SYMBOL(proc_dostring);
65851+EXPORT_SYMBOL(proc_dostring_modpriv);
65852 EXPORT_SYMBOL(proc_doulongvec_minmax);
65853 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
65854 EXPORT_SYMBOL(register_sysctl_table);
65855diff -urNp linux-3.0.7/kernel/sysctl_check.c linux-3.0.7/kernel/sysctl_check.c
65856--- linux-3.0.7/kernel/sysctl_check.c 2011-07-21 22:17:23.000000000 -0400
65857+++ linux-3.0.7/kernel/sysctl_check.c 2011-08-23 21:48:14.000000000 -0400
65858@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
65859 set_fail(&fail, table, "Directory with extra2");
65860 } else {
65861 if ((table->proc_handler == proc_dostring) ||
65862+ (table->proc_handler == proc_dostring_modpriv) ||
65863 (table->proc_handler == proc_dointvec) ||
65864 (table->proc_handler == proc_dointvec_minmax) ||
65865 (table->proc_handler == proc_dointvec_jiffies) ||
65866diff -urNp linux-3.0.7/kernel/taskstats.c linux-3.0.7/kernel/taskstats.c
65867--- linux-3.0.7/kernel/taskstats.c 2011-07-21 22:17:23.000000000 -0400
65868+++ linux-3.0.7/kernel/taskstats.c 2011-08-23 21:48:14.000000000 -0400
65869@@ -27,9 +27,12 @@
65870 #include <linux/cgroup.h>
65871 #include <linux/fs.h>
65872 #include <linux/file.h>
65873+#include <linux/grsecurity.h>
65874 #include <net/genetlink.h>
65875 #include <asm/atomic.h>
65876
65877+extern int gr_is_taskstats_denied(int pid);
65878+
65879 /*
65880 * Maximum length of a cpumask that can be specified in
65881 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
65882@@ -558,6 +561,9 @@ err:
65883
65884 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
65885 {
65886+ if (gr_is_taskstats_denied(current->pid))
65887+ return -EACCES;
65888+
65889 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
65890 return cmd_attr_register_cpumask(info);
65891 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
65892diff -urNp linux-3.0.7/kernel/time/alarmtimer.c linux-3.0.7/kernel/time/alarmtimer.c
65893--- linux-3.0.7/kernel/time/alarmtimer.c 2011-10-16 21:54:54.000000000 -0400
65894+++ linux-3.0.7/kernel/time/alarmtimer.c 2011-10-16 21:55:28.000000000 -0400
65895@@ -693,7 +693,7 @@ static int __init alarmtimer_init(void)
65896 {
65897 int error = 0;
65898 int i;
65899- struct k_clock alarm_clock = {
65900+ static struct k_clock alarm_clock = {
65901 .clock_getres = alarm_clock_getres,
65902 .clock_get = alarm_clock_get,
65903 .timer_create = alarm_timer_create,
65904diff -urNp linux-3.0.7/kernel/time/tick-broadcast.c linux-3.0.7/kernel/time/tick-broadcast.c
65905--- linux-3.0.7/kernel/time/tick-broadcast.c 2011-07-21 22:17:23.000000000 -0400
65906+++ linux-3.0.7/kernel/time/tick-broadcast.c 2011-08-23 21:47:56.000000000 -0400
65907@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
65908 * then clear the broadcast bit.
65909 */
65910 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
65911- int cpu = smp_processor_id();
65912+ cpu = smp_processor_id();
65913
65914 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
65915 tick_broadcast_clear_oneshot(cpu);
65916diff -urNp linux-3.0.7/kernel/time/timekeeping.c linux-3.0.7/kernel/time/timekeeping.c
65917--- linux-3.0.7/kernel/time/timekeeping.c 2011-07-21 22:17:23.000000000 -0400
65918+++ linux-3.0.7/kernel/time/timekeeping.c 2011-08-23 21:48:14.000000000 -0400
65919@@ -14,6 +14,7 @@
65920 #include <linux/init.h>
65921 #include <linux/mm.h>
65922 #include <linux/sched.h>
65923+#include <linux/grsecurity.h>
65924 #include <linux/syscore_ops.h>
65925 #include <linux/clocksource.h>
65926 #include <linux/jiffies.h>
65927@@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
65928 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
65929 return -EINVAL;
65930
65931+ gr_log_timechange();
65932+
65933 write_seqlock_irqsave(&xtime_lock, flags);
65934
65935 timekeeping_forward_now();
65936diff -urNp linux-3.0.7/kernel/time/timer_list.c linux-3.0.7/kernel/time/timer_list.c
65937--- linux-3.0.7/kernel/time/timer_list.c 2011-07-21 22:17:23.000000000 -0400
65938+++ linux-3.0.7/kernel/time/timer_list.c 2011-08-23 21:48:14.000000000 -0400
65939@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
65940
65941 static void print_name_offset(struct seq_file *m, void *sym)
65942 {
65943+#ifdef CONFIG_GRKERNSEC_HIDESYM
65944+ SEQ_printf(m, "<%p>", NULL);
65945+#else
65946 char symname[KSYM_NAME_LEN];
65947
65948 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
65949 SEQ_printf(m, "<%pK>", sym);
65950 else
65951 SEQ_printf(m, "%s", symname);
65952+#endif
65953 }
65954
65955 static void
65956@@ -112,7 +116,11 @@ next_one:
65957 static void
65958 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
65959 {
65960+#ifdef CONFIG_GRKERNSEC_HIDESYM
65961+ SEQ_printf(m, " .base: %p\n", NULL);
65962+#else
65963 SEQ_printf(m, " .base: %pK\n", base);
65964+#endif
65965 SEQ_printf(m, " .index: %d\n",
65966 base->index);
65967 SEQ_printf(m, " .resolution: %Lu nsecs\n",
65968@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
65969 {
65970 struct proc_dir_entry *pe;
65971
65972+#ifdef CONFIG_GRKERNSEC_PROC_ADD
65973+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
65974+#else
65975 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
65976+#endif
65977 if (!pe)
65978 return -ENOMEM;
65979 return 0;
65980diff -urNp linux-3.0.7/kernel/time/timer_stats.c linux-3.0.7/kernel/time/timer_stats.c
65981--- linux-3.0.7/kernel/time/timer_stats.c 2011-07-21 22:17:23.000000000 -0400
65982+++ linux-3.0.7/kernel/time/timer_stats.c 2011-08-23 21:48:14.000000000 -0400
65983@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
65984 static unsigned long nr_entries;
65985 static struct entry entries[MAX_ENTRIES];
65986
65987-static atomic_t overflow_count;
65988+static atomic_unchecked_t overflow_count;
65989
65990 /*
65991 * The entries are in a hash-table, for fast lookup:
65992@@ -140,7 +140,7 @@ static void reset_entries(void)
65993 nr_entries = 0;
65994 memset(entries, 0, sizeof(entries));
65995 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
65996- atomic_set(&overflow_count, 0);
65997+ atomic_set_unchecked(&overflow_count, 0);
65998 }
65999
66000 static struct entry *alloc_entry(void)
66001@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
66002 if (likely(entry))
66003 entry->count++;
66004 else
66005- atomic_inc(&overflow_count);
66006+ atomic_inc_unchecked(&overflow_count);
66007
66008 out_unlock:
66009 raw_spin_unlock_irqrestore(lock, flags);
66010@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
66011
66012 static void print_name_offset(struct seq_file *m, unsigned long addr)
66013 {
66014+#ifdef CONFIG_GRKERNSEC_HIDESYM
66015+ seq_printf(m, "<%p>", NULL);
66016+#else
66017 char symname[KSYM_NAME_LEN];
66018
66019 if (lookup_symbol_name(addr, symname) < 0)
66020 seq_printf(m, "<%p>", (void *)addr);
66021 else
66022 seq_printf(m, "%s", symname);
66023+#endif
66024 }
66025
66026 static int tstats_show(struct seq_file *m, void *v)
66027@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
66028
66029 seq_puts(m, "Timer Stats Version: v0.2\n");
66030 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
66031- if (atomic_read(&overflow_count))
66032+ if (atomic_read_unchecked(&overflow_count))
66033 seq_printf(m, "Overflow: %d entries\n",
66034- atomic_read(&overflow_count));
66035+ atomic_read_unchecked(&overflow_count));
66036
66037 for (i = 0; i < nr_entries; i++) {
66038 entry = entries + i;
66039@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
66040 {
66041 struct proc_dir_entry *pe;
66042
66043+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66044+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
66045+#else
66046 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
66047+#endif
66048 if (!pe)
66049 return -ENOMEM;
66050 return 0;
66051diff -urNp linux-3.0.7/kernel/time.c linux-3.0.7/kernel/time.c
66052--- linux-3.0.7/kernel/time.c 2011-07-21 22:17:23.000000000 -0400
66053+++ linux-3.0.7/kernel/time.c 2011-08-23 21:48:14.000000000 -0400
66054@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
66055 return error;
66056
66057 if (tz) {
66058+ /* we log in do_settimeofday called below, so don't log twice
66059+ */
66060+ if (!tv)
66061+ gr_log_timechange();
66062+
66063 /* SMP safe, global irq locking makes it work. */
66064 sys_tz = *tz;
66065 update_vsyscall_tz();
66066diff -urNp linux-3.0.7/kernel/timer.c linux-3.0.7/kernel/timer.c
66067--- linux-3.0.7/kernel/timer.c 2011-07-21 22:17:23.000000000 -0400
66068+++ linux-3.0.7/kernel/timer.c 2011-08-23 21:47:56.000000000 -0400
66069@@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
66070 /*
66071 * This function runs timers and the timer-tq in bottom half context.
66072 */
66073-static void run_timer_softirq(struct softirq_action *h)
66074+static void run_timer_softirq(void)
66075 {
66076 struct tvec_base *base = __this_cpu_read(tvec_bases);
66077
66078diff -urNp linux-3.0.7/kernel/trace/blktrace.c linux-3.0.7/kernel/trace/blktrace.c
66079--- linux-3.0.7/kernel/trace/blktrace.c 2011-07-21 22:17:23.000000000 -0400
66080+++ linux-3.0.7/kernel/trace/blktrace.c 2011-08-23 21:47:56.000000000 -0400
66081@@ -321,7 +321,7 @@ static ssize_t blk_dropped_read(struct f
66082 struct blk_trace *bt = filp->private_data;
66083 char buf[16];
66084
66085- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
66086+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
66087
66088 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
66089 }
66090@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(str
66091 return 1;
66092
66093 bt = buf->chan->private_data;
66094- atomic_inc(&bt->dropped);
66095+ atomic_inc_unchecked(&bt->dropped);
66096 return 0;
66097 }
66098
66099@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_qu
66100
66101 bt->dir = dir;
66102 bt->dev = dev;
66103- atomic_set(&bt->dropped, 0);
66104+ atomic_set_unchecked(&bt->dropped, 0);
66105
66106 ret = -EIO;
66107 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
66108diff -urNp linux-3.0.7/kernel/trace/ftrace.c linux-3.0.7/kernel/trace/ftrace.c
66109--- linux-3.0.7/kernel/trace/ftrace.c 2011-10-17 23:17:09.000000000 -0400
66110+++ linux-3.0.7/kernel/trace/ftrace.c 2011-10-17 23:17:19.000000000 -0400
66111@@ -1566,12 +1566,17 @@ ftrace_code_disable(struct module *mod,
66112 if (unlikely(ftrace_disabled))
66113 return 0;
66114
66115+ ret = ftrace_arch_code_modify_prepare();
66116+ FTRACE_WARN_ON(ret);
66117+ if (ret)
66118+ return 0;
66119+
66120 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
66121+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
66122 if (ret) {
66123 ftrace_bug(ret, ip);
66124- return 0;
66125 }
66126- return 1;
66127+ return ret ? 0 : 1;
66128 }
66129
66130 /*
66131@@ -2570,7 +2575,7 @@ static void ftrace_free_entry_rcu(struct
66132
66133 int
66134 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
66135- void *data)
66136+ void *data)
66137 {
66138 struct ftrace_func_probe *entry;
66139 struct ftrace_page *pg;
66140diff -urNp linux-3.0.7/kernel/trace/trace.c linux-3.0.7/kernel/trace/trace.c
66141--- linux-3.0.7/kernel/trace/trace.c 2011-07-21 22:17:23.000000000 -0400
66142+++ linux-3.0.7/kernel/trace/trace.c 2011-08-23 21:48:14.000000000 -0400
66143@@ -3339,6 +3339,8 @@ static ssize_t tracing_splice_read_pipe(
66144 size_t rem;
66145 unsigned int i;
66146
66147+ pax_track_stack();
66148+
66149 if (splice_grow_spd(pipe, &spd))
66150 return -ENOMEM;
66151
66152@@ -3822,6 +3824,8 @@ tracing_buffers_splice_read(struct file
66153 int entries, size, i;
66154 size_t ret;
66155
66156+ pax_track_stack();
66157+
66158 if (splice_grow_spd(pipe, &spd))
66159 return -ENOMEM;
66160
66161@@ -3990,10 +3994,9 @@ static const struct file_operations trac
66162 };
66163 #endif
66164
66165-static struct dentry *d_tracer;
66166-
66167 struct dentry *tracing_init_dentry(void)
66168 {
66169+ static struct dentry *d_tracer;
66170 static int once;
66171
66172 if (d_tracer)
66173@@ -4013,10 +4016,9 @@ struct dentry *tracing_init_dentry(void)
66174 return d_tracer;
66175 }
66176
66177-static struct dentry *d_percpu;
66178-
66179 struct dentry *tracing_dentry_percpu(void)
66180 {
66181+ static struct dentry *d_percpu;
66182 static int once;
66183 struct dentry *d_tracer;
66184
66185diff -urNp linux-3.0.7/kernel/trace/trace_events.c linux-3.0.7/kernel/trace/trace_events.c
66186--- linux-3.0.7/kernel/trace/trace_events.c 2011-09-02 18:11:21.000000000 -0400
66187+++ linux-3.0.7/kernel/trace/trace_events.c 2011-08-23 21:47:56.000000000 -0400
66188@@ -1318,10 +1318,6 @@ static LIST_HEAD(ftrace_module_file_list
66189 struct ftrace_module_file_ops {
66190 struct list_head list;
66191 struct module *mod;
66192- struct file_operations id;
66193- struct file_operations enable;
66194- struct file_operations format;
66195- struct file_operations filter;
66196 };
66197
66198 static struct ftrace_module_file_ops *
66199@@ -1342,17 +1338,12 @@ trace_create_file_ops(struct module *mod
66200
66201 file_ops->mod = mod;
66202
66203- file_ops->id = ftrace_event_id_fops;
66204- file_ops->id.owner = mod;
66205-
66206- file_ops->enable = ftrace_enable_fops;
66207- file_ops->enable.owner = mod;
66208-
66209- file_ops->filter = ftrace_event_filter_fops;
66210- file_ops->filter.owner = mod;
66211-
66212- file_ops->format = ftrace_event_format_fops;
66213- file_ops->format.owner = mod;
66214+ pax_open_kernel();
66215+ *(void **)&mod->trace_id.owner = mod;
66216+ *(void **)&mod->trace_enable.owner = mod;
66217+ *(void **)&mod->trace_filter.owner = mod;
66218+ *(void **)&mod->trace_format.owner = mod;
66219+ pax_close_kernel();
66220
66221 list_add(&file_ops->list, &ftrace_module_file_list);
66222
66223@@ -1376,8 +1367,8 @@ static void trace_module_add_events(stru
66224
66225 for_each_event(call, start, end) {
66226 __trace_add_event_call(*call, mod,
66227- &file_ops->id, &file_ops->enable,
66228- &file_ops->filter, &file_ops->format);
66229+ &mod->trace_id, &mod->trace_enable,
66230+ &mod->trace_filter, &mod->trace_format);
66231 }
66232 }
66233
66234diff -urNp linux-3.0.7/kernel/trace/trace_kprobe.c linux-3.0.7/kernel/trace/trace_kprobe.c
66235--- linux-3.0.7/kernel/trace/trace_kprobe.c 2011-07-21 22:17:23.000000000 -0400
66236+++ linux-3.0.7/kernel/trace/trace_kprobe.c 2011-10-06 04:17:55.000000000 -0400
66237@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66238 long ret;
66239 int maxlen = get_rloc_len(*(u32 *)dest);
66240 u8 *dst = get_rloc_data(dest);
66241- u8 *src = addr;
66242+ const u8 __user *src = (const u8 __force_user *)addr;
66243 mm_segment_t old_fs = get_fs();
66244 if (!maxlen)
66245 return;
66246@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66247 pagefault_disable();
66248 do
66249 ret = __copy_from_user_inatomic(dst++, src++, 1);
66250- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
66251+ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
66252 dst[-1] = '\0';
66253 pagefault_enable();
66254 set_fs(old_fs);
66255@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66256 ((u8 *)get_rloc_data(dest))[0] = '\0';
66257 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
66258 } else
66259- *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
66260+ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
66261 get_rloc_offs(*(u32 *)dest));
66262 }
66263 /* Return the length of string -- including null terminal byte */
66264@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66265 set_fs(KERNEL_DS);
66266 pagefault_disable();
66267 do {
66268- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
66269+ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
66270 len++;
66271 } while (c && ret == 0 && len < MAX_STRING_SIZE);
66272 pagefault_enable();
66273diff -urNp linux-3.0.7/kernel/trace/trace_mmiotrace.c linux-3.0.7/kernel/trace/trace_mmiotrace.c
66274--- linux-3.0.7/kernel/trace/trace_mmiotrace.c 2011-07-21 22:17:23.000000000 -0400
66275+++ linux-3.0.7/kernel/trace/trace_mmiotrace.c 2011-08-23 21:47:56.000000000 -0400
66276@@ -24,7 +24,7 @@ struct header_iter {
66277 static struct trace_array *mmio_trace_array;
66278 static bool overrun_detected;
66279 static unsigned long prev_overruns;
66280-static atomic_t dropped_count;
66281+static atomic_unchecked_t dropped_count;
66282
66283 static void mmio_reset_data(struct trace_array *tr)
66284 {
66285@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
66286
66287 static unsigned long count_overruns(struct trace_iterator *iter)
66288 {
66289- unsigned long cnt = atomic_xchg(&dropped_count, 0);
66290+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
66291 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
66292
66293 if (over > prev_overruns)
66294@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
66295 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
66296 sizeof(*entry), 0, pc);
66297 if (!event) {
66298- atomic_inc(&dropped_count);
66299+ atomic_inc_unchecked(&dropped_count);
66300 return;
66301 }
66302 entry = ring_buffer_event_data(event);
66303@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
66304 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
66305 sizeof(*entry), 0, pc);
66306 if (!event) {
66307- atomic_inc(&dropped_count);
66308+ atomic_inc_unchecked(&dropped_count);
66309 return;
66310 }
66311 entry = ring_buffer_event_data(event);
66312diff -urNp linux-3.0.7/kernel/trace/trace_output.c linux-3.0.7/kernel/trace/trace_output.c
66313--- linux-3.0.7/kernel/trace/trace_output.c 2011-07-21 22:17:23.000000000 -0400
66314+++ linux-3.0.7/kernel/trace/trace_output.c 2011-08-23 21:47:56.000000000 -0400
66315@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
66316
66317 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
66318 if (!IS_ERR(p)) {
66319- p = mangle_path(s->buffer + s->len, p, "\n");
66320+ p = mangle_path(s->buffer + s->len, p, "\n\\");
66321 if (p) {
66322 s->len = p - s->buffer;
66323 return 1;
66324diff -urNp linux-3.0.7/kernel/trace/trace_stack.c linux-3.0.7/kernel/trace/trace_stack.c
66325--- linux-3.0.7/kernel/trace/trace_stack.c 2011-07-21 22:17:23.000000000 -0400
66326+++ linux-3.0.7/kernel/trace/trace_stack.c 2011-08-23 21:47:56.000000000 -0400
66327@@ -50,7 +50,7 @@ static inline void check_stack(void)
66328 return;
66329
66330 /* we do not handle interrupt stacks yet */
66331- if (!object_is_on_stack(&this_size))
66332+ if (!object_starts_on_stack(&this_size))
66333 return;
66334
66335 local_irq_save(flags);
66336diff -urNp linux-3.0.7/kernel/trace/trace_workqueue.c linux-3.0.7/kernel/trace/trace_workqueue.c
66337--- linux-3.0.7/kernel/trace/trace_workqueue.c 2011-07-21 22:17:23.000000000 -0400
66338+++ linux-3.0.7/kernel/trace/trace_workqueue.c 2011-08-23 21:47:56.000000000 -0400
66339@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
66340 int cpu;
66341 pid_t pid;
66342 /* Can be inserted from interrupt or user context, need to be atomic */
66343- atomic_t inserted;
66344+ atomic_unchecked_t inserted;
66345 /*
66346 * Don't need to be atomic, works are serialized in a single workqueue thread
66347 * on a single CPU.
66348@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
66349 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
66350 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
66351 if (node->pid == wq_thread->pid) {
66352- atomic_inc(&node->inserted);
66353+ atomic_inc_unchecked(&node->inserted);
66354 goto found;
66355 }
66356 }
66357@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
66358 tsk = get_pid_task(pid, PIDTYPE_PID);
66359 if (tsk) {
66360 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
66361- atomic_read(&cws->inserted), cws->executed,
66362+ atomic_read_unchecked(&cws->inserted), cws->executed,
66363 tsk->comm);
66364 put_task_struct(tsk);
66365 }
66366diff -urNp linux-3.0.7/lib/bitmap.c linux-3.0.7/lib/bitmap.c
66367--- linux-3.0.7/lib/bitmap.c 2011-07-21 22:17:23.000000000 -0400
66368+++ linux-3.0.7/lib/bitmap.c 2011-10-06 04:17:55.000000000 -0400
66369@@ -421,7 +421,7 @@ int __bitmap_parse(const char *buf, unsi
66370 {
66371 int c, old_c, totaldigits, ndigits, nchunks, nbits;
66372 u32 chunk;
66373- const char __user *ubuf = buf;
66374+ const char __user *ubuf = (const char __force_user *)buf;
66375
66376 bitmap_zero(maskp, nmaskbits);
66377
66378@@ -506,7 +506,7 @@ int bitmap_parse_user(const char __user
66379 {
66380 if (!access_ok(VERIFY_READ, ubuf, ulen))
66381 return -EFAULT;
66382- return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
66383+ return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
66384 }
66385 EXPORT_SYMBOL(bitmap_parse_user);
66386
66387@@ -596,7 +596,7 @@ static int __bitmap_parselist(const char
66388 {
66389 unsigned a, b;
66390 int c, old_c, totaldigits;
66391- const char __user *ubuf = buf;
66392+ const char __user *ubuf = (const char __force_user *)buf;
66393 int exp_digit, in_range;
66394
66395 totaldigits = c = 0;
66396@@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __u
66397 {
66398 if (!access_ok(VERIFY_READ, ubuf, ulen))
66399 return -EFAULT;
66400- return __bitmap_parselist((const char *)ubuf,
66401+ return __bitmap_parselist((const char __force_kernel *)ubuf,
66402 ulen, 1, maskp, nmaskbits);
66403 }
66404 EXPORT_SYMBOL(bitmap_parselist_user);
66405diff -urNp linux-3.0.7/lib/bug.c linux-3.0.7/lib/bug.c
66406--- linux-3.0.7/lib/bug.c 2011-07-21 22:17:23.000000000 -0400
66407+++ linux-3.0.7/lib/bug.c 2011-08-23 21:47:56.000000000 -0400
66408@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
66409 return BUG_TRAP_TYPE_NONE;
66410
66411 bug = find_bug(bugaddr);
66412+ if (!bug)
66413+ return BUG_TRAP_TYPE_NONE;
66414
66415 file = NULL;
66416 line = 0;
66417diff -urNp linux-3.0.7/lib/debugobjects.c linux-3.0.7/lib/debugobjects.c
66418--- linux-3.0.7/lib/debugobjects.c 2011-07-21 22:17:23.000000000 -0400
66419+++ linux-3.0.7/lib/debugobjects.c 2011-08-23 21:47:56.000000000 -0400
66420@@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
66421 if (limit > 4)
66422 return;
66423
66424- is_on_stack = object_is_on_stack(addr);
66425+ is_on_stack = object_starts_on_stack(addr);
66426 if (is_on_stack == onstack)
66427 return;
66428
66429diff -urNp linux-3.0.7/lib/devres.c linux-3.0.7/lib/devres.c
66430--- linux-3.0.7/lib/devres.c 2011-07-21 22:17:23.000000000 -0400
66431+++ linux-3.0.7/lib/devres.c 2011-10-06 04:17:55.000000000 -0400
66432@@ -81,7 +81,7 @@ void devm_iounmap(struct device *dev, vo
66433 {
66434 iounmap(addr);
66435 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
66436- (void *)addr));
66437+ (void __force *)addr));
66438 }
66439 EXPORT_SYMBOL(devm_iounmap);
66440
66441@@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *de
66442 {
66443 ioport_unmap(addr);
66444 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
66445- devm_ioport_map_match, (void *)addr));
66446+ devm_ioport_map_match, (void __force *)addr));
66447 }
66448 EXPORT_SYMBOL(devm_ioport_unmap);
66449
66450diff -urNp linux-3.0.7/lib/dma-debug.c linux-3.0.7/lib/dma-debug.c
66451--- linux-3.0.7/lib/dma-debug.c 2011-07-21 22:17:23.000000000 -0400
66452+++ linux-3.0.7/lib/dma-debug.c 2011-08-23 21:47:56.000000000 -0400
66453@@ -870,7 +870,7 @@ out:
66454
66455 static void check_for_stack(struct device *dev, void *addr)
66456 {
66457- if (object_is_on_stack(addr))
66458+ if (object_starts_on_stack(addr))
66459 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
66460 "stack [addr=%p]\n", addr);
66461 }
66462diff -urNp linux-3.0.7/lib/extable.c linux-3.0.7/lib/extable.c
66463--- linux-3.0.7/lib/extable.c 2011-07-21 22:17:23.000000000 -0400
66464+++ linux-3.0.7/lib/extable.c 2011-08-23 21:47:56.000000000 -0400
66465@@ -13,6 +13,7 @@
66466 #include <linux/init.h>
66467 #include <linux/sort.h>
66468 #include <asm/uaccess.h>
66469+#include <asm/pgtable.h>
66470
66471 #ifndef ARCH_HAS_SORT_EXTABLE
66472 /*
66473@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const v
66474 void sort_extable(struct exception_table_entry *start,
66475 struct exception_table_entry *finish)
66476 {
66477+ pax_open_kernel();
66478 sort(start, finish - start, sizeof(struct exception_table_entry),
66479 cmp_ex, NULL);
66480+ pax_close_kernel();
66481 }
66482
66483 #ifdef CONFIG_MODULES
66484diff -urNp linux-3.0.7/lib/inflate.c linux-3.0.7/lib/inflate.c
66485--- linux-3.0.7/lib/inflate.c 2011-07-21 22:17:23.000000000 -0400
66486+++ linux-3.0.7/lib/inflate.c 2011-08-23 21:47:56.000000000 -0400
66487@@ -269,7 +269,7 @@ static void free(void *where)
66488 malloc_ptr = free_mem_ptr;
66489 }
66490 #else
66491-#define malloc(a) kmalloc(a, GFP_KERNEL)
66492+#define malloc(a) kmalloc((a), GFP_KERNEL)
66493 #define free(a) kfree(a)
66494 #endif
66495
66496diff -urNp linux-3.0.7/lib/Kconfig.debug linux-3.0.7/lib/Kconfig.debug
66497--- linux-3.0.7/lib/Kconfig.debug 2011-07-21 22:17:23.000000000 -0400
66498+++ linux-3.0.7/lib/Kconfig.debug 2011-08-23 21:48:14.000000000 -0400
66499@@ -1088,6 +1088,7 @@ config LATENCYTOP
66500 depends on DEBUG_KERNEL
66501 depends on STACKTRACE_SUPPORT
66502 depends on PROC_FS
66503+ depends on !GRKERNSEC_HIDESYM
66504 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
66505 select KALLSYMS
66506 select KALLSYMS_ALL
66507diff -urNp linux-3.0.7/lib/kref.c linux-3.0.7/lib/kref.c
66508--- linux-3.0.7/lib/kref.c 2011-07-21 22:17:23.000000000 -0400
66509+++ linux-3.0.7/lib/kref.c 2011-08-23 21:47:56.000000000 -0400
66510@@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
66511 */
66512 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
66513 {
66514- WARN_ON(release == NULL);
66515+ BUG_ON(release == NULL);
66516 WARN_ON(release == (void (*)(struct kref *))kfree);
66517
66518 if (atomic_dec_and_test(&kref->refcount)) {
66519diff -urNp linux-3.0.7/lib/radix-tree.c linux-3.0.7/lib/radix-tree.c
66520--- linux-3.0.7/lib/radix-tree.c 2011-07-21 22:17:23.000000000 -0400
66521+++ linux-3.0.7/lib/radix-tree.c 2011-08-23 21:47:56.000000000 -0400
66522@@ -80,7 +80,7 @@ struct radix_tree_preload {
66523 int nr;
66524 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
66525 };
66526-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
66527+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
66528
66529 static inline void *ptr_to_indirect(void *ptr)
66530 {
66531diff -urNp linux-3.0.7/lib/vsprintf.c linux-3.0.7/lib/vsprintf.c
66532--- linux-3.0.7/lib/vsprintf.c 2011-07-21 22:17:23.000000000 -0400
66533+++ linux-3.0.7/lib/vsprintf.c 2011-08-23 21:48:14.000000000 -0400
66534@@ -16,6 +16,9 @@
66535 * - scnprintf and vscnprintf
66536 */
66537
66538+#ifdef CONFIG_GRKERNSEC_HIDESYM
66539+#define __INCLUDED_BY_HIDESYM 1
66540+#endif
66541 #include <stdarg.h>
66542 #include <linux/module.h>
66543 #include <linux/types.h>
66544@@ -435,7 +438,7 @@ char *symbol_string(char *buf, char *end
66545 char sym[KSYM_SYMBOL_LEN];
66546 if (ext == 'B')
66547 sprint_backtrace(sym, value);
66548- else if (ext != 'f' && ext != 's')
66549+ else if (ext != 'f' && ext != 's' && ext != 'a')
66550 sprint_symbol(sym, value);
66551 else
66552 kallsyms_lookup(value, NULL, NULL, NULL, sym);
66553@@ -799,7 +802,11 @@ char *uuid_string(char *buf, char *end,
66554 return string(buf, end, uuid, spec);
66555 }
66556
66557+#ifdef CONFIG_GRKERNSEC_HIDESYM
66558+int kptr_restrict __read_mostly = 2;
66559+#else
66560 int kptr_restrict __read_mostly;
66561+#endif
66562
66563 /*
66564 * Show a '%p' thing. A kernel extension is that the '%p' is followed
66565@@ -813,6 +820,8 @@ int kptr_restrict __read_mostly;
66566 * - 'S' For symbolic direct pointers with offset
66567 * - 's' For symbolic direct pointers without offset
66568 * - 'B' For backtraced symbolic direct pointers with offset
66569+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
66570+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
66571 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
66572 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
66573 * - 'M' For a 6-byte MAC address, it prints the address in the
66574@@ -857,12 +866,12 @@ char *pointer(const char *fmt, char *buf
66575 {
66576 if (!ptr && *fmt != 'K') {
66577 /*
66578- * Print (null) with the same width as a pointer so it makes
66579+ * Print (nil) with the same width as a pointer so it makes
66580 * tabular output look nice.
66581 */
66582 if (spec.field_width == -1)
66583 spec.field_width = 2 * sizeof(void *);
66584- return string(buf, end, "(null)", spec);
66585+ return string(buf, end, "(nil)", spec);
66586 }
66587
66588 switch (*fmt) {
66589@@ -872,6 +881,13 @@ char *pointer(const char *fmt, char *buf
66590 /* Fallthrough */
66591 case 'S':
66592 case 's':
66593+#ifdef CONFIG_GRKERNSEC_HIDESYM
66594+ break;
66595+#else
66596+ return symbol_string(buf, end, ptr, spec, *fmt);
66597+#endif
66598+ case 'A':
66599+ case 'a':
66600 case 'B':
66601 return symbol_string(buf, end, ptr, spec, *fmt);
66602 case 'R':
66603@@ -1631,11 +1647,11 @@ int bstr_printf(char *buf, size_t size,
66604 typeof(type) value; \
66605 if (sizeof(type) == 8) { \
66606 args = PTR_ALIGN(args, sizeof(u32)); \
66607- *(u32 *)&value = *(u32 *)args; \
66608- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
66609+ *(u32 *)&value = *(const u32 *)args; \
66610+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
66611 } else { \
66612 args = PTR_ALIGN(args, sizeof(type)); \
66613- value = *(typeof(type) *)args; \
66614+ value = *(const typeof(type) *)args; \
66615 } \
66616 args += sizeof(type); \
66617 value; \
66618@@ -1698,7 +1714,7 @@ int bstr_printf(char *buf, size_t size,
66619 case FORMAT_TYPE_STR: {
66620 const char *str_arg = args;
66621 args += strlen(str_arg) + 1;
66622- str = string(str, end, (char *)str_arg, spec);
66623+ str = string(str, end, str_arg, spec);
66624 break;
66625 }
66626
66627diff -urNp linux-3.0.7/localversion-grsec linux-3.0.7/localversion-grsec
66628--- linux-3.0.7/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
66629+++ linux-3.0.7/localversion-grsec 2011-08-23 21:48:14.000000000 -0400
66630@@ -0,0 +1 @@
66631+-grsec
66632diff -urNp linux-3.0.7/Makefile linux-3.0.7/Makefile
66633--- linux-3.0.7/Makefile 2011-10-17 23:17:08.000000000 -0400
66634+++ linux-3.0.7/Makefile 2011-10-17 23:17:19.000000000 -0400
66635@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
66636
66637 HOSTCC = gcc
66638 HOSTCXX = g++
66639-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
66640-HOSTCXXFLAGS = -O2
66641+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
66642+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
66643+HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
66644
66645 # Decide whether to build built-in, modular, or both.
66646 # Normally, just do built-in.
66647@@ -365,10 +366,12 @@ LINUXINCLUDE := -I$(srctree)/arch/$(h
66648 KBUILD_CPPFLAGS := -D__KERNEL__
66649
66650 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
66651+ -W -Wno-unused-parameter -Wno-missing-field-initializers \
66652 -fno-strict-aliasing -fno-common \
66653 -Werror-implicit-function-declaration \
66654 -Wno-format-security \
66655 -fno-delete-null-pointer-checks
66656+KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
66657 KBUILD_AFLAGS_KERNEL :=
66658 KBUILD_CFLAGS_KERNEL :=
66659 KBUILD_AFLAGS := -D__ASSEMBLY__
66660@@ -407,8 +410,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
66661 # Rules shared between *config targets and build targets
66662
66663 # Basic helpers built in scripts/
66664-PHONY += scripts_basic
66665-scripts_basic:
66666+PHONY += scripts_basic gcc-plugins
66667+scripts_basic: gcc-plugins
66668 $(Q)$(MAKE) $(build)=scripts/basic
66669 $(Q)rm -f .tmp_quiet_recordmcount
66670
66671@@ -564,6 +567,36 @@ else
66672 KBUILD_CFLAGS += -O2
66673 endif
66674
66675+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
66676+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
66677+ifdef CONFIG_PAX_MEMORY_STACKLEAK
66678+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
66679+endif
66680+ifdef CONFIG_KALLOCSTAT_PLUGIN
66681+KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
66682+endif
66683+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
66684+KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
66685+endif
66686+ifdef CONFIG_CHECKER_PLUGIN
66687+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
66688+CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
66689+endif
66690+endif
66691+GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
66692+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
66693+gcc-plugins:
66694+ $(Q)$(MAKE) $(build)=tools/gcc
66695+else
66696+gcc-plugins:
66697+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
66698+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
66699+else
66700+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
66701+endif
66702+ $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
66703+endif
66704+
66705 include $(srctree)/arch/$(SRCARCH)/Makefile
66706
66707 ifneq ($(CONFIG_FRAME_WARN),0)
66708@@ -708,7 +741,7 @@ export mod_strip_cmd
66709
66710
66711 ifeq ($(KBUILD_EXTMOD),)
66712-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
66713+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
66714
66715 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
66716 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
66717@@ -932,6 +965,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-mai
66718
66719 # The actual objects are generated when descending,
66720 # make sure no implicit rule kicks in
66721+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
66722 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
66723
66724 # Handle descending into subdirectories listed in $(vmlinux-dirs)
66725@@ -941,7 +975,7 @@ $(sort $(vmlinux-init) $(vmlinux-main))
66726 # Error messages still appears in the original language
66727
66728 PHONY += $(vmlinux-dirs)
66729-$(vmlinux-dirs): prepare scripts
66730+$(vmlinux-dirs): gcc-plugins prepare scripts
66731 $(Q)$(MAKE) $(build)=$@
66732
66733 # Store (new) KERNELRELASE string in include/config/kernel.release
66734@@ -986,6 +1020,7 @@ prepare0: archprepare FORCE
66735 $(Q)$(MAKE) $(build)=. missing-syscalls
66736
66737 # All the preparing..
66738+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
66739 prepare: prepare0
66740
66741 # Generate some files
66742@@ -1087,6 +1122,7 @@ all: modules
66743 # using awk while concatenating to the final file.
66744
66745 PHONY += modules
66746+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
66747 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
66748 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
66749 @$(kecho) ' Building modules, stage 2.';
66750@@ -1102,7 +1138,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modu
66751
66752 # Target to prepare building external modules
66753 PHONY += modules_prepare
66754-modules_prepare: prepare scripts
66755+modules_prepare: gcc-plugins prepare scripts
66756
66757 # Target to install modules
66758 PHONY += modules_install
66759@@ -1198,7 +1234,7 @@ distclean: mrproper
66760 @find $(srctree) $(RCS_FIND_IGNORE) \
66761 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
66762 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
66763- -o -name '.*.rej' -o -size 0 \
66764+ -o -name '.*.rej' -o -name '*.so' -o -size 0 \
66765 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
66766 -type f -print | xargs rm -f
66767
66768@@ -1359,6 +1395,7 @@ PHONY += $(module-dirs) modules
66769 $(module-dirs): crmodverdir $(objtree)/Module.symvers
66770 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
66771
66772+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
66773 modules: $(module-dirs)
66774 @$(kecho) ' Building modules, stage 2.';
66775 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
66776@@ -1485,17 +1522,19 @@ else
66777 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
66778 endif
66779
66780-%.s: %.c prepare scripts FORCE
66781+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
66782+%.s: %.c gcc-plugins prepare scripts FORCE
66783 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
66784 %.i: %.c prepare scripts FORCE
66785 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
66786-%.o: %.c prepare scripts FORCE
66787+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
66788+%.o: %.c gcc-plugins prepare scripts FORCE
66789 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
66790 %.lst: %.c prepare scripts FORCE
66791 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
66792-%.s: %.S prepare scripts FORCE
66793+%.s: %.S gcc-plugins prepare scripts FORCE
66794 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
66795-%.o: %.S prepare scripts FORCE
66796+%.o: %.S gcc-plugins prepare scripts FORCE
66797 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
66798 %.symtypes: %.c prepare scripts FORCE
66799 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
66800@@ -1505,11 +1544,13 @@ endif
66801 $(cmd_crmodverdir)
66802 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
66803 $(build)=$(build-dir)
66804-%/: prepare scripts FORCE
66805+%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
66806+%/: gcc-plugins prepare scripts FORCE
66807 $(cmd_crmodverdir)
66808 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
66809 $(build)=$(build-dir)
66810-%.ko: prepare scripts FORCE
66811+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
66812+%.ko: gcc-plugins prepare scripts FORCE
66813 $(cmd_crmodverdir)
66814 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
66815 $(build)=$(build-dir) $(@:.ko=.o)
66816diff -urNp linux-3.0.7/mm/filemap.c linux-3.0.7/mm/filemap.c
66817--- linux-3.0.7/mm/filemap.c 2011-07-21 22:17:23.000000000 -0400
66818+++ linux-3.0.7/mm/filemap.c 2011-08-23 21:48:14.000000000 -0400
66819@@ -1763,7 +1763,7 @@ int generic_file_mmap(struct file * file
66820 struct address_space *mapping = file->f_mapping;
66821
66822 if (!mapping->a_ops->readpage)
66823- return -ENOEXEC;
66824+ return -ENODEV;
66825 file_accessed(file);
66826 vma->vm_ops = &generic_file_vm_ops;
66827 vma->vm_flags |= VM_CAN_NONLINEAR;
66828@@ -2169,6 +2169,7 @@ inline int generic_write_checks(struct f
66829 *pos = i_size_read(inode);
66830
66831 if (limit != RLIM_INFINITY) {
66832+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
66833 if (*pos >= limit) {
66834 send_sig(SIGXFSZ, current, 0);
66835 return -EFBIG;
66836diff -urNp linux-3.0.7/mm/fremap.c linux-3.0.7/mm/fremap.c
66837--- linux-3.0.7/mm/fremap.c 2011-07-21 22:17:23.000000000 -0400
66838+++ linux-3.0.7/mm/fremap.c 2011-08-23 21:47:56.000000000 -0400
66839@@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
66840 retry:
66841 vma = find_vma(mm, start);
66842
66843+#ifdef CONFIG_PAX_SEGMEXEC
66844+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
66845+ goto out;
66846+#endif
66847+
66848 /*
66849 * Make sure the vma is shared, that it supports prefaulting,
66850 * and that the remapped range is valid and fully within
66851diff -urNp linux-3.0.7/mm/highmem.c linux-3.0.7/mm/highmem.c
66852--- linux-3.0.7/mm/highmem.c 2011-07-21 22:17:23.000000000 -0400
66853+++ linux-3.0.7/mm/highmem.c 2011-08-23 21:47:56.000000000 -0400
66854@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
66855 * So no dangers, even with speculative execution.
66856 */
66857 page = pte_page(pkmap_page_table[i]);
66858+ pax_open_kernel();
66859 pte_clear(&init_mm, (unsigned long)page_address(page),
66860 &pkmap_page_table[i]);
66861-
66862+ pax_close_kernel();
66863 set_page_address(page, NULL);
66864 need_flush = 1;
66865 }
66866@@ -186,9 +187,11 @@ start:
66867 }
66868 }
66869 vaddr = PKMAP_ADDR(last_pkmap_nr);
66870+
66871+ pax_open_kernel();
66872 set_pte_at(&init_mm, vaddr,
66873 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
66874-
66875+ pax_close_kernel();
66876 pkmap_count[last_pkmap_nr] = 1;
66877 set_page_address(page, (void *)vaddr);
66878
66879diff -urNp linux-3.0.7/mm/huge_memory.c linux-3.0.7/mm/huge_memory.c
66880--- linux-3.0.7/mm/huge_memory.c 2011-07-21 22:17:23.000000000 -0400
66881+++ linux-3.0.7/mm/huge_memory.c 2011-08-23 21:47:56.000000000 -0400
66882@@ -702,7 +702,7 @@ out:
66883 * run pte_offset_map on the pmd, if an huge pmd could
66884 * materialize from under us from a different thread.
66885 */
66886- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
66887+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
66888 return VM_FAULT_OOM;
66889 /* if an huge pmd materialized from under us just retry later */
66890 if (unlikely(pmd_trans_huge(*pmd)))
66891diff -urNp linux-3.0.7/mm/hugetlb.c linux-3.0.7/mm/hugetlb.c
66892--- linux-3.0.7/mm/hugetlb.c 2011-07-21 22:17:23.000000000 -0400
66893+++ linux-3.0.7/mm/hugetlb.c 2011-08-23 21:47:56.000000000 -0400
66894@@ -2339,6 +2339,27 @@ static int unmap_ref_private(struct mm_s
66895 return 1;
66896 }
66897
66898+#ifdef CONFIG_PAX_SEGMEXEC
66899+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
66900+{
66901+ struct mm_struct *mm = vma->vm_mm;
66902+ struct vm_area_struct *vma_m;
66903+ unsigned long address_m;
66904+ pte_t *ptep_m;
66905+
66906+ vma_m = pax_find_mirror_vma(vma);
66907+ if (!vma_m)
66908+ return;
66909+
66910+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
66911+ address_m = address + SEGMEXEC_TASK_SIZE;
66912+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
66913+ get_page(page_m);
66914+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
66915+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
66916+}
66917+#endif
66918+
66919 /*
66920 * Hugetlb_cow() should be called with page lock of the original hugepage held.
66921 */
66922@@ -2440,6 +2461,11 @@ retry_avoidcopy:
66923 make_huge_pte(vma, new_page, 1));
66924 page_remove_rmap(old_page);
66925 hugepage_add_new_anon_rmap(new_page, vma, address);
66926+
66927+#ifdef CONFIG_PAX_SEGMEXEC
66928+ pax_mirror_huge_pte(vma, address, new_page);
66929+#endif
66930+
66931 /* Make the old page be freed below */
66932 new_page = old_page;
66933 mmu_notifier_invalidate_range_end(mm,
66934@@ -2591,6 +2617,10 @@ retry:
66935 && (vma->vm_flags & VM_SHARED)));
66936 set_huge_pte_at(mm, address, ptep, new_pte);
66937
66938+#ifdef CONFIG_PAX_SEGMEXEC
66939+ pax_mirror_huge_pte(vma, address, page);
66940+#endif
66941+
66942 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
66943 /* Optimization, do the COW without a second fault */
66944 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
66945@@ -2620,6 +2650,10 @@ int hugetlb_fault(struct mm_struct *mm,
66946 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
66947 struct hstate *h = hstate_vma(vma);
66948
66949+#ifdef CONFIG_PAX_SEGMEXEC
66950+ struct vm_area_struct *vma_m;
66951+#endif
66952+
66953 ptep = huge_pte_offset(mm, address);
66954 if (ptep) {
66955 entry = huge_ptep_get(ptep);
66956@@ -2631,6 +2665,26 @@ int hugetlb_fault(struct mm_struct *mm,
66957 VM_FAULT_SET_HINDEX(h - hstates);
66958 }
66959
66960+#ifdef CONFIG_PAX_SEGMEXEC
66961+ vma_m = pax_find_mirror_vma(vma);
66962+ if (vma_m) {
66963+ unsigned long address_m;
66964+
66965+ if (vma->vm_start > vma_m->vm_start) {
66966+ address_m = address;
66967+ address -= SEGMEXEC_TASK_SIZE;
66968+ vma = vma_m;
66969+ h = hstate_vma(vma);
66970+ } else
66971+ address_m = address + SEGMEXEC_TASK_SIZE;
66972+
66973+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
66974+ return VM_FAULT_OOM;
66975+ address_m &= HPAGE_MASK;
66976+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
66977+ }
66978+#endif
66979+
66980 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
66981 if (!ptep)
66982 return VM_FAULT_OOM;
66983diff -urNp linux-3.0.7/mm/internal.h linux-3.0.7/mm/internal.h
66984--- linux-3.0.7/mm/internal.h 2011-07-21 22:17:23.000000000 -0400
66985+++ linux-3.0.7/mm/internal.h 2011-08-23 21:47:56.000000000 -0400
66986@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
66987 * in mm/page_alloc.c
66988 */
66989 extern void __free_pages_bootmem(struct page *page, unsigned int order);
66990+extern void free_compound_page(struct page *page);
66991 extern void prep_compound_page(struct page *page, unsigned long order);
66992 #ifdef CONFIG_MEMORY_FAILURE
66993 extern bool is_free_buddy_page(struct page *page);
66994diff -urNp linux-3.0.7/mm/Kconfig linux-3.0.7/mm/Kconfig
66995--- linux-3.0.7/mm/Kconfig 2011-07-21 22:17:23.000000000 -0400
66996+++ linux-3.0.7/mm/Kconfig 2011-08-23 21:48:14.000000000 -0400
66997@@ -240,7 +240,7 @@ config KSM
66998 config DEFAULT_MMAP_MIN_ADDR
66999 int "Low address space to protect from user allocation"
67000 depends on MMU
67001- default 4096
67002+ default 65536
67003 help
67004 This is the portion of low virtual memory which should be protected
67005 from userspace allocation. Keeping a user from writing to low pages
67006diff -urNp linux-3.0.7/mm/kmemleak.c linux-3.0.7/mm/kmemleak.c
67007--- linux-3.0.7/mm/kmemleak.c 2011-07-21 22:17:23.000000000 -0400
67008+++ linux-3.0.7/mm/kmemleak.c 2011-08-23 21:48:14.000000000 -0400
67009@@ -357,7 +357,7 @@ static void print_unreferenced(struct se
67010
67011 for (i = 0; i < object->trace_len; i++) {
67012 void *ptr = (void *)object->trace[i];
67013- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
67014+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
67015 }
67016 }
67017
67018diff -urNp linux-3.0.7/mm/maccess.c linux-3.0.7/mm/maccess.c
67019--- linux-3.0.7/mm/maccess.c 2011-07-21 22:17:23.000000000 -0400
67020+++ linux-3.0.7/mm/maccess.c 2011-10-06 04:17:55.000000000 -0400
67021@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, cons
67022 set_fs(KERNEL_DS);
67023 pagefault_disable();
67024 ret = __copy_from_user_inatomic(dst,
67025- (__force const void __user *)src, size);
67026+ (const void __force_user *)src, size);
67027 pagefault_enable();
67028 set_fs(old_fs);
67029
67030@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, con
67031
67032 set_fs(KERNEL_DS);
67033 pagefault_disable();
67034- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
67035+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
67036 pagefault_enable();
67037 set_fs(old_fs);
67038
67039diff -urNp linux-3.0.7/mm/madvise.c linux-3.0.7/mm/madvise.c
67040--- linux-3.0.7/mm/madvise.c 2011-07-21 22:17:23.000000000 -0400
67041+++ linux-3.0.7/mm/madvise.c 2011-08-23 21:47:56.000000000 -0400
67042@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
67043 pgoff_t pgoff;
67044 unsigned long new_flags = vma->vm_flags;
67045
67046+#ifdef CONFIG_PAX_SEGMEXEC
67047+ struct vm_area_struct *vma_m;
67048+#endif
67049+
67050 switch (behavior) {
67051 case MADV_NORMAL:
67052 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
67053@@ -110,6 +114,13 @@ success:
67054 /*
67055 * vm_flags is protected by the mmap_sem held in write mode.
67056 */
67057+
67058+#ifdef CONFIG_PAX_SEGMEXEC
67059+ vma_m = pax_find_mirror_vma(vma);
67060+ if (vma_m)
67061+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
67062+#endif
67063+
67064 vma->vm_flags = new_flags;
67065
67066 out:
67067@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
67068 struct vm_area_struct ** prev,
67069 unsigned long start, unsigned long end)
67070 {
67071+
67072+#ifdef CONFIG_PAX_SEGMEXEC
67073+ struct vm_area_struct *vma_m;
67074+#endif
67075+
67076 *prev = vma;
67077 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
67078 return -EINVAL;
67079@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
67080 zap_page_range(vma, start, end - start, &details);
67081 } else
67082 zap_page_range(vma, start, end - start, NULL);
67083+
67084+#ifdef CONFIG_PAX_SEGMEXEC
67085+ vma_m = pax_find_mirror_vma(vma);
67086+ if (vma_m) {
67087+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
67088+ struct zap_details details = {
67089+ .nonlinear_vma = vma_m,
67090+ .last_index = ULONG_MAX,
67091+ };
67092+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
67093+ } else
67094+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
67095+ }
67096+#endif
67097+
67098 return 0;
67099 }
67100
67101@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
67102 if (end < start)
67103 goto out;
67104
67105+#ifdef CONFIG_PAX_SEGMEXEC
67106+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
67107+ if (end > SEGMEXEC_TASK_SIZE)
67108+ goto out;
67109+ } else
67110+#endif
67111+
67112+ if (end > TASK_SIZE)
67113+ goto out;
67114+
67115 error = 0;
67116 if (end == start)
67117 goto out;
67118diff -urNp linux-3.0.7/mm/memory.c linux-3.0.7/mm/memory.c
67119--- linux-3.0.7/mm/memory.c 2011-09-02 18:11:21.000000000 -0400
67120+++ linux-3.0.7/mm/memory.c 2011-08-23 21:47:56.000000000 -0400
67121@@ -457,8 +457,12 @@ static inline void free_pmd_range(struct
67122 return;
67123
67124 pmd = pmd_offset(pud, start);
67125+
67126+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
67127 pud_clear(pud);
67128 pmd_free_tlb(tlb, pmd, start);
67129+#endif
67130+
67131 }
67132
67133 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67134@@ -489,9 +493,12 @@ static inline void free_pud_range(struct
67135 if (end - 1 > ceiling - 1)
67136 return;
67137
67138+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
67139 pud = pud_offset(pgd, start);
67140 pgd_clear(pgd);
67141 pud_free_tlb(tlb, pud, start);
67142+#endif
67143+
67144 }
67145
67146 /*
67147@@ -1577,12 +1584,6 @@ no_page_table:
67148 return page;
67149 }
67150
67151-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
67152-{
67153- return stack_guard_page_start(vma, addr) ||
67154- stack_guard_page_end(vma, addr+PAGE_SIZE);
67155-}
67156-
67157 /**
67158 * __get_user_pages() - pin user pages in memory
67159 * @tsk: task_struct of target task
67160@@ -1655,10 +1656,10 @@ int __get_user_pages(struct task_struct
67161 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
67162 i = 0;
67163
67164- do {
67165+ while (nr_pages) {
67166 struct vm_area_struct *vma;
67167
67168- vma = find_extend_vma(mm, start);
67169+ vma = find_vma(mm, start);
67170 if (!vma && in_gate_area(mm, start)) {
67171 unsigned long pg = start & PAGE_MASK;
67172 pgd_t *pgd;
67173@@ -1706,7 +1707,7 @@ int __get_user_pages(struct task_struct
67174 goto next_page;
67175 }
67176
67177- if (!vma ||
67178+ if (!vma || start < vma->vm_start ||
67179 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
67180 !(vm_flags & vma->vm_flags))
67181 return i ? : -EFAULT;
67182@@ -1733,11 +1734,6 @@ int __get_user_pages(struct task_struct
67183 int ret;
67184 unsigned int fault_flags = 0;
67185
67186- /* For mlock, just skip the stack guard page. */
67187- if (foll_flags & FOLL_MLOCK) {
67188- if (stack_guard_page(vma, start))
67189- goto next_page;
67190- }
67191 if (foll_flags & FOLL_WRITE)
67192 fault_flags |= FAULT_FLAG_WRITE;
67193 if (nonblocking)
67194@@ -1811,7 +1807,7 @@ next_page:
67195 start += PAGE_SIZE;
67196 nr_pages--;
67197 } while (nr_pages && start < vma->vm_end);
67198- } while (nr_pages);
67199+ }
67200 return i;
67201 }
67202 EXPORT_SYMBOL(__get_user_pages);
67203@@ -2018,6 +2014,10 @@ static int insert_page(struct vm_area_st
67204 page_add_file_rmap(page);
67205 set_pte_at(mm, addr, pte, mk_pte(page, prot));
67206
67207+#ifdef CONFIG_PAX_SEGMEXEC
67208+ pax_mirror_file_pte(vma, addr, page, ptl);
67209+#endif
67210+
67211 retval = 0;
67212 pte_unmap_unlock(pte, ptl);
67213 return retval;
67214@@ -2052,10 +2052,22 @@ out:
67215 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
67216 struct page *page)
67217 {
67218+
67219+#ifdef CONFIG_PAX_SEGMEXEC
67220+ struct vm_area_struct *vma_m;
67221+#endif
67222+
67223 if (addr < vma->vm_start || addr >= vma->vm_end)
67224 return -EFAULT;
67225 if (!page_count(page))
67226 return -EINVAL;
67227+
67228+#ifdef CONFIG_PAX_SEGMEXEC
67229+ vma_m = pax_find_mirror_vma(vma);
67230+ if (vma_m)
67231+ vma_m->vm_flags |= VM_INSERTPAGE;
67232+#endif
67233+
67234 vma->vm_flags |= VM_INSERTPAGE;
67235 return insert_page(vma, addr, page, vma->vm_page_prot);
67236 }
67237@@ -2141,6 +2153,7 @@ int vm_insert_mixed(struct vm_area_struc
67238 unsigned long pfn)
67239 {
67240 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
67241+ BUG_ON(vma->vm_mirror);
67242
67243 if (addr < vma->vm_start || addr >= vma->vm_end)
67244 return -EFAULT;
67245@@ -2456,6 +2469,186 @@ static inline void cow_user_page(struct
67246 copy_user_highpage(dst, src, va, vma);
67247 }
67248
67249+#ifdef CONFIG_PAX_SEGMEXEC
67250+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
67251+{
67252+ struct mm_struct *mm = vma->vm_mm;
67253+ spinlock_t *ptl;
67254+ pte_t *pte, entry;
67255+
67256+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
67257+ entry = *pte;
67258+ if (!pte_present(entry)) {
67259+ if (!pte_none(entry)) {
67260+ BUG_ON(pte_file(entry));
67261+ free_swap_and_cache(pte_to_swp_entry(entry));
67262+ pte_clear_not_present_full(mm, address, pte, 0);
67263+ }
67264+ } else {
67265+ struct page *page;
67266+
67267+ flush_cache_page(vma, address, pte_pfn(entry));
67268+ entry = ptep_clear_flush(vma, address, pte);
67269+ BUG_ON(pte_dirty(entry));
67270+ page = vm_normal_page(vma, address, entry);
67271+ if (page) {
67272+ update_hiwater_rss(mm);
67273+ if (PageAnon(page))
67274+ dec_mm_counter_fast(mm, MM_ANONPAGES);
67275+ else
67276+ dec_mm_counter_fast(mm, MM_FILEPAGES);
67277+ page_remove_rmap(page);
67278+ page_cache_release(page);
67279+ }
67280+ }
67281+ pte_unmap_unlock(pte, ptl);
67282+}
67283+
67284+/* PaX: if vma is mirrored, synchronize the mirror's PTE
67285+ *
67286+ * the ptl of the lower mapped page is held on entry and is not released on exit
67287+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
67288+ */
67289+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67290+{
67291+ struct mm_struct *mm = vma->vm_mm;
67292+ unsigned long address_m;
67293+ spinlock_t *ptl_m;
67294+ struct vm_area_struct *vma_m;
67295+ pmd_t *pmd_m;
67296+ pte_t *pte_m, entry_m;
67297+
67298+ BUG_ON(!page_m || !PageAnon(page_m));
67299+
67300+ vma_m = pax_find_mirror_vma(vma);
67301+ if (!vma_m)
67302+ return;
67303+
67304+ BUG_ON(!PageLocked(page_m));
67305+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67306+ address_m = address + SEGMEXEC_TASK_SIZE;
67307+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67308+ pte_m = pte_offset_map(pmd_m, address_m);
67309+ ptl_m = pte_lockptr(mm, pmd_m);
67310+ if (ptl != ptl_m) {
67311+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67312+ if (!pte_none(*pte_m))
67313+ goto out;
67314+ }
67315+
67316+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67317+ page_cache_get(page_m);
67318+ page_add_anon_rmap(page_m, vma_m, address_m);
67319+ inc_mm_counter_fast(mm, MM_ANONPAGES);
67320+ set_pte_at(mm, address_m, pte_m, entry_m);
67321+ update_mmu_cache(vma_m, address_m, entry_m);
67322+out:
67323+ if (ptl != ptl_m)
67324+ spin_unlock(ptl_m);
67325+ pte_unmap(pte_m);
67326+ unlock_page(page_m);
67327+}
67328+
67329+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67330+{
67331+ struct mm_struct *mm = vma->vm_mm;
67332+ unsigned long address_m;
67333+ spinlock_t *ptl_m;
67334+ struct vm_area_struct *vma_m;
67335+ pmd_t *pmd_m;
67336+ pte_t *pte_m, entry_m;
67337+
67338+ BUG_ON(!page_m || PageAnon(page_m));
67339+
67340+ vma_m = pax_find_mirror_vma(vma);
67341+ if (!vma_m)
67342+ return;
67343+
67344+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67345+ address_m = address + SEGMEXEC_TASK_SIZE;
67346+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67347+ pte_m = pte_offset_map(pmd_m, address_m);
67348+ ptl_m = pte_lockptr(mm, pmd_m);
67349+ if (ptl != ptl_m) {
67350+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67351+ if (!pte_none(*pte_m))
67352+ goto out;
67353+ }
67354+
67355+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67356+ page_cache_get(page_m);
67357+ page_add_file_rmap(page_m);
67358+ inc_mm_counter_fast(mm, MM_FILEPAGES);
67359+ set_pte_at(mm, address_m, pte_m, entry_m);
67360+ update_mmu_cache(vma_m, address_m, entry_m);
67361+out:
67362+ if (ptl != ptl_m)
67363+ spin_unlock(ptl_m);
67364+ pte_unmap(pte_m);
67365+}
67366+
67367+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
67368+{
67369+ struct mm_struct *mm = vma->vm_mm;
67370+ unsigned long address_m;
67371+ spinlock_t *ptl_m;
67372+ struct vm_area_struct *vma_m;
67373+ pmd_t *pmd_m;
67374+ pte_t *pte_m, entry_m;
67375+
67376+ vma_m = pax_find_mirror_vma(vma);
67377+ if (!vma_m)
67378+ return;
67379+
67380+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67381+ address_m = address + SEGMEXEC_TASK_SIZE;
67382+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67383+ pte_m = pte_offset_map(pmd_m, address_m);
67384+ ptl_m = pte_lockptr(mm, pmd_m);
67385+ if (ptl != ptl_m) {
67386+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67387+ if (!pte_none(*pte_m))
67388+ goto out;
67389+ }
67390+
67391+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
67392+ set_pte_at(mm, address_m, pte_m, entry_m);
67393+out:
67394+ if (ptl != ptl_m)
67395+ spin_unlock(ptl_m);
67396+ pte_unmap(pte_m);
67397+}
67398+
67399+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
67400+{
67401+ struct page *page_m;
67402+ pte_t entry;
67403+
67404+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
67405+ goto out;
67406+
67407+ entry = *pte;
67408+ page_m = vm_normal_page(vma, address, entry);
67409+ if (!page_m)
67410+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
67411+ else if (PageAnon(page_m)) {
67412+ if (pax_find_mirror_vma(vma)) {
67413+ pte_unmap_unlock(pte, ptl);
67414+ lock_page(page_m);
67415+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
67416+ if (pte_same(entry, *pte))
67417+ pax_mirror_anon_pte(vma, address, page_m, ptl);
67418+ else
67419+ unlock_page(page_m);
67420+ }
67421+ } else
67422+ pax_mirror_file_pte(vma, address, page_m, ptl);
67423+
67424+out:
67425+ pte_unmap_unlock(pte, ptl);
67426+}
67427+#endif
67428+
67429 /*
67430 * This routine handles present pages, when users try to write
67431 * to a shared page. It is done by copying the page to a new address
67432@@ -2667,6 +2860,12 @@ gotten:
67433 */
67434 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67435 if (likely(pte_same(*page_table, orig_pte))) {
67436+
67437+#ifdef CONFIG_PAX_SEGMEXEC
67438+ if (pax_find_mirror_vma(vma))
67439+ BUG_ON(!trylock_page(new_page));
67440+#endif
67441+
67442 if (old_page) {
67443 if (!PageAnon(old_page)) {
67444 dec_mm_counter_fast(mm, MM_FILEPAGES);
67445@@ -2718,6 +2917,10 @@ gotten:
67446 page_remove_rmap(old_page);
67447 }
67448
67449+#ifdef CONFIG_PAX_SEGMEXEC
67450+ pax_mirror_anon_pte(vma, address, new_page, ptl);
67451+#endif
67452+
67453 /* Free the old page.. */
67454 new_page = old_page;
67455 ret |= VM_FAULT_WRITE;
67456@@ -2997,6 +3200,11 @@ static int do_swap_page(struct mm_struct
67457 swap_free(entry);
67458 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
67459 try_to_free_swap(page);
67460+
67461+#ifdef CONFIG_PAX_SEGMEXEC
67462+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
67463+#endif
67464+
67465 unlock_page(page);
67466 if (swapcache) {
67467 /*
67468@@ -3020,6 +3228,11 @@ static int do_swap_page(struct mm_struct
67469
67470 /* No need to invalidate - it was non-present before */
67471 update_mmu_cache(vma, address, page_table);
67472+
67473+#ifdef CONFIG_PAX_SEGMEXEC
67474+ pax_mirror_anon_pte(vma, address, page, ptl);
67475+#endif
67476+
67477 unlock:
67478 pte_unmap_unlock(page_table, ptl);
67479 out:
67480@@ -3039,40 +3252,6 @@ out_release:
67481 }
67482
67483 /*
67484- * This is like a special single-page "expand_{down|up}wards()",
67485- * except we must first make sure that 'address{-|+}PAGE_SIZE'
67486- * doesn't hit another vma.
67487- */
67488-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
67489-{
67490- address &= PAGE_MASK;
67491- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
67492- struct vm_area_struct *prev = vma->vm_prev;
67493-
67494- /*
67495- * Is there a mapping abutting this one below?
67496- *
67497- * That's only ok if it's the same stack mapping
67498- * that has gotten split..
67499- */
67500- if (prev && prev->vm_end == address)
67501- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
67502-
67503- expand_downwards(vma, address - PAGE_SIZE);
67504- }
67505- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
67506- struct vm_area_struct *next = vma->vm_next;
67507-
67508- /* As VM_GROWSDOWN but s/below/above/ */
67509- if (next && next->vm_start == address + PAGE_SIZE)
67510- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
67511-
67512- expand_upwards(vma, address + PAGE_SIZE);
67513- }
67514- return 0;
67515-}
67516-
67517-/*
67518 * We enter with non-exclusive mmap_sem (to exclude vma changes,
67519 * but allow concurrent faults), and pte mapped but not yet locked.
67520 * We return with mmap_sem still held, but pte unmapped and unlocked.
67521@@ -3081,27 +3260,23 @@ static int do_anonymous_page(struct mm_s
67522 unsigned long address, pte_t *page_table, pmd_t *pmd,
67523 unsigned int flags)
67524 {
67525- struct page *page;
67526+ struct page *page = NULL;
67527 spinlock_t *ptl;
67528 pte_t entry;
67529
67530- pte_unmap(page_table);
67531-
67532- /* Check if we need to add a guard page to the stack */
67533- if (check_stack_guard_page(vma, address) < 0)
67534- return VM_FAULT_SIGBUS;
67535-
67536- /* Use the zero-page for reads */
67537 if (!(flags & FAULT_FLAG_WRITE)) {
67538 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
67539 vma->vm_page_prot));
67540- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67541+ ptl = pte_lockptr(mm, pmd);
67542+ spin_lock(ptl);
67543 if (!pte_none(*page_table))
67544 goto unlock;
67545 goto setpte;
67546 }
67547
67548 /* Allocate our own private page. */
67549+ pte_unmap(page_table);
67550+
67551 if (unlikely(anon_vma_prepare(vma)))
67552 goto oom;
67553 page = alloc_zeroed_user_highpage_movable(vma, address);
67554@@ -3120,6 +3295,11 @@ static int do_anonymous_page(struct mm_s
67555 if (!pte_none(*page_table))
67556 goto release;
67557
67558+#ifdef CONFIG_PAX_SEGMEXEC
67559+ if (pax_find_mirror_vma(vma))
67560+ BUG_ON(!trylock_page(page));
67561+#endif
67562+
67563 inc_mm_counter_fast(mm, MM_ANONPAGES);
67564 page_add_new_anon_rmap(page, vma, address);
67565 setpte:
67566@@ -3127,6 +3307,12 @@ setpte:
67567
67568 /* No need to invalidate - it was non-present before */
67569 update_mmu_cache(vma, address, page_table);
67570+
67571+#ifdef CONFIG_PAX_SEGMEXEC
67572+ if (page)
67573+ pax_mirror_anon_pte(vma, address, page, ptl);
67574+#endif
67575+
67576 unlock:
67577 pte_unmap_unlock(page_table, ptl);
67578 return 0;
67579@@ -3264,6 +3450,12 @@ static int __do_fault(struct mm_struct *
67580 */
67581 /* Only go through if we didn't race with anybody else... */
67582 if (likely(pte_same(*page_table, orig_pte))) {
67583+
67584+#ifdef CONFIG_PAX_SEGMEXEC
67585+ if (anon && pax_find_mirror_vma(vma))
67586+ BUG_ON(!trylock_page(page));
67587+#endif
67588+
67589 flush_icache_page(vma, page);
67590 entry = mk_pte(page, vma->vm_page_prot);
67591 if (flags & FAULT_FLAG_WRITE)
67592@@ -3283,6 +3475,14 @@ static int __do_fault(struct mm_struct *
67593
67594 /* no need to invalidate: a not-present page won't be cached */
67595 update_mmu_cache(vma, address, page_table);
67596+
67597+#ifdef CONFIG_PAX_SEGMEXEC
67598+ if (anon)
67599+ pax_mirror_anon_pte(vma, address, page, ptl);
67600+ else
67601+ pax_mirror_file_pte(vma, address, page, ptl);
67602+#endif
67603+
67604 } else {
67605 if (charged)
67606 mem_cgroup_uncharge_page(page);
67607@@ -3430,6 +3630,12 @@ int handle_pte_fault(struct mm_struct *m
67608 if (flags & FAULT_FLAG_WRITE)
67609 flush_tlb_fix_spurious_fault(vma, address);
67610 }
67611+
67612+#ifdef CONFIG_PAX_SEGMEXEC
67613+ pax_mirror_pte(vma, address, pte, pmd, ptl);
67614+ return 0;
67615+#endif
67616+
67617 unlock:
67618 pte_unmap_unlock(pte, ptl);
67619 return 0;
67620@@ -3446,6 +3652,10 @@ int handle_mm_fault(struct mm_struct *mm
67621 pmd_t *pmd;
67622 pte_t *pte;
67623
67624+#ifdef CONFIG_PAX_SEGMEXEC
67625+ struct vm_area_struct *vma_m;
67626+#endif
67627+
67628 __set_current_state(TASK_RUNNING);
67629
67630 count_vm_event(PGFAULT);
67631@@ -3457,6 +3667,34 @@ int handle_mm_fault(struct mm_struct *mm
67632 if (unlikely(is_vm_hugetlb_page(vma)))
67633 return hugetlb_fault(mm, vma, address, flags);
67634
67635+#ifdef CONFIG_PAX_SEGMEXEC
67636+ vma_m = pax_find_mirror_vma(vma);
67637+ if (vma_m) {
67638+ unsigned long address_m;
67639+ pgd_t *pgd_m;
67640+ pud_t *pud_m;
67641+ pmd_t *pmd_m;
67642+
67643+ if (vma->vm_start > vma_m->vm_start) {
67644+ address_m = address;
67645+ address -= SEGMEXEC_TASK_SIZE;
67646+ vma = vma_m;
67647+ } else
67648+ address_m = address + SEGMEXEC_TASK_SIZE;
67649+
67650+ pgd_m = pgd_offset(mm, address_m);
67651+ pud_m = pud_alloc(mm, pgd_m, address_m);
67652+ if (!pud_m)
67653+ return VM_FAULT_OOM;
67654+ pmd_m = pmd_alloc(mm, pud_m, address_m);
67655+ if (!pmd_m)
67656+ return VM_FAULT_OOM;
67657+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
67658+ return VM_FAULT_OOM;
67659+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
67660+ }
67661+#endif
67662+
67663 pgd = pgd_offset(mm, address);
67664 pud = pud_alloc(mm, pgd, address);
67665 if (!pud)
67666@@ -3486,7 +3724,7 @@ int handle_mm_fault(struct mm_struct *mm
67667 * run pte_offset_map on the pmd, if an huge pmd could
67668 * materialize from under us from a different thread.
67669 */
67670- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
67671+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
67672 return VM_FAULT_OOM;
67673 /* if an huge pmd materialized from under us just retry later */
67674 if (unlikely(pmd_trans_huge(*pmd)))
67675@@ -3590,7 +3828,7 @@ static int __init gate_vma_init(void)
67676 gate_vma.vm_start = FIXADDR_USER_START;
67677 gate_vma.vm_end = FIXADDR_USER_END;
67678 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
67679- gate_vma.vm_page_prot = __P101;
67680+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
67681 /*
67682 * Make sure the vDSO gets into every core dump.
67683 * Dumping its contents makes post-mortem fully interpretable later
67684diff -urNp linux-3.0.7/mm/memory-failure.c linux-3.0.7/mm/memory-failure.c
67685--- linux-3.0.7/mm/memory-failure.c 2011-07-21 22:17:23.000000000 -0400
67686+++ linux-3.0.7/mm/memory-failure.c 2011-10-06 04:17:55.000000000 -0400
67687@@ -59,7 +59,7 @@ int sysctl_memory_failure_early_kill __r
67688
67689 int sysctl_memory_failure_recovery __read_mostly = 1;
67690
67691-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67692+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67693
67694 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
67695
67696@@ -200,7 +200,7 @@ static int kill_proc_ao(struct task_stru
67697 si.si_signo = SIGBUS;
67698 si.si_errno = 0;
67699 si.si_code = BUS_MCEERR_AO;
67700- si.si_addr = (void *)addr;
67701+ si.si_addr = (void __user *)addr;
67702 #ifdef __ARCH_SI_TRAPNO
67703 si.si_trapno = trapno;
67704 #endif
67705@@ -1008,7 +1008,7 @@ int __memory_failure(unsigned long pfn,
67706 }
67707
67708 nr_pages = 1 << compound_trans_order(hpage);
67709- atomic_long_add(nr_pages, &mce_bad_pages);
67710+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
67711
67712 /*
67713 * We need/can do nothing about count=0 pages.
67714@@ -1038,7 +1038,7 @@ int __memory_failure(unsigned long pfn,
67715 if (!PageHWPoison(hpage)
67716 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
67717 || (p != hpage && TestSetPageHWPoison(hpage))) {
67718- atomic_long_sub(nr_pages, &mce_bad_pages);
67719+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67720 return 0;
67721 }
67722 set_page_hwpoison_huge_page(hpage);
67723@@ -1096,7 +1096,7 @@ int __memory_failure(unsigned long pfn,
67724 }
67725 if (hwpoison_filter(p)) {
67726 if (TestClearPageHWPoison(p))
67727- atomic_long_sub(nr_pages, &mce_bad_pages);
67728+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67729 unlock_page(hpage);
67730 put_page(hpage);
67731 return 0;
67732@@ -1222,7 +1222,7 @@ int unpoison_memory(unsigned long pfn)
67733 return 0;
67734 }
67735 if (TestClearPageHWPoison(p))
67736- atomic_long_sub(nr_pages, &mce_bad_pages);
67737+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67738 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
67739 return 0;
67740 }
67741@@ -1236,7 +1236,7 @@ int unpoison_memory(unsigned long pfn)
67742 */
67743 if (TestClearPageHWPoison(page)) {
67744 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
67745- atomic_long_sub(nr_pages, &mce_bad_pages);
67746+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67747 freeit = 1;
67748 if (PageHuge(page))
67749 clear_page_hwpoison_huge_page(page);
67750@@ -1349,7 +1349,7 @@ static int soft_offline_huge_page(struct
67751 }
67752 done:
67753 if (!PageHWPoison(hpage))
67754- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
67755+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
67756 set_page_hwpoison_huge_page(hpage);
67757 dequeue_hwpoisoned_huge_page(hpage);
67758 /* keep elevated page count for bad page */
67759@@ -1480,7 +1480,7 @@ int soft_offline_page(struct page *page,
67760 return ret;
67761
67762 done:
67763- atomic_long_add(1, &mce_bad_pages);
67764+ atomic_long_add_unchecked(1, &mce_bad_pages);
67765 SetPageHWPoison(page);
67766 /* keep elevated page count for bad page */
67767 return ret;
67768diff -urNp linux-3.0.7/mm/mempolicy.c linux-3.0.7/mm/mempolicy.c
67769--- linux-3.0.7/mm/mempolicy.c 2011-07-21 22:17:23.000000000 -0400
67770+++ linux-3.0.7/mm/mempolicy.c 2011-08-23 21:48:14.000000000 -0400
67771@@ -639,6 +639,10 @@ static int mbind_range(struct mm_struct
67772 unsigned long vmstart;
67773 unsigned long vmend;
67774
67775+#ifdef CONFIG_PAX_SEGMEXEC
67776+ struct vm_area_struct *vma_m;
67777+#endif
67778+
67779 vma = find_vma_prev(mm, start, &prev);
67780 if (!vma || vma->vm_start > start)
67781 return -EFAULT;
67782@@ -669,6 +673,16 @@ static int mbind_range(struct mm_struct
67783 err = policy_vma(vma, new_pol);
67784 if (err)
67785 goto out;
67786+
67787+#ifdef CONFIG_PAX_SEGMEXEC
67788+ vma_m = pax_find_mirror_vma(vma);
67789+ if (vma_m) {
67790+ err = policy_vma(vma_m, new_pol);
67791+ if (err)
67792+ goto out;
67793+ }
67794+#endif
67795+
67796 }
67797
67798 out:
67799@@ -1102,6 +1116,17 @@ static long do_mbind(unsigned long start
67800
67801 if (end < start)
67802 return -EINVAL;
67803+
67804+#ifdef CONFIG_PAX_SEGMEXEC
67805+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
67806+ if (end > SEGMEXEC_TASK_SIZE)
67807+ return -EINVAL;
67808+ } else
67809+#endif
67810+
67811+ if (end > TASK_SIZE)
67812+ return -EINVAL;
67813+
67814 if (end == start)
67815 return 0;
67816
67817@@ -1320,6 +1345,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
67818 if (!mm)
67819 goto out;
67820
67821+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67822+ if (mm != current->mm &&
67823+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
67824+ err = -EPERM;
67825+ goto out;
67826+ }
67827+#endif
67828+
67829 /*
67830 * Check if this process has the right to modify the specified
67831 * process. The right exists if the process has administrative
67832@@ -1329,8 +1362,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
67833 rcu_read_lock();
67834 tcred = __task_cred(task);
67835 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
67836- cred->uid != tcred->suid && cred->uid != tcred->uid &&
67837- !capable(CAP_SYS_NICE)) {
67838+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
67839 rcu_read_unlock();
67840 err = -EPERM;
67841 goto out;
67842diff -urNp linux-3.0.7/mm/migrate.c linux-3.0.7/mm/migrate.c
67843--- linux-3.0.7/mm/migrate.c 2011-07-21 22:17:23.000000000 -0400
67844+++ linux-3.0.7/mm/migrate.c 2011-08-23 21:48:14.000000000 -0400
67845@@ -1124,6 +1124,8 @@ static int do_pages_move(struct mm_struc
67846 unsigned long chunk_start;
67847 int err;
67848
67849+ pax_track_stack();
67850+
67851 task_nodes = cpuset_mems_allowed(task);
67852
67853 err = -ENOMEM;
67854@@ -1308,6 +1310,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
67855 if (!mm)
67856 return -EINVAL;
67857
67858+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67859+ if (mm != current->mm &&
67860+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
67861+ err = -EPERM;
67862+ goto out;
67863+ }
67864+#endif
67865+
67866 /*
67867 * Check if this process has the right to modify the specified
67868 * process. The right exists if the process has administrative
67869@@ -1317,8 +1327,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
67870 rcu_read_lock();
67871 tcred = __task_cred(task);
67872 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
67873- cred->uid != tcred->suid && cred->uid != tcred->uid &&
67874- !capable(CAP_SYS_NICE)) {
67875+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
67876 rcu_read_unlock();
67877 err = -EPERM;
67878 goto out;
67879diff -urNp linux-3.0.7/mm/mlock.c linux-3.0.7/mm/mlock.c
67880--- linux-3.0.7/mm/mlock.c 2011-07-21 22:17:23.000000000 -0400
67881+++ linux-3.0.7/mm/mlock.c 2011-08-23 21:48:14.000000000 -0400
67882@@ -13,6 +13,7 @@
67883 #include <linux/pagemap.h>
67884 #include <linux/mempolicy.h>
67885 #include <linux/syscalls.h>
67886+#include <linux/security.h>
67887 #include <linux/sched.h>
67888 #include <linux/module.h>
67889 #include <linux/rmap.h>
67890@@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
67891 return -EINVAL;
67892 if (end == start)
67893 return 0;
67894+ if (end > TASK_SIZE)
67895+ return -EINVAL;
67896+
67897 vma = find_vma_prev(current->mm, start, &prev);
67898 if (!vma || vma->vm_start > start)
67899 return -ENOMEM;
67900@@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
67901 for (nstart = start ; ; ) {
67902 vm_flags_t newflags;
67903
67904+#ifdef CONFIG_PAX_SEGMEXEC
67905+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
67906+ break;
67907+#endif
67908+
67909 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
67910
67911 newflags = vma->vm_flags | VM_LOCKED;
67912@@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
67913 lock_limit >>= PAGE_SHIFT;
67914
67915 /* check against resource limits */
67916+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
67917 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
67918 error = do_mlock(start, len, 1);
67919 up_write(&current->mm->mmap_sem);
67920@@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
67921 static int do_mlockall(int flags)
67922 {
67923 struct vm_area_struct * vma, * prev = NULL;
67924- unsigned int def_flags = 0;
67925
67926 if (flags & MCL_FUTURE)
67927- def_flags = VM_LOCKED;
67928- current->mm->def_flags = def_flags;
67929+ current->mm->def_flags |= VM_LOCKED;
67930+ else
67931+ current->mm->def_flags &= ~VM_LOCKED;
67932 if (flags == MCL_FUTURE)
67933 goto out;
67934
67935 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
67936 vm_flags_t newflags;
67937
67938+#ifdef CONFIG_PAX_SEGMEXEC
67939+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
67940+ break;
67941+#endif
67942+
67943+ BUG_ON(vma->vm_end > TASK_SIZE);
67944 newflags = vma->vm_flags | VM_LOCKED;
67945 if (!(flags & MCL_CURRENT))
67946 newflags &= ~VM_LOCKED;
67947@@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
67948 lock_limit >>= PAGE_SHIFT;
67949
67950 ret = -ENOMEM;
67951+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
67952 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
67953 capable(CAP_IPC_LOCK))
67954 ret = do_mlockall(flags);
67955diff -urNp linux-3.0.7/mm/mmap.c linux-3.0.7/mm/mmap.c
67956--- linux-3.0.7/mm/mmap.c 2011-07-21 22:17:23.000000000 -0400
67957+++ linux-3.0.7/mm/mmap.c 2011-08-23 21:48:14.000000000 -0400
67958@@ -46,6 +46,16 @@
67959 #define arch_rebalance_pgtables(addr, len) (addr)
67960 #endif
67961
67962+static inline void verify_mm_writelocked(struct mm_struct *mm)
67963+{
67964+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
67965+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
67966+ up_read(&mm->mmap_sem);
67967+ BUG();
67968+ }
67969+#endif
67970+}
67971+
67972 static void unmap_region(struct mm_struct *mm,
67973 struct vm_area_struct *vma, struct vm_area_struct *prev,
67974 unsigned long start, unsigned long end);
67975@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
67976 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
67977 *
67978 */
67979-pgprot_t protection_map[16] = {
67980+pgprot_t protection_map[16] __read_only = {
67981 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
67982 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
67983 };
67984
67985-pgprot_t vm_get_page_prot(unsigned long vm_flags)
67986+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
67987 {
67988- return __pgprot(pgprot_val(protection_map[vm_flags &
67989+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
67990 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
67991 pgprot_val(arch_vm_get_page_prot(vm_flags)));
67992+
67993+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
67994+ if (!(__supported_pte_mask & _PAGE_NX) &&
67995+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
67996+ (vm_flags & (VM_READ | VM_WRITE)))
67997+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
67998+#endif
67999+
68000+ return prot;
68001 }
68002 EXPORT_SYMBOL(vm_get_page_prot);
68003
68004 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
68005 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
68006 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
68007+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
68008 /*
68009 * Make sure vm_committed_as in one cacheline and not cacheline shared with
68010 * other variables. It can be updated by several CPUs frequently.
68011@@ -236,6 +256,7 @@ static struct vm_area_struct *remove_vma
68012 struct vm_area_struct *next = vma->vm_next;
68013
68014 might_sleep();
68015+ BUG_ON(vma->vm_mirror);
68016 if (vma->vm_ops && vma->vm_ops->close)
68017 vma->vm_ops->close(vma);
68018 if (vma->vm_file) {
68019@@ -280,6 +301,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
68020 * not page aligned -Ram Gupta
68021 */
68022 rlim = rlimit(RLIMIT_DATA);
68023+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
68024 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
68025 (mm->end_data - mm->start_data) > rlim)
68026 goto out;
68027@@ -697,6 +719,12 @@ static int
68028 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
68029 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68030 {
68031+
68032+#ifdef CONFIG_PAX_SEGMEXEC
68033+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
68034+ return 0;
68035+#endif
68036+
68037 if (is_mergeable_vma(vma, file, vm_flags) &&
68038 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
68039 if (vma->vm_pgoff == vm_pgoff)
68040@@ -716,6 +744,12 @@ static int
68041 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68042 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68043 {
68044+
68045+#ifdef CONFIG_PAX_SEGMEXEC
68046+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
68047+ return 0;
68048+#endif
68049+
68050 if (is_mergeable_vma(vma, file, vm_flags) &&
68051 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
68052 pgoff_t vm_pglen;
68053@@ -758,13 +792,20 @@ can_vma_merge_after(struct vm_area_struc
68054 struct vm_area_struct *vma_merge(struct mm_struct *mm,
68055 struct vm_area_struct *prev, unsigned long addr,
68056 unsigned long end, unsigned long vm_flags,
68057- struct anon_vma *anon_vma, struct file *file,
68058+ struct anon_vma *anon_vma, struct file *file,
68059 pgoff_t pgoff, struct mempolicy *policy)
68060 {
68061 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
68062 struct vm_area_struct *area, *next;
68063 int err;
68064
68065+#ifdef CONFIG_PAX_SEGMEXEC
68066+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
68067+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
68068+
68069+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
68070+#endif
68071+
68072 /*
68073 * We later require that vma->vm_flags == vm_flags,
68074 * so this tests vma->vm_flags & VM_SPECIAL, too.
68075@@ -780,6 +821,15 @@ struct vm_area_struct *vma_merge(struct
68076 if (next && next->vm_end == end) /* cases 6, 7, 8 */
68077 next = next->vm_next;
68078
68079+#ifdef CONFIG_PAX_SEGMEXEC
68080+ if (prev)
68081+ prev_m = pax_find_mirror_vma(prev);
68082+ if (area)
68083+ area_m = pax_find_mirror_vma(area);
68084+ if (next)
68085+ next_m = pax_find_mirror_vma(next);
68086+#endif
68087+
68088 /*
68089 * Can it merge with the predecessor?
68090 */
68091@@ -799,9 +849,24 @@ struct vm_area_struct *vma_merge(struct
68092 /* cases 1, 6 */
68093 err = vma_adjust(prev, prev->vm_start,
68094 next->vm_end, prev->vm_pgoff, NULL);
68095- } else /* cases 2, 5, 7 */
68096+
68097+#ifdef CONFIG_PAX_SEGMEXEC
68098+ if (!err && prev_m)
68099+ err = vma_adjust(prev_m, prev_m->vm_start,
68100+ next_m->vm_end, prev_m->vm_pgoff, NULL);
68101+#endif
68102+
68103+ } else { /* cases 2, 5, 7 */
68104 err = vma_adjust(prev, prev->vm_start,
68105 end, prev->vm_pgoff, NULL);
68106+
68107+#ifdef CONFIG_PAX_SEGMEXEC
68108+ if (!err && prev_m)
68109+ err = vma_adjust(prev_m, prev_m->vm_start,
68110+ end_m, prev_m->vm_pgoff, NULL);
68111+#endif
68112+
68113+ }
68114 if (err)
68115 return NULL;
68116 khugepaged_enter_vma_merge(prev);
68117@@ -815,12 +880,27 @@ struct vm_area_struct *vma_merge(struct
68118 mpol_equal(policy, vma_policy(next)) &&
68119 can_vma_merge_before(next, vm_flags,
68120 anon_vma, file, pgoff+pglen)) {
68121- if (prev && addr < prev->vm_end) /* case 4 */
68122+ if (prev && addr < prev->vm_end) { /* case 4 */
68123 err = vma_adjust(prev, prev->vm_start,
68124 addr, prev->vm_pgoff, NULL);
68125- else /* cases 3, 8 */
68126+
68127+#ifdef CONFIG_PAX_SEGMEXEC
68128+ if (!err && prev_m)
68129+ err = vma_adjust(prev_m, prev_m->vm_start,
68130+ addr_m, prev_m->vm_pgoff, NULL);
68131+#endif
68132+
68133+ } else { /* cases 3, 8 */
68134 err = vma_adjust(area, addr, next->vm_end,
68135 next->vm_pgoff - pglen, NULL);
68136+
68137+#ifdef CONFIG_PAX_SEGMEXEC
68138+ if (!err && area_m)
68139+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
68140+ next_m->vm_pgoff - pglen, NULL);
68141+#endif
68142+
68143+ }
68144 if (err)
68145 return NULL;
68146 khugepaged_enter_vma_merge(area);
68147@@ -929,14 +1009,11 @@ none:
68148 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
68149 struct file *file, long pages)
68150 {
68151- const unsigned long stack_flags
68152- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
68153-
68154 if (file) {
68155 mm->shared_vm += pages;
68156 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
68157 mm->exec_vm += pages;
68158- } else if (flags & stack_flags)
68159+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
68160 mm->stack_vm += pages;
68161 if (flags & (VM_RESERVED|VM_IO))
68162 mm->reserved_vm += pages;
68163@@ -963,7 +1040,7 @@ unsigned long do_mmap_pgoff(struct file
68164 * (the exception is when the underlying filesystem is noexec
68165 * mounted, in which case we dont add PROT_EXEC.)
68166 */
68167- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
68168+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
68169 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
68170 prot |= PROT_EXEC;
68171
68172@@ -989,7 +1066,7 @@ unsigned long do_mmap_pgoff(struct file
68173 /* Obtain the address to map to. we verify (or select) it and ensure
68174 * that it represents a valid section of the address space.
68175 */
68176- addr = get_unmapped_area(file, addr, len, pgoff, flags);
68177+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
68178 if (addr & ~PAGE_MASK)
68179 return addr;
68180
68181@@ -1000,6 +1077,36 @@ unsigned long do_mmap_pgoff(struct file
68182 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
68183 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
68184
68185+#ifdef CONFIG_PAX_MPROTECT
68186+ if (mm->pax_flags & MF_PAX_MPROTECT) {
68187+#ifndef CONFIG_PAX_MPROTECT_COMPAT
68188+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
68189+ gr_log_rwxmmap(file);
68190+
68191+#ifdef CONFIG_PAX_EMUPLT
68192+ vm_flags &= ~VM_EXEC;
68193+#else
68194+ return -EPERM;
68195+#endif
68196+
68197+ }
68198+
68199+ if (!(vm_flags & VM_EXEC))
68200+ vm_flags &= ~VM_MAYEXEC;
68201+#else
68202+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
68203+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68204+#endif
68205+ else
68206+ vm_flags &= ~VM_MAYWRITE;
68207+ }
68208+#endif
68209+
68210+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68211+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
68212+ vm_flags &= ~VM_PAGEEXEC;
68213+#endif
68214+
68215 if (flags & MAP_LOCKED)
68216 if (!can_do_mlock())
68217 return -EPERM;
68218@@ -1011,6 +1118,7 @@ unsigned long do_mmap_pgoff(struct file
68219 locked += mm->locked_vm;
68220 lock_limit = rlimit(RLIMIT_MEMLOCK);
68221 lock_limit >>= PAGE_SHIFT;
68222+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68223 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
68224 return -EAGAIN;
68225 }
68226@@ -1081,6 +1189,9 @@ unsigned long do_mmap_pgoff(struct file
68227 if (error)
68228 return error;
68229
68230+ if (!gr_acl_handle_mmap(file, prot))
68231+ return -EACCES;
68232+
68233 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
68234 }
68235 EXPORT_SYMBOL(do_mmap_pgoff);
68236@@ -1161,7 +1272,7 @@ int vma_wants_writenotify(struct vm_area
68237 vm_flags_t vm_flags = vma->vm_flags;
68238
68239 /* If it was private or non-writable, the write bit is already clear */
68240- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
68241+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
68242 return 0;
68243
68244 /* The backer wishes to know when pages are first written to? */
68245@@ -1210,14 +1321,24 @@ unsigned long mmap_region(struct file *f
68246 unsigned long charged = 0;
68247 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
68248
68249+#ifdef CONFIG_PAX_SEGMEXEC
68250+ struct vm_area_struct *vma_m = NULL;
68251+#endif
68252+
68253+ /*
68254+ * mm->mmap_sem is required to protect against another thread
68255+ * changing the mappings in case we sleep.
68256+ */
68257+ verify_mm_writelocked(mm);
68258+
68259 /* Clear old maps */
68260 error = -ENOMEM;
68261-munmap_back:
68262 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68263 if (vma && vma->vm_start < addr + len) {
68264 if (do_munmap(mm, addr, len))
68265 return -ENOMEM;
68266- goto munmap_back;
68267+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68268+ BUG_ON(vma && vma->vm_start < addr + len);
68269 }
68270
68271 /* Check against address space limit. */
68272@@ -1266,6 +1387,16 @@ munmap_back:
68273 goto unacct_error;
68274 }
68275
68276+#ifdef CONFIG_PAX_SEGMEXEC
68277+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
68278+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
68279+ if (!vma_m) {
68280+ error = -ENOMEM;
68281+ goto free_vma;
68282+ }
68283+ }
68284+#endif
68285+
68286 vma->vm_mm = mm;
68287 vma->vm_start = addr;
68288 vma->vm_end = addr + len;
68289@@ -1289,6 +1420,19 @@ munmap_back:
68290 error = file->f_op->mmap(file, vma);
68291 if (error)
68292 goto unmap_and_free_vma;
68293+
68294+#ifdef CONFIG_PAX_SEGMEXEC
68295+ if (vma_m && (vm_flags & VM_EXECUTABLE))
68296+ added_exe_file_vma(mm);
68297+#endif
68298+
68299+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68300+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
68301+ vma->vm_flags |= VM_PAGEEXEC;
68302+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
68303+ }
68304+#endif
68305+
68306 if (vm_flags & VM_EXECUTABLE)
68307 added_exe_file_vma(mm);
68308
68309@@ -1324,6 +1468,11 @@ munmap_back:
68310 vma_link(mm, vma, prev, rb_link, rb_parent);
68311 file = vma->vm_file;
68312
68313+#ifdef CONFIG_PAX_SEGMEXEC
68314+ if (vma_m)
68315+ BUG_ON(pax_mirror_vma(vma_m, vma));
68316+#endif
68317+
68318 /* Once vma denies write, undo our temporary denial count */
68319 if (correct_wcount)
68320 atomic_inc(&inode->i_writecount);
68321@@ -1332,6 +1481,7 @@ out:
68322
68323 mm->total_vm += len >> PAGE_SHIFT;
68324 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
68325+ track_exec_limit(mm, addr, addr + len, vm_flags);
68326 if (vm_flags & VM_LOCKED) {
68327 if (!mlock_vma_pages_range(vma, addr, addr + len))
68328 mm->locked_vm += (len >> PAGE_SHIFT);
68329@@ -1349,6 +1499,12 @@ unmap_and_free_vma:
68330 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
68331 charged = 0;
68332 free_vma:
68333+
68334+#ifdef CONFIG_PAX_SEGMEXEC
68335+ if (vma_m)
68336+ kmem_cache_free(vm_area_cachep, vma_m);
68337+#endif
68338+
68339 kmem_cache_free(vm_area_cachep, vma);
68340 unacct_error:
68341 if (charged)
68342@@ -1356,6 +1512,44 @@ unacct_error:
68343 return error;
68344 }
68345
68346+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
68347+{
68348+ if (!vma) {
68349+#ifdef CONFIG_STACK_GROWSUP
68350+ if (addr > sysctl_heap_stack_gap)
68351+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
68352+ else
68353+ vma = find_vma(current->mm, 0);
68354+ if (vma && (vma->vm_flags & VM_GROWSUP))
68355+ return false;
68356+#endif
68357+ return true;
68358+ }
68359+
68360+ if (addr + len > vma->vm_start)
68361+ return false;
68362+
68363+ if (vma->vm_flags & VM_GROWSDOWN)
68364+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
68365+#ifdef CONFIG_STACK_GROWSUP
68366+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
68367+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
68368+#endif
68369+
68370+ return true;
68371+}
68372+
68373+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
68374+{
68375+ if (vma->vm_start < len)
68376+ return -ENOMEM;
68377+ if (!(vma->vm_flags & VM_GROWSDOWN))
68378+ return vma->vm_start - len;
68379+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
68380+ return vma->vm_start - len - sysctl_heap_stack_gap;
68381+ return -ENOMEM;
68382+}
68383+
68384 /* Get an address range which is currently unmapped.
68385 * For shmat() with addr=0.
68386 *
68387@@ -1382,18 +1576,23 @@ arch_get_unmapped_area(struct file *filp
68388 if (flags & MAP_FIXED)
68389 return addr;
68390
68391+#ifdef CONFIG_PAX_RANDMMAP
68392+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68393+#endif
68394+
68395 if (addr) {
68396 addr = PAGE_ALIGN(addr);
68397- vma = find_vma(mm, addr);
68398- if (TASK_SIZE - len >= addr &&
68399- (!vma || addr + len <= vma->vm_start))
68400- return addr;
68401+ if (TASK_SIZE - len >= addr) {
68402+ vma = find_vma(mm, addr);
68403+ if (check_heap_stack_gap(vma, addr, len))
68404+ return addr;
68405+ }
68406 }
68407 if (len > mm->cached_hole_size) {
68408- start_addr = addr = mm->free_area_cache;
68409+ start_addr = addr = mm->free_area_cache;
68410 } else {
68411- start_addr = addr = TASK_UNMAPPED_BASE;
68412- mm->cached_hole_size = 0;
68413+ start_addr = addr = mm->mmap_base;
68414+ mm->cached_hole_size = 0;
68415 }
68416
68417 full_search:
68418@@ -1404,34 +1603,40 @@ full_search:
68419 * Start a new search - just in case we missed
68420 * some holes.
68421 */
68422- if (start_addr != TASK_UNMAPPED_BASE) {
68423- addr = TASK_UNMAPPED_BASE;
68424- start_addr = addr;
68425+ if (start_addr != mm->mmap_base) {
68426+ start_addr = addr = mm->mmap_base;
68427 mm->cached_hole_size = 0;
68428 goto full_search;
68429 }
68430 return -ENOMEM;
68431 }
68432- if (!vma || addr + len <= vma->vm_start) {
68433- /*
68434- * Remember the place where we stopped the search:
68435- */
68436- mm->free_area_cache = addr + len;
68437- return addr;
68438- }
68439+ if (check_heap_stack_gap(vma, addr, len))
68440+ break;
68441 if (addr + mm->cached_hole_size < vma->vm_start)
68442 mm->cached_hole_size = vma->vm_start - addr;
68443 addr = vma->vm_end;
68444 }
68445+
68446+ /*
68447+ * Remember the place where we stopped the search:
68448+ */
68449+ mm->free_area_cache = addr + len;
68450+ return addr;
68451 }
68452 #endif
68453
68454 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
68455 {
68456+
68457+#ifdef CONFIG_PAX_SEGMEXEC
68458+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68459+ return;
68460+#endif
68461+
68462 /*
68463 * Is this a new hole at the lowest possible address?
68464 */
68465- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
68466+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
68467 mm->free_area_cache = addr;
68468 mm->cached_hole_size = ~0UL;
68469 }
68470@@ -1449,7 +1654,7 @@ arch_get_unmapped_area_topdown(struct fi
68471 {
68472 struct vm_area_struct *vma;
68473 struct mm_struct *mm = current->mm;
68474- unsigned long addr = addr0;
68475+ unsigned long base = mm->mmap_base, addr = addr0;
68476
68477 /* requested length too big for entire address space */
68478 if (len > TASK_SIZE)
68479@@ -1458,13 +1663,18 @@ arch_get_unmapped_area_topdown(struct fi
68480 if (flags & MAP_FIXED)
68481 return addr;
68482
68483+#ifdef CONFIG_PAX_RANDMMAP
68484+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68485+#endif
68486+
68487 /* requesting a specific address */
68488 if (addr) {
68489 addr = PAGE_ALIGN(addr);
68490- vma = find_vma(mm, addr);
68491- if (TASK_SIZE - len >= addr &&
68492- (!vma || addr + len <= vma->vm_start))
68493- return addr;
68494+ if (TASK_SIZE - len >= addr) {
68495+ vma = find_vma(mm, addr);
68496+ if (check_heap_stack_gap(vma, addr, len))
68497+ return addr;
68498+ }
68499 }
68500
68501 /* check if free_area_cache is useful for us */
68502@@ -1479,7 +1689,7 @@ arch_get_unmapped_area_topdown(struct fi
68503 /* make sure it can fit in the remaining address space */
68504 if (addr > len) {
68505 vma = find_vma(mm, addr-len);
68506- if (!vma || addr <= vma->vm_start)
68507+ if (check_heap_stack_gap(vma, addr - len, len))
68508 /* remember the address as a hint for next time */
68509 return (mm->free_area_cache = addr-len);
68510 }
68511@@ -1496,7 +1706,7 @@ arch_get_unmapped_area_topdown(struct fi
68512 * return with success:
68513 */
68514 vma = find_vma(mm, addr);
68515- if (!vma || addr+len <= vma->vm_start)
68516+ if (check_heap_stack_gap(vma, addr, len))
68517 /* remember the address as a hint for next time */
68518 return (mm->free_area_cache = addr);
68519
68520@@ -1505,8 +1715,8 @@ arch_get_unmapped_area_topdown(struct fi
68521 mm->cached_hole_size = vma->vm_start - addr;
68522
68523 /* try just below the current vma->vm_start */
68524- addr = vma->vm_start-len;
68525- } while (len < vma->vm_start);
68526+ addr = skip_heap_stack_gap(vma, len);
68527+ } while (!IS_ERR_VALUE(addr));
68528
68529 bottomup:
68530 /*
68531@@ -1515,13 +1725,21 @@ bottomup:
68532 * can happen with large stack limits and large mmap()
68533 * allocations.
68534 */
68535+ mm->mmap_base = TASK_UNMAPPED_BASE;
68536+
68537+#ifdef CONFIG_PAX_RANDMMAP
68538+ if (mm->pax_flags & MF_PAX_RANDMMAP)
68539+ mm->mmap_base += mm->delta_mmap;
68540+#endif
68541+
68542+ mm->free_area_cache = mm->mmap_base;
68543 mm->cached_hole_size = ~0UL;
68544- mm->free_area_cache = TASK_UNMAPPED_BASE;
68545 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
68546 /*
68547 * Restore the topdown base:
68548 */
68549- mm->free_area_cache = mm->mmap_base;
68550+ mm->mmap_base = base;
68551+ mm->free_area_cache = base;
68552 mm->cached_hole_size = ~0UL;
68553
68554 return addr;
68555@@ -1530,6 +1748,12 @@ bottomup:
68556
68557 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
68558 {
68559+
68560+#ifdef CONFIG_PAX_SEGMEXEC
68561+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68562+ return;
68563+#endif
68564+
68565 /*
68566 * Is this a new hole at the highest possible address?
68567 */
68568@@ -1537,8 +1761,10 @@ void arch_unmap_area_topdown(struct mm_s
68569 mm->free_area_cache = addr;
68570
68571 /* dont allow allocations above current base */
68572- if (mm->free_area_cache > mm->mmap_base)
68573+ if (mm->free_area_cache > mm->mmap_base) {
68574 mm->free_area_cache = mm->mmap_base;
68575+ mm->cached_hole_size = ~0UL;
68576+ }
68577 }
68578
68579 unsigned long
68580@@ -1646,6 +1872,28 @@ out:
68581 return prev ? prev->vm_next : vma;
68582 }
68583
68584+#ifdef CONFIG_PAX_SEGMEXEC
68585+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
68586+{
68587+ struct vm_area_struct *vma_m;
68588+
68589+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
68590+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
68591+ BUG_ON(vma->vm_mirror);
68592+ return NULL;
68593+ }
68594+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
68595+ vma_m = vma->vm_mirror;
68596+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
68597+ BUG_ON(vma->vm_file != vma_m->vm_file);
68598+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
68599+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
68600+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
68601+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
68602+ return vma_m;
68603+}
68604+#endif
68605+
68606 /*
68607 * Verify that the stack growth is acceptable and
68608 * update accounting. This is shared with both the
68609@@ -1662,6 +1910,7 @@ static int acct_stack_growth(struct vm_a
68610 return -ENOMEM;
68611
68612 /* Stack limit test */
68613+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
68614 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
68615 return -ENOMEM;
68616
68617@@ -1672,6 +1921,7 @@ static int acct_stack_growth(struct vm_a
68618 locked = mm->locked_vm + grow;
68619 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
68620 limit >>= PAGE_SHIFT;
68621+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68622 if (locked > limit && !capable(CAP_IPC_LOCK))
68623 return -ENOMEM;
68624 }
68625@@ -1702,37 +1952,48 @@ static int acct_stack_growth(struct vm_a
68626 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
68627 * vma is the last one with address > vma->vm_end. Have to extend vma.
68628 */
68629+#ifndef CONFIG_IA64
68630+static
68631+#endif
68632 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
68633 {
68634 int error;
68635+ bool locknext;
68636
68637 if (!(vma->vm_flags & VM_GROWSUP))
68638 return -EFAULT;
68639
68640+ /* Also guard against wrapping around to address 0. */
68641+ if (address < PAGE_ALIGN(address+1))
68642+ address = PAGE_ALIGN(address+1);
68643+ else
68644+ return -ENOMEM;
68645+
68646 /*
68647 * We must make sure the anon_vma is allocated
68648 * so that the anon_vma locking is not a noop.
68649 */
68650 if (unlikely(anon_vma_prepare(vma)))
68651 return -ENOMEM;
68652+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
68653+ if (locknext && anon_vma_prepare(vma->vm_next))
68654+ return -ENOMEM;
68655 vma_lock_anon_vma(vma);
68656+ if (locknext)
68657+ vma_lock_anon_vma(vma->vm_next);
68658
68659 /*
68660 * vma->vm_start/vm_end cannot change under us because the caller
68661 * is required to hold the mmap_sem in read mode. We need the
68662- * anon_vma lock to serialize against concurrent expand_stacks.
68663- * Also guard against wrapping around to address 0.
68664+ * anon_vma locks to serialize against concurrent expand_stacks
68665+ * and expand_upwards.
68666 */
68667- if (address < PAGE_ALIGN(address+4))
68668- address = PAGE_ALIGN(address+4);
68669- else {
68670- vma_unlock_anon_vma(vma);
68671- return -ENOMEM;
68672- }
68673 error = 0;
68674
68675 /* Somebody else might have raced and expanded it already */
68676- if (address > vma->vm_end) {
68677+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
68678+ error = -ENOMEM;
68679+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
68680 unsigned long size, grow;
68681
68682 size = address - vma->vm_start;
68683@@ -1747,6 +2008,8 @@ int expand_upwards(struct vm_area_struct
68684 }
68685 }
68686 }
68687+ if (locknext)
68688+ vma_unlock_anon_vma(vma->vm_next);
68689 vma_unlock_anon_vma(vma);
68690 khugepaged_enter_vma_merge(vma);
68691 return error;
68692@@ -1760,6 +2023,8 @@ int expand_downwards(struct vm_area_stru
68693 unsigned long address)
68694 {
68695 int error;
68696+ bool lockprev = false;
68697+ struct vm_area_struct *prev;
68698
68699 /*
68700 * We must make sure the anon_vma is allocated
68701@@ -1773,6 +2038,15 @@ int expand_downwards(struct vm_area_stru
68702 if (error)
68703 return error;
68704
68705+ prev = vma->vm_prev;
68706+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
68707+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
68708+#endif
68709+ if (lockprev && anon_vma_prepare(prev))
68710+ return -ENOMEM;
68711+ if (lockprev)
68712+ vma_lock_anon_vma(prev);
68713+
68714 vma_lock_anon_vma(vma);
68715
68716 /*
68717@@ -1782,9 +2056,17 @@ int expand_downwards(struct vm_area_stru
68718 */
68719
68720 /* Somebody else might have raced and expanded it already */
68721- if (address < vma->vm_start) {
68722+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
68723+ error = -ENOMEM;
68724+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
68725 unsigned long size, grow;
68726
68727+#ifdef CONFIG_PAX_SEGMEXEC
68728+ struct vm_area_struct *vma_m;
68729+
68730+ vma_m = pax_find_mirror_vma(vma);
68731+#endif
68732+
68733 size = vma->vm_end - address;
68734 grow = (vma->vm_start - address) >> PAGE_SHIFT;
68735
68736@@ -1794,11 +2076,22 @@ int expand_downwards(struct vm_area_stru
68737 if (!error) {
68738 vma->vm_start = address;
68739 vma->vm_pgoff -= grow;
68740+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
68741+
68742+#ifdef CONFIG_PAX_SEGMEXEC
68743+ if (vma_m) {
68744+ vma_m->vm_start -= grow << PAGE_SHIFT;
68745+ vma_m->vm_pgoff -= grow;
68746+ }
68747+#endif
68748+
68749 perf_event_mmap(vma);
68750 }
68751 }
68752 }
68753 vma_unlock_anon_vma(vma);
68754+ if (lockprev)
68755+ vma_unlock_anon_vma(prev);
68756 khugepaged_enter_vma_merge(vma);
68757 return error;
68758 }
68759@@ -1868,6 +2161,13 @@ static void remove_vma_list(struct mm_st
68760 do {
68761 long nrpages = vma_pages(vma);
68762
68763+#ifdef CONFIG_PAX_SEGMEXEC
68764+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
68765+ vma = remove_vma(vma);
68766+ continue;
68767+ }
68768+#endif
68769+
68770 mm->total_vm -= nrpages;
68771 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
68772 vma = remove_vma(vma);
68773@@ -1913,6 +2213,16 @@ detach_vmas_to_be_unmapped(struct mm_str
68774 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
68775 vma->vm_prev = NULL;
68776 do {
68777+
68778+#ifdef CONFIG_PAX_SEGMEXEC
68779+ if (vma->vm_mirror) {
68780+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
68781+ vma->vm_mirror->vm_mirror = NULL;
68782+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
68783+ vma->vm_mirror = NULL;
68784+ }
68785+#endif
68786+
68787 rb_erase(&vma->vm_rb, &mm->mm_rb);
68788 mm->map_count--;
68789 tail_vma = vma;
68790@@ -1941,14 +2251,33 @@ static int __split_vma(struct mm_struct
68791 struct vm_area_struct *new;
68792 int err = -ENOMEM;
68793
68794+#ifdef CONFIG_PAX_SEGMEXEC
68795+ struct vm_area_struct *vma_m, *new_m = NULL;
68796+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
68797+#endif
68798+
68799 if (is_vm_hugetlb_page(vma) && (addr &
68800 ~(huge_page_mask(hstate_vma(vma)))))
68801 return -EINVAL;
68802
68803+#ifdef CONFIG_PAX_SEGMEXEC
68804+ vma_m = pax_find_mirror_vma(vma);
68805+#endif
68806+
68807 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
68808 if (!new)
68809 goto out_err;
68810
68811+#ifdef CONFIG_PAX_SEGMEXEC
68812+ if (vma_m) {
68813+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
68814+ if (!new_m) {
68815+ kmem_cache_free(vm_area_cachep, new);
68816+ goto out_err;
68817+ }
68818+ }
68819+#endif
68820+
68821 /* most fields are the same, copy all, and then fixup */
68822 *new = *vma;
68823
68824@@ -1961,6 +2290,22 @@ static int __split_vma(struct mm_struct
68825 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
68826 }
68827
68828+#ifdef CONFIG_PAX_SEGMEXEC
68829+ if (vma_m) {
68830+ *new_m = *vma_m;
68831+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
68832+ new_m->vm_mirror = new;
68833+ new->vm_mirror = new_m;
68834+
68835+ if (new_below)
68836+ new_m->vm_end = addr_m;
68837+ else {
68838+ new_m->vm_start = addr_m;
68839+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
68840+ }
68841+ }
68842+#endif
68843+
68844 pol = mpol_dup(vma_policy(vma));
68845 if (IS_ERR(pol)) {
68846 err = PTR_ERR(pol);
68847@@ -1986,6 +2331,42 @@ static int __split_vma(struct mm_struct
68848 else
68849 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
68850
68851+#ifdef CONFIG_PAX_SEGMEXEC
68852+ if (!err && vma_m) {
68853+ if (anon_vma_clone(new_m, vma_m))
68854+ goto out_free_mpol;
68855+
68856+ mpol_get(pol);
68857+ vma_set_policy(new_m, pol);
68858+
68859+ if (new_m->vm_file) {
68860+ get_file(new_m->vm_file);
68861+ if (vma_m->vm_flags & VM_EXECUTABLE)
68862+ added_exe_file_vma(mm);
68863+ }
68864+
68865+ if (new_m->vm_ops && new_m->vm_ops->open)
68866+ new_m->vm_ops->open(new_m);
68867+
68868+ if (new_below)
68869+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
68870+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
68871+ else
68872+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
68873+
68874+ if (err) {
68875+ if (new_m->vm_ops && new_m->vm_ops->close)
68876+ new_m->vm_ops->close(new_m);
68877+ if (new_m->vm_file) {
68878+ if (vma_m->vm_flags & VM_EXECUTABLE)
68879+ removed_exe_file_vma(mm);
68880+ fput(new_m->vm_file);
68881+ }
68882+ mpol_put(pol);
68883+ }
68884+ }
68885+#endif
68886+
68887 /* Success. */
68888 if (!err)
68889 return 0;
68890@@ -1998,10 +2379,18 @@ static int __split_vma(struct mm_struct
68891 removed_exe_file_vma(mm);
68892 fput(new->vm_file);
68893 }
68894- unlink_anon_vmas(new);
68895 out_free_mpol:
68896 mpol_put(pol);
68897 out_free_vma:
68898+
68899+#ifdef CONFIG_PAX_SEGMEXEC
68900+ if (new_m) {
68901+ unlink_anon_vmas(new_m);
68902+ kmem_cache_free(vm_area_cachep, new_m);
68903+ }
68904+#endif
68905+
68906+ unlink_anon_vmas(new);
68907 kmem_cache_free(vm_area_cachep, new);
68908 out_err:
68909 return err;
68910@@ -2014,6 +2403,15 @@ static int __split_vma(struct mm_struct
68911 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
68912 unsigned long addr, int new_below)
68913 {
68914+
68915+#ifdef CONFIG_PAX_SEGMEXEC
68916+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
68917+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
68918+ if (mm->map_count >= sysctl_max_map_count-1)
68919+ return -ENOMEM;
68920+ } else
68921+#endif
68922+
68923 if (mm->map_count >= sysctl_max_map_count)
68924 return -ENOMEM;
68925
68926@@ -2025,11 +2423,30 @@ int split_vma(struct mm_struct *mm, stru
68927 * work. This now handles partial unmappings.
68928 * Jeremy Fitzhardinge <jeremy@goop.org>
68929 */
68930+#ifdef CONFIG_PAX_SEGMEXEC
68931 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
68932 {
68933+ int ret = __do_munmap(mm, start, len);
68934+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
68935+ return ret;
68936+
68937+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
68938+}
68939+
68940+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
68941+#else
68942+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
68943+#endif
68944+{
68945 unsigned long end;
68946 struct vm_area_struct *vma, *prev, *last;
68947
68948+ /*
68949+ * mm->mmap_sem is required to protect against another thread
68950+ * changing the mappings in case we sleep.
68951+ */
68952+ verify_mm_writelocked(mm);
68953+
68954 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
68955 return -EINVAL;
68956
68957@@ -2104,6 +2521,8 @@ int do_munmap(struct mm_struct *mm, unsi
68958 /* Fix up all other VM information */
68959 remove_vma_list(mm, vma);
68960
68961+ track_exec_limit(mm, start, end, 0UL);
68962+
68963 return 0;
68964 }
68965
68966@@ -2116,22 +2535,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
68967
68968 profile_munmap(addr);
68969
68970+#ifdef CONFIG_PAX_SEGMEXEC
68971+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
68972+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
68973+ return -EINVAL;
68974+#endif
68975+
68976 down_write(&mm->mmap_sem);
68977 ret = do_munmap(mm, addr, len);
68978 up_write(&mm->mmap_sem);
68979 return ret;
68980 }
68981
68982-static inline void verify_mm_writelocked(struct mm_struct *mm)
68983-{
68984-#ifdef CONFIG_DEBUG_VM
68985- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68986- WARN_ON(1);
68987- up_read(&mm->mmap_sem);
68988- }
68989-#endif
68990-}
68991-
68992 /*
68993 * this is really a simplified "do_mmap". it only handles
68994 * anonymous maps. eventually we may be able to do some
68995@@ -2145,6 +2560,7 @@ unsigned long do_brk(unsigned long addr,
68996 struct rb_node ** rb_link, * rb_parent;
68997 pgoff_t pgoff = addr >> PAGE_SHIFT;
68998 int error;
68999+ unsigned long charged;
69000
69001 len = PAGE_ALIGN(len);
69002 if (!len)
69003@@ -2156,16 +2572,30 @@ unsigned long do_brk(unsigned long addr,
69004
69005 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
69006
69007+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
69008+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
69009+ flags &= ~VM_EXEC;
69010+
69011+#ifdef CONFIG_PAX_MPROTECT
69012+ if (mm->pax_flags & MF_PAX_MPROTECT)
69013+ flags &= ~VM_MAYEXEC;
69014+#endif
69015+
69016+ }
69017+#endif
69018+
69019 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
69020 if (error & ~PAGE_MASK)
69021 return error;
69022
69023+ charged = len >> PAGE_SHIFT;
69024+
69025 /*
69026 * mlock MCL_FUTURE?
69027 */
69028 if (mm->def_flags & VM_LOCKED) {
69029 unsigned long locked, lock_limit;
69030- locked = len >> PAGE_SHIFT;
69031+ locked = charged;
69032 locked += mm->locked_vm;
69033 lock_limit = rlimit(RLIMIT_MEMLOCK);
69034 lock_limit >>= PAGE_SHIFT;
69035@@ -2182,22 +2612,22 @@ unsigned long do_brk(unsigned long addr,
69036 /*
69037 * Clear old maps. this also does some error checking for us
69038 */
69039- munmap_back:
69040 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69041 if (vma && vma->vm_start < addr + len) {
69042 if (do_munmap(mm, addr, len))
69043 return -ENOMEM;
69044- goto munmap_back;
69045+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69046+ BUG_ON(vma && vma->vm_start < addr + len);
69047 }
69048
69049 /* Check against address space limits *after* clearing old maps... */
69050- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
69051+ if (!may_expand_vm(mm, charged))
69052 return -ENOMEM;
69053
69054 if (mm->map_count > sysctl_max_map_count)
69055 return -ENOMEM;
69056
69057- if (security_vm_enough_memory(len >> PAGE_SHIFT))
69058+ if (security_vm_enough_memory(charged))
69059 return -ENOMEM;
69060
69061 /* Can we just expand an old private anonymous mapping? */
69062@@ -2211,7 +2641,7 @@ unsigned long do_brk(unsigned long addr,
69063 */
69064 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69065 if (!vma) {
69066- vm_unacct_memory(len >> PAGE_SHIFT);
69067+ vm_unacct_memory(charged);
69068 return -ENOMEM;
69069 }
69070
69071@@ -2225,11 +2655,12 @@ unsigned long do_brk(unsigned long addr,
69072 vma_link(mm, vma, prev, rb_link, rb_parent);
69073 out:
69074 perf_event_mmap(vma);
69075- mm->total_vm += len >> PAGE_SHIFT;
69076+ mm->total_vm += charged;
69077 if (flags & VM_LOCKED) {
69078 if (!mlock_vma_pages_range(vma, addr, addr + len))
69079- mm->locked_vm += (len >> PAGE_SHIFT);
69080+ mm->locked_vm += charged;
69081 }
69082+ track_exec_limit(mm, addr, addr + len, flags);
69083 return addr;
69084 }
69085
69086@@ -2276,8 +2707,10 @@ void exit_mmap(struct mm_struct *mm)
69087 * Walk the list again, actually closing and freeing it,
69088 * with preemption enabled, without holding any MM locks.
69089 */
69090- while (vma)
69091+ while (vma) {
69092+ vma->vm_mirror = NULL;
69093 vma = remove_vma(vma);
69094+ }
69095
69096 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
69097 }
69098@@ -2291,6 +2724,13 @@ int insert_vm_struct(struct mm_struct *
69099 struct vm_area_struct * __vma, * prev;
69100 struct rb_node ** rb_link, * rb_parent;
69101
69102+#ifdef CONFIG_PAX_SEGMEXEC
69103+ struct vm_area_struct *vma_m = NULL;
69104+#endif
69105+
69106+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
69107+ return -EPERM;
69108+
69109 /*
69110 * The vm_pgoff of a purely anonymous vma should be irrelevant
69111 * until its first write fault, when page's anon_vma and index
69112@@ -2313,7 +2753,22 @@ int insert_vm_struct(struct mm_struct *
69113 if ((vma->vm_flags & VM_ACCOUNT) &&
69114 security_vm_enough_memory_mm(mm, vma_pages(vma)))
69115 return -ENOMEM;
69116+
69117+#ifdef CONFIG_PAX_SEGMEXEC
69118+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
69119+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69120+ if (!vma_m)
69121+ return -ENOMEM;
69122+ }
69123+#endif
69124+
69125 vma_link(mm, vma, prev, rb_link, rb_parent);
69126+
69127+#ifdef CONFIG_PAX_SEGMEXEC
69128+ if (vma_m)
69129+ BUG_ON(pax_mirror_vma(vma_m, vma));
69130+#endif
69131+
69132 return 0;
69133 }
69134
69135@@ -2331,6 +2786,8 @@ struct vm_area_struct *copy_vma(struct v
69136 struct rb_node **rb_link, *rb_parent;
69137 struct mempolicy *pol;
69138
69139+ BUG_ON(vma->vm_mirror);
69140+
69141 /*
69142 * If anonymous vma has not yet been faulted, update new pgoff
69143 * to match new location, to increase its chance of merging.
69144@@ -2381,6 +2838,39 @@ struct vm_area_struct *copy_vma(struct v
69145 return NULL;
69146 }
69147
69148+#ifdef CONFIG_PAX_SEGMEXEC
69149+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
69150+{
69151+ struct vm_area_struct *prev_m;
69152+ struct rb_node **rb_link_m, *rb_parent_m;
69153+ struct mempolicy *pol_m;
69154+
69155+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
69156+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
69157+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
69158+ *vma_m = *vma;
69159+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
69160+ if (anon_vma_clone(vma_m, vma))
69161+ return -ENOMEM;
69162+ pol_m = vma_policy(vma_m);
69163+ mpol_get(pol_m);
69164+ vma_set_policy(vma_m, pol_m);
69165+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
69166+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
69167+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
69168+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
69169+ if (vma_m->vm_file)
69170+ get_file(vma_m->vm_file);
69171+ if (vma_m->vm_ops && vma_m->vm_ops->open)
69172+ vma_m->vm_ops->open(vma_m);
69173+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
69174+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
69175+ vma_m->vm_mirror = vma;
69176+ vma->vm_mirror = vma_m;
69177+ return 0;
69178+}
69179+#endif
69180+
69181 /*
69182 * Return true if the calling process may expand its vm space by the passed
69183 * number of pages
69184@@ -2391,7 +2881,7 @@ int may_expand_vm(struct mm_struct *mm,
69185 unsigned long lim;
69186
69187 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
69188-
69189+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
69190 if (cur + npages > lim)
69191 return 0;
69192 return 1;
69193@@ -2462,6 +2952,22 @@ int install_special_mapping(struct mm_st
69194 vma->vm_start = addr;
69195 vma->vm_end = addr + len;
69196
69197+#ifdef CONFIG_PAX_MPROTECT
69198+ if (mm->pax_flags & MF_PAX_MPROTECT) {
69199+#ifndef CONFIG_PAX_MPROTECT_COMPAT
69200+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
69201+ return -EPERM;
69202+ if (!(vm_flags & VM_EXEC))
69203+ vm_flags &= ~VM_MAYEXEC;
69204+#else
69205+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69206+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69207+#endif
69208+ else
69209+ vm_flags &= ~VM_MAYWRITE;
69210+ }
69211+#endif
69212+
69213 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
69214 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69215
69216diff -urNp linux-3.0.7/mm/mprotect.c linux-3.0.7/mm/mprotect.c
69217--- linux-3.0.7/mm/mprotect.c 2011-07-21 22:17:23.000000000 -0400
69218+++ linux-3.0.7/mm/mprotect.c 2011-08-23 21:48:14.000000000 -0400
69219@@ -23,10 +23,16 @@
69220 #include <linux/mmu_notifier.h>
69221 #include <linux/migrate.h>
69222 #include <linux/perf_event.h>
69223+
69224+#ifdef CONFIG_PAX_MPROTECT
69225+#include <linux/elf.h>
69226+#endif
69227+
69228 #include <asm/uaccess.h>
69229 #include <asm/pgtable.h>
69230 #include <asm/cacheflush.h>
69231 #include <asm/tlbflush.h>
69232+#include <asm/mmu_context.h>
69233
69234 #ifndef pgprot_modify
69235 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
69236@@ -141,6 +147,48 @@ static void change_protection(struct vm_
69237 flush_tlb_range(vma, start, end);
69238 }
69239
69240+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69241+/* called while holding the mmap semaphor for writing except stack expansion */
69242+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
69243+{
69244+ unsigned long oldlimit, newlimit = 0UL;
69245+
69246+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
69247+ return;
69248+
69249+ spin_lock(&mm->page_table_lock);
69250+ oldlimit = mm->context.user_cs_limit;
69251+ if ((prot & VM_EXEC) && oldlimit < end)
69252+ /* USER_CS limit moved up */
69253+ newlimit = end;
69254+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
69255+ /* USER_CS limit moved down */
69256+ newlimit = start;
69257+
69258+ if (newlimit) {
69259+ mm->context.user_cs_limit = newlimit;
69260+
69261+#ifdef CONFIG_SMP
69262+ wmb();
69263+ cpus_clear(mm->context.cpu_user_cs_mask);
69264+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
69265+#endif
69266+
69267+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
69268+ }
69269+ spin_unlock(&mm->page_table_lock);
69270+ if (newlimit == end) {
69271+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
69272+
69273+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
69274+ if (is_vm_hugetlb_page(vma))
69275+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
69276+ else
69277+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
69278+ }
69279+}
69280+#endif
69281+
69282 int
69283 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69284 unsigned long start, unsigned long end, unsigned long newflags)
69285@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
69286 int error;
69287 int dirty_accountable = 0;
69288
69289+#ifdef CONFIG_PAX_SEGMEXEC
69290+ struct vm_area_struct *vma_m = NULL;
69291+ unsigned long start_m, end_m;
69292+
69293+ start_m = start + SEGMEXEC_TASK_SIZE;
69294+ end_m = end + SEGMEXEC_TASK_SIZE;
69295+#endif
69296+
69297 if (newflags == oldflags) {
69298 *pprev = vma;
69299 return 0;
69300 }
69301
69302+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
69303+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
69304+
69305+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
69306+ return -ENOMEM;
69307+
69308+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
69309+ return -ENOMEM;
69310+ }
69311+
69312 /*
69313 * If we make a private mapping writable we increase our commit;
69314 * but (without finer accounting) cannot reduce our commit if we
69315@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
69316 }
69317 }
69318
69319+#ifdef CONFIG_PAX_SEGMEXEC
69320+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
69321+ if (start != vma->vm_start) {
69322+ error = split_vma(mm, vma, start, 1);
69323+ if (error)
69324+ goto fail;
69325+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
69326+ *pprev = (*pprev)->vm_next;
69327+ }
69328+
69329+ if (end != vma->vm_end) {
69330+ error = split_vma(mm, vma, end, 0);
69331+ if (error)
69332+ goto fail;
69333+ }
69334+
69335+ if (pax_find_mirror_vma(vma)) {
69336+ error = __do_munmap(mm, start_m, end_m - start_m);
69337+ if (error)
69338+ goto fail;
69339+ } else {
69340+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69341+ if (!vma_m) {
69342+ error = -ENOMEM;
69343+ goto fail;
69344+ }
69345+ vma->vm_flags = newflags;
69346+ error = pax_mirror_vma(vma_m, vma);
69347+ if (error) {
69348+ vma->vm_flags = oldflags;
69349+ goto fail;
69350+ }
69351+ }
69352+ }
69353+#endif
69354+
69355 /*
69356 * First try to merge with previous and/or next vma.
69357 */
69358@@ -204,9 +306,21 @@ success:
69359 * vm_flags and vm_page_prot are protected by the mmap_sem
69360 * held in write mode.
69361 */
69362+
69363+#ifdef CONFIG_PAX_SEGMEXEC
69364+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
69365+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
69366+#endif
69367+
69368 vma->vm_flags = newflags;
69369+
69370+#ifdef CONFIG_PAX_MPROTECT
69371+ if (mm->binfmt && mm->binfmt->handle_mprotect)
69372+ mm->binfmt->handle_mprotect(vma, newflags);
69373+#endif
69374+
69375 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
69376- vm_get_page_prot(newflags));
69377+ vm_get_page_prot(vma->vm_flags));
69378
69379 if (vma_wants_writenotify(vma)) {
69380 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
69381@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69382 end = start + len;
69383 if (end <= start)
69384 return -ENOMEM;
69385+
69386+#ifdef CONFIG_PAX_SEGMEXEC
69387+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
69388+ if (end > SEGMEXEC_TASK_SIZE)
69389+ return -EINVAL;
69390+ } else
69391+#endif
69392+
69393+ if (end > TASK_SIZE)
69394+ return -EINVAL;
69395+
69396 if (!arch_validate_prot(prot))
69397 return -EINVAL;
69398
69399@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69400 /*
69401 * Does the application expect PROT_READ to imply PROT_EXEC:
69402 */
69403- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
69404+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
69405 prot |= PROT_EXEC;
69406
69407 vm_flags = calc_vm_prot_bits(prot);
69408@@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69409 if (start > vma->vm_start)
69410 prev = vma;
69411
69412+#ifdef CONFIG_PAX_MPROTECT
69413+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
69414+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
69415+#endif
69416+
69417 for (nstart = start ; ; ) {
69418 unsigned long newflags;
69419
69420@@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69421
69422 /* newflags >> 4 shift VM_MAY% in place of VM_% */
69423 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
69424+ if (prot & (PROT_WRITE | PROT_EXEC))
69425+ gr_log_rwxmprotect(vma->vm_file);
69426+
69427+ error = -EACCES;
69428+ goto out;
69429+ }
69430+
69431+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
69432 error = -EACCES;
69433 goto out;
69434 }
69435@@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69436 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
69437 if (error)
69438 goto out;
69439+
69440+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
69441+
69442 nstart = tmp;
69443
69444 if (nstart < prev->vm_end)
69445diff -urNp linux-3.0.7/mm/mremap.c linux-3.0.7/mm/mremap.c
69446--- linux-3.0.7/mm/mremap.c 2011-07-21 22:17:23.000000000 -0400
69447+++ linux-3.0.7/mm/mremap.c 2011-08-23 21:47:56.000000000 -0400
69448@@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_str
69449 continue;
69450 pte = ptep_clear_flush(vma, old_addr, old_pte);
69451 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
69452+
69453+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69454+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
69455+ pte = pte_exprotect(pte);
69456+#endif
69457+
69458 set_pte_at(mm, new_addr, new_pte, pte);
69459 }
69460
69461@@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_res
69462 if (is_vm_hugetlb_page(vma))
69463 goto Einval;
69464
69465+#ifdef CONFIG_PAX_SEGMEXEC
69466+ if (pax_find_mirror_vma(vma))
69467+ goto Einval;
69468+#endif
69469+
69470 /* We can't remap across vm area boundaries */
69471 if (old_len > vma->vm_end - addr)
69472 goto Efault;
69473@@ -328,20 +339,25 @@ static unsigned long mremap_to(unsigned
69474 unsigned long ret = -EINVAL;
69475 unsigned long charged = 0;
69476 unsigned long map_flags;
69477+ unsigned long pax_task_size = TASK_SIZE;
69478
69479 if (new_addr & ~PAGE_MASK)
69480 goto out;
69481
69482- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
69483+#ifdef CONFIG_PAX_SEGMEXEC
69484+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
69485+ pax_task_size = SEGMEXEC_TASK_SIZE;
69486+#endif
69487+
69488+ pax_task_size -= PAGE_SIZE;
69489+
69490+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
69491 goto out;
69492
69493 /* Check if the location we're moving into overlaps the
69494 * old location at all, and fail if it does.
69495 */
69496- if ((new_addr <= addr) && (new_addr+new_len) > addr)
69497- goto out;
69498-
69499- if ((addr <= new_addr) && (addr+old_len) > new_addr)
69500+ if (addr + old_len > new_addr && new_addr + new_len > addr)
69501 goto out;
69502
69503 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69504@@ -413,6 +429,7 @@ unsigned long do_mremap(unsigned long ad
69505 struct vm_area_struct *vma;
69506 unsigned long ret = -EINVAL;
69507 unsigned long charged = 0;
69508+ unsigned long pax_task_size = TASK_SIZE;
69509
69510 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
69511 goto out;
69512@@ -431,6 +448,17 @@ unsigned long do_mremap(unsigned long ad
69513 if (!new_len)
69514 goto out;
69515
69516+#ifdef CONFIG_PAX_SEGMEXEC
69517+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
69518+ pax_task_size = SEGMEXEC_TASK_SIZE;
69519+#endif
69520+
69521+ pax_task_size -= PAGE_SIZE;
69522+
69523+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
69524+ old_len > pax_task_size || addr > pax_task_size-old_len)
69525+ goto out;
69526+
69527 if (flags & MREMAP_FIXED) {
69528 if (flags & MREMAP_MAYMOVE)
69529 ret = mremap_to(addr, old_len, new_addr, new_len);
69530@@ -480,6 +508,7 @@ unsigned long do_mremap(unsigned long ad
69531 addr + new_len);
69532 }
69533 ret = addr;
69534+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
69535 goto out;
69536 }
69537 }
69538@@ -506,7 +535,13 @@ unsigned long do_mremap(unsigned long ad
69539 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69540 if (ret)
69541 goto out;
69542+
69543+ map_flags = vma->vm_flags;
69544 ret = move_vma(vma, addr, old_len, new_len, new_addr);
69545+ if (!(ret & ~PAGE_MASK)) {
69546+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
69547+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
69548+ }
69549 }
69550 out:
69551 if (ret & ~PAGE_MASK)
69552diff -urNp linux-3.0.7/mm/nobootmem.c linux-3.0.7/mm/nobootmem.c
69553--- linux-3.0.7/mm/nobootmem.c 2011-07-21 22:17:23.000000000 -0400
69554+++ linux-3.0.7/mm/nobootmem.c 2011-08-23 21:47:56.000000000 -0400
69555@@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
69556 unsigned long __init free_all_memory_core_early(int nodeid)
69557 {
69558 int i;
69559- u64 start, end;
69560+ u64 start, end, startrange, endrange;
69561 unsigned long count = 0;
69562- struct range *range = NULL;
69563+ struct range *range = NULL, rangerange = { 0, 0 };
69564 int nr_range;
69565
69566 nr_range = get_free_all_memory_range(&range, nodeid);
69567+ startrange = __pa(range) >> PAGE_SHIFT;
69568+ endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
69569
69570 for (i = 0; i < nr_range; i++) {
69571 start = range[i].start;
69572 end = range[i].end;
69573+ if (start <= endrange && startrange < end) {
69574+ BUG_ON(rangerange.start | rangerange.end);
69575+ rangerange = range[i];
69576+ continue;
69577+ }
69578 count += end - start;
69579 __free_pages_memory(start, end);
69580 }
69581+ start = rangerange.start;
69582+ end = rangerange.end;
69583+ count += end - start;
69584+ __free_pages_memory(start, end);
69585
69586 return count;
69587 }
69588diff -urNp linux-3.0.7/mm/nommu.c linux-3.0.7/mm/nommu.c
69589--- linux-3.0.7/mm/nommu.c 2011-07-21 22:17:23.000000000 -0400
69590+++ linux-3.0.7/mm/nommu.c 2011-08-23 21:47:56.000000000 -0400
69591@@ -63,7 +63,6 @@ int sysctl_overcommit_memory = OVERCOMMI
69592 int sysctl_overcommit_ratio = 50; /* default is 50% */
69593 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
69594 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
69595-int heap_stack_gap = 0;
69596
69597 atomic_long_t mmap_pages_allocated;
69598
69599@@ -826,15 +825,6 @@ struct vm_area_struct *find_vma(struct m
69600 EXPORT_SYMBOL(find_vma);
69601
69602 /*
69603- * find a VMA
69604- * - we don't extend stack VMAs under NOMMU conditions
69605- */
69606-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
69607-{
69608- return find_vma(mm, addr);
69609-}
69610-
69611-/*
69612 * expand a stack to a given address
69613 * - not supported under NOMMU conditions
69614 */
69615@@ -1554,6 +1544,7 @@ int split_vma(struct mm_struct *mm, stru
69616
69617 /* most fields are the same, copy all, and then fixup */
69618 *new = *vma;
69619+ INIT_LIST_HEAD(&new->anon_vma_chain);
69620 *region = *vma->vm_region;
69621 new->vm_region = region;
69622
69623diff -urNp linux-3.0.7/mm/page_alloc.c linux-3.0.7/mm/page_alloc.c
69624--- linux-3.0.7/mm/page_alloc.c 2011-10-16 21:54:54.000000000 -0400
69625+++ linux-3.0.7/mm/page_alloc.c 2011-10-16 21:55:28.000000000 -0400
69626@@ -340,7 +340,7 @@ out:
69627 * This usage means that zero-order pages may not be compound.
69628 */
69629
69630-static void free_compound_page(struct page *page)
69631+void free_compound_page(struct page *page)
69632 {
69633 __free_pages_ok(page, compound_order(page));
69634 }
69635@@ -653,6 +653,10 @@ static bool free_pages_prepare(struct pa
69636 int i;
69637 int bad = 0;
69638
69639+#ifdef CONFIG_PAX_MEMORY_SANITIZE
69640+ unsigned long index = 1UL << order;
69641+#endif
69642+
69643 trace_mm_page_free_direct(page, order);
69644 kmemcheck_free_shadow(page, order);
69645
69646@@ -668,6 +672,12 @@ static bool free_pages_prepare(struct pa
69647 debug_check_no_obj_freed(page_address(page),
69648 PAGE_SIZE << order);
69649 }
69650+
69651+#ifdef CONFIG_PAX_MEMORY_SANITIZE
69652+ for (; index; --index)
69653+ sanitize_highpage(page + index - 1);
69654+#endif
69655+
69656 arch_free_page(page, order);
69657 kernel_map_pages(page, 1 << order, 0);
69658
69659@@ -783,8 +793,10 @@ static int prep_new_page(struct page *pa
69660 arch_alloc_page(page, order);
69661 kernel_map_pages(page, 1 << order, 1);
69662
69663+#ifndef CONFIG_PAX_MEMORY_SANITIZE
69664 if (gfp_flags & __GFP_ZERO)
69665 prep_zero_page(page, order, gfp_flags);
69666+#endif
69667
69668 if (order && (gfp_flags & __GFP_COMP))
69669 prep_compound_page(page, order);
69670@@ -2557,6 +2569,8 @@ void show_free_areas(unsigned int filter
69671 int cpu;
69672 struct zone *zone;
69673
69674+ pax_track_stack();
69675+
69676 for_each_populated_zone(zone) {
69677 if (skip_free_areas_node(filter, zone_to_nid(zone)))
69678 continue;
69679diff -urNp linux-3.0.7/mm/percpu.c linux-3.0.7/mm/percpu.c
69680--- linux-3.0.7/mm/percpu.c 2011-07-21 22:17:23.000000000 -0400
69681+++ linux-3.0.7/mm/percpu.c 2011-08-23 21:47:56.000000000 -0400
69682@@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu
69683 static unsigned int pcpu_last_unit_cpu __read_mostly;
69684
69685 /* the address of the first chunk which starts with the kernel static area */
69686-void *pcpu_base_addr __read_mostly;
69687+void *pcpu_base_addr __read_only;
69688 EXPORT_SYMBOL_GPL(pcpu_base_addr);
69689
69690 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
69691diff -urNp linux-3.0.7/mm/rmap.c linux-3.0.7/mm/rmap.c
69692--- linux-3.0.7/mm/rmap.c 2011-07-21 22:17:23.000000000 -0400
69693+++ linux-3.0.7/mm/rmap.c 2011-08-23 21:47:56.000000000 -0400
69694@@ -153,6 +153,10 @@ int anon_vma_prepare(struct vm_area_stru
69695 struct anon_vma *anon_vma = vma->anon_vma;
69696 struct anon_vma_chain *avc;
69697
69698+#ifdef CONFIG_PAX_SEGMEXEC
69699+ struct anon_vma_chain *avc_m = NULL;
69700+#endif
69701+
69702 might_sleep();
69703 if (unlikely(!anon_vma)) {
69704 struct mm_struct *mm = vma->vm_mm;
69705@@ -162,6 +166,12 @@ int anon_vma_prepare(struct vm_area_stru
69706 if (!avc)
69707 goto out_enomem;
69708
69709+#ifdef CONFIG_PAX_SEGMEXEC
69710+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
69711+ if (!avc_m)
69712+ goto out_enomem_free_avc;
69713+#endif
69714+
69715 anon_vma = find_mergeable_anon_vma(vma);
69716 allocated = NULL;
69717 if (!anon_vma) {
69718@@ -175,6 +185,21 @@ int anon_vma_prepare(struct vm_area_stru
69719 /* page_table_lock to protect against threads */
69720 spin_lock(&mm->page_table_lock);
69721 if (likely(!vma->anon_vma)) {
69722+
69723+#ifdef CONFIG_PAX_SEGMEXEC
69724+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
69725+
69726+ if (vma_m) {
69727+ BUG_ON(vma_m->anon_vma);
69728+ vma_m->anon_vma = anon_vma;
69729+ avc_m->anon_vma = anon_vma;
69730+ avc_m->vma = vma;
69731+ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
69732+ list_add(&avc_m->same_anon_vma, &anon_vma->head);
69733+ avc_m = NULL;
69734+ }
69735+#endif
69736+
69737 vma->anon_vma = anon_vma;
69738 avc->anon_vma = anon_vma;
69739 avc->vma = vma;
69740@@ -188,12 +213,24 @@ int anon_vma_prepare(struct vm_area_stru
69741
69742 if (unlikely(allocated))
69743 put_anon_vma(allocated);
69744+
69745+#ifdef CONFIG_PAX_SEGMEXEC
69746+ if (unlikely(avc_m))
69747+ anon_vma_chain_free(avc_m);
69748+#endif
69749+
69750 if (unlikely(avc))
69751 anon_vma_chain_free(avc);
69752 }
69753 return 0;
69754
69755 out_enomem_free_avc:
69756+
69757+#ifdef CONFIG_PAX_SEGMEXEC
69758+ if (avc_m)
69759+ anon_vma_chain_free(avc_m);
69760+#endif
69761+
69762 anon_vma_chain_free(avc);
69763 out_enomem:
69764 return -ENOMEM;
69765@@ -244,7 +281,7 @@ static void anon_vma_chain_link(struct v
69766 * Attach the anon_vmas from src to dst.
69767 * Returns 0 on success, -ENOMEM on failure.
69768 */
69769-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
69770+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
69771 {
69772 struct anon_vma_chain *avc, *pavc;
69773 struct anon_vma *root = NULL;
69774@@ -277,7 +314,7 @@ int anon_vma_clone(struct vm_area_struct
69775 * the corresponding VMA in the parent process is attached to.
69776 * Returns 0 on success, non-zero on failure.
69777 */
69778-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
69779+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
69780 {
69781 struct anon_vma_chain *avc;
69782 struct anon_vma *anon_vma;
69783diff -urNp linux-3.0.7/mm/shmem.c linux-3.0.7/mm/shmem.c
69784--- linux-3.0.7/mm/shmem.c 2011-07-21 22:17:23.000000000 -0400
69785+++ linux-3.0.7/mm/shmem.c 2011-08-23 21:48:14.000000000 -0400
69786@@ -31,7 +31,7 @@
69787 #include <linux/percpu_counter.h>
69788 #include <linux/swap.h>
69789
69790-static struct vfsmount *shm_mnt;
69791+struct vfsmount *shm_mnt;
69792
69793 #ifdef CONFIG_SHMEM
69794 /*
69795@@ -1101,6 +1101,8 @@ static int shmem_writepage(struct page *
69796 goto unlock;
69797 }
69798 entry = shmem_swp_entry(info, index, NULL);
69799+ if (!entry)
69800+ goto unlock;
69801 if (entry->val) {
69802 /*
69803 * The more uptodate page coming down from a stacked
69804@@ -1172,6 +1174,8 @@ static struct page *shmem_swapin(swp_ent
69805 struct vm_area_struct pvma;
69806 struct page *page;
69807
69808+ pax_track_stack();
69809+
69810 spol = mpol_cond_copy(&mpol,
69811 mpol_shared_policy_lookup(&info->policy, idx));
69812
69813@@ -2568,8 +2572,7 @@ int shmem_fill_super(struct super_block
69814 int err = -ENOMEM;
69815
69816 /* Round up to L1_CACHE_BYTES to resist false sharing */
69817- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
69818- L1_CACHE_BYTES), GFP_KERNEL);
69819+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
69820 if (!sbinfo)
69821 return -ENOMEM;
69822
69823diff -urNp linux-3.0.7/mm/slab.c linux-3.0.7/mm/slab.c
69824--- linux-3.0.7/mm/slab.c 2011-07-21 22:17:23.000000000 -0400
69825+++ linux-3.0.7/mm/slab.c 2011-08-23 21:48:14.000000000 -0400
69826@@ -151,7 +151,7 @@
69827
69828 /* Legal flag mask for kmem_cache_create(). */
69829 #if DEBUG
69830-# define CREATE_MASK (SLAB_RED_ZONE | \
69831+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
69832 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
69833 SLAB_CACHE_DMA | \
69834 SLAB_STORE_USER | \
69835@@ -159,7 +159,7 @@
69836 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
69837 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
69838 #else
69839-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
69840+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
69841 SLAB_CACHE_DMA | \
69842 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
69843 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
69844@@ -288,7 +288,7 @@ struct kmem_list3 {
69845 * Need this for bootstrapping a per node allocator.
69846 */
69847 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
69848-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
69849+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
69850 #define CACHE_CACHE 0
69851 #define SIZE_AC MAX_NUMNODES
69852 #define SIZE_L3 (2 * MAX_NUMNODES)
69853@@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_
69854 if ((x)->max_freeable < i) \
69855 (x)->max_freeable = i; \
69856 } while (0)
69857-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
69858-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
69859-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
69860-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
69861+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
69862+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
69863+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
69864+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
69865 #else
69866 #define STATS_INC_ACTIVE(x) do { } while (0)
69867 #define STATS_DEC_ACTIVE(x) do { } while (0)
69868@@ -538,7 +538,7 @@ static inline void *index_to_obj(struct
69869 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
69870 */
69871 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
69872- const struct slab *slab, void *obj)
69873+ const struct slab *slab, const void *obj)
69874 {
69875 u32 offset = (obj - slab->s_mem);
69876 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
69877@@ -564,7 +564,7 @@ struct cache_names {
69878 static struct cache_names __initdata cache_names[] = {
69879 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
69880 #include <linux/kmalloc_sizes.h>
69881- {NULL,}
69882+ {NULL}
69883 #undef CACHE
69884 };
69885
69886@@ -1530,7 +1530,7 @@ void __init kmem_cache_init(void)
69887 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
69888 sizes[INDEX_AC].cs_size,
69889 ARCH_KMALLOC_MINALIGN,
69890- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69891+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69892 NULL);
69893
69894 if (INDEX_AC != INDEX_L3) {
69895@@ -1538,7 +1538,7 @@ void __init kmem_cache_init(void)
69896 kmem_cache_create(names[INDEX_L3].name,
69897 sizes[INDEX_L3].cs_size,
69898 ARCH_KMALLOC_MINALIGN,
69899- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69900+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69901 NULL);
69902 }
69903
69904@@ -1556,7 +1556,7 @@ void __init kmem_cache_init(void)
69905 sizes->cs_cachep = kmem_cache_create(names->name,
69906 sizes->cs_size,
69907 ARCH_KMALLOC_MINALIGN,
69908- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69909+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69910 NULL);
69911 }
69912 #ifdef CONFIG_ZONE_DMA
69913@@ -4272,10 +4272,10 @@ static int s_show(struct seq_file *m, vo
69914 }
69915 /* cpu stats */
69916 {
69917- unsigned long allochit = atomic_read(&cachep->allochit);
69918- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
69919- unsigned long freehit = atomic_read(&cachep->freehit);
69920- unsigned long freemiss = atomic_read(&cachep->freemiss);
69921+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
69922+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
69923+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
69924+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
69925
69926 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
69927 allochit, allocmiss, freehit, freemiss);
69928@@ -4532,15 +4532,66 @@ static const struct file_operations proc
69929
69930 static int __init slab_proc_init(void)
69931 {
69932- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
69933+ mode_t gr_mode = S_IRUGO;
69934+
69935+#ifdef CONFIG_GRKERNSEC_PROC_ADD
69936+ gr_mode = S_IRUSR;
69937+#endif
69938+
69939+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
69940 #ifdef CONFIG_DEBUG_SLAB_LEAK
69941- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
69942+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
69943 #endif
69944 return 0;
69945 }
69946 module_init(slab_proc_init);
69947 #endif
69948
69949+void check_object_size(const void *ptr, unsigned long n, bool to)
69950+{
69951+
69952+#ifdef CONFIG_PAX_USERCOPY
69953+ struct page *page;
69954+ struct kmem_cache *cachep = NULL;
69955+ struct slab *slabp;
69956+ unsigned int objnr;
69957+ unsigned long offset;
69958+
69959+ if (!n)
69960+ return;
69961+
69962+ if (ZERO_OR_NULL_PTR(ptr))
69963+ goto report;
69964+
69965+ if (!virt_addr_valid(ptr))
69966+ return;
69967+
69968+ page = virt_to_head_page(ptr);
69969+
69970+ if (!PageSlab(page)) {
69971+ if (object_is_on_stack(ptr, n) == -1)
69972+ goto report;
69973+ return;
69974+ }
69975+
69976+ cachep = page_get_cache(page);
69977+ if (!(cachep->flags & SLAB_USERCOPY))
69978+ goto report;
69979+
69980+ slabp = page_get_slab(page);
69981+ objnr = obj_to_index(cachep, slabp, ptr);
69982+ BUG_ON(objnr >= cachep->num);
69983+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
69984+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
69985+ return;
69986+
69987+report:
69988+ pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
69989+#endif
69990+
69991+}
69992+EXPORT_SYMBOL(check_object_size);
69993+
69994 /**
69995 * ksize - get the actual amount of memory allocated for a given object
69996 * @objp: Pointer to the object
69997diff -urNp linux-3.0.7/mm/slob.c linux-3.0.7/mm/slob.c
69998--- linux-3.0.7/mm/slob.c 2011-07-21 22:17:23.000000000 -0400
69999+++ linux-3.0.7/mm/slob.c 2011-08-23 21:47:56.000000000 -0400
70000@@ -29,7 +29,7 @@
70001 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
70002 * alloc_pages() directly, allocating compound pages so the page order
70003 * does not have to be separately tracked, and also stores the exact
70004- * allocation size in page->private so that it can be used to accurately
70005+ * allocation size in slob_page->size so that it can be used to accurately
70006 * provide ksize(). These objects are detected in kfree() because slob_page()
70007 * is false for them.
70008 *
70009@@ -58,6 +58,7 @@
70010 */
70011
70012 #include <linux/kernel.h>
70013+#include <linux/sched.h>
70014 #include <linux/slab.h>
70015 #include <linux/mm.h>
70016 #include <linux/swap.h> /* struct reclaim_state */
70017@@ -102,7 +103,8 @@ struct slob_page {
70018 unsigned long flags; /* mandatory */
70019 atomic_t _count; /* mandatory */
70020 slobidx_t units; /* free units left in page */
70021- unsigned long pad[2];
70022+ unsigned long pad[1];
70023+ unsigned long size; /* size when >=PAGE_SIZE */
70024 slob_t *free; /* first free slob_t in page */
70025 struct list_head list; /* linked list of free pages */
70026 };
70027@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
70028 */
70029 static inline int is_slob_page(struct slob_page *sp)
70030 {
70031- return PageSlab((struct page *)sp);
70032+ return PageSlab((struct page *)sp) && !sp->size;
70033 }
70034
70035 static inline void set_slob_page(struct slob_page *sp)
70036@@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
70037
70038 static inline struct slob_page *slob_page(const void *addr)
70039 {
70040- return (struct slob_page *)virt_to_page(addr);
70041+ return (struct slob_page *)virt_to_head_page(addr);
70042 }
70043
70044 /*
70045@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
70046 /*
70047 * Return the size of a slob block.
70048 */
70049-static slobidx_t slob_units(slob_t *s)
70050+static slobidx_t slob_units(const slob_t *s)
70051 {
70052 if (s->units > 0)
70053 return s->units;
70054@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
70055 /*
70056 * Return the next free slob block pointer after this one.
70057 */
70058-static slob_t *slob_next(slob_t *s)
70059+static slob_t *slob_next(const slob_t *s)
70060 {
70061 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
70062 slobidx_t next;
70063@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
70064 /*
70065 * Returns true if s is the last free block in its page.
70066 */
70067-static int slob_last(slob_t *s)
70068+static int slob_last(const slob_t *s)
70069 {
70070 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
70071 }
70072@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
70073 if (!page)
70074 return NULL;
70075
70076+ set_slob_page(page);
70077 return page_address(page);
70078 }
70079
70080@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
70081 if (!b)
70082 return NULL;
70083 sp = slob_page(b);
70084- set_slob_page(sp);
70085
70086 spin_lock_irqsave(&slob_lock, flags);
70087 sp->units = SLOB_UNITS(PAGE_SIZE);
70088 sp->free = b;
70089+ sp->size = 0;
70090 INIT_LIST_HEAD(&sp->list);
70091 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
70092 set_slob_page_free(sp, slob_list);
70093@@ -476,10 +479,9 @@ out:
70094 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
70095 */
70096
70097-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70098+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
70099 {
70100- unsigned int *m;
70101- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70102+ slob_t *m;
70103 void *ret;
70104
70105 lockdep_trace_alloc(gfp);
70106@@ -492,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t
70107
70108 if (!m)
70109 return NULL;
70110- *m = size;
70111+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
70112+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
70113+ m[0].units = size;
70114+ m[1].units = align;
70115 ret = (void *)m + align;
70116
70117 trace_kmalloc_node(_RET_IP_, ret,
70118@@ -504,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t
70119 gfp |= __GFP_COMP;
70120 ret = slob_new_pages(gfp, order, node);
70121 if (ret) {
70122- struct page *page;
70123- page = virt_to_page(ret);
70124- page->private = size;
70125+ struct slob_page *sp;
70126+ sp = slob_page(ret);
70127+ sp->size = size;
70128 }
70129
70130 trace_kmalloc_node(_RET_IP_, ret,
70131 size, PAGE_SIZE << order, gfp, node);
70132 }
70133
70134- kmemleak_alloc(ret, size, 1, gfp);
70135+ return ret;
70136+}
70137+
70138+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70139+{
70140+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70141+ void *ret = __kmalloc_node_align(size, gfp, node, align);
70142+
70143+ if (!ZERO_OR_NULL_PTR(ret))
70144+ kmemleak_alloc(ret, size, 1, gfp);
70145 return ret;
70146 }
70147 EXPORT_SYMBOL(__kmalloc_node);
70148@@ -531,13 +545,88 @@ void kfree(const void *block)
70149 sp = slob_page(block);
70150 if (is_slob_page(sp)) {
70151 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70152- unsigned int *m = (unsigned int *)(block - align);
70153- slob_free(m, *m + align);
70154- } else
70155+ slob_t *m = (slob_t *)(block - align);
70156+ slob_free(m, m[0].units + align);
70157+ } else {
70158+ clear_slob_page(sp);
70159+ free_slob_page(sp);
70160+ sp->size = 0;
70161 put_page(&sp->page);
70162+ }
70163 }
70164 EXPORT_SYMBOL(kfree);
70165
70166+void check_object_size(const void *ptr, unsigned long n, bool to)
70167+{
70168+
70169+#ifdef CONFIG_PAX_USERCOPY
70170+ struct slob_page *sp;
70171+ const slob_t *free;
70172+ const void *base;
70173+ unsigned long flags;
70174+
70175+ if (!n)
70176+ return;
70177+
70178+ if (ZERO_OR_NULL_PTR(ptr))
70179+ goto report;
70180+
70181+ if (!virt_addr_valid(ptr))
70182+ return;
70183+
70184+ sp = slob_page(ptr);
70185+ if (!PageSlab((struct page*)sp)) {
70186+ if (object_is_on_stack(ptr, n) == -1)
70187+ goto report;
70188+ return;
70189+ }
70190+
70191+ if (sp->size) {
70192+ base = page_address(&sp->page);
70193+ if (base <= ptr && n <= sp->size - (ptr - base))
70194+ return;
70195+ goto report;
70196+ }
70197+
70198+ /* some tricky double walking to find the chunk */
70199+ spin_lock_irqsave(&slob_lock, flags);
70200+ base = (void *)((unsigned long)ptr & PAGE_MASK);
70201+ free = sp->free;
70202+
70203+ while (!slob_last(free) && (void *)free <= ptr) {
70204+ base = free + slob_units(free);
70205+ free = slob_next(free);
70206+ }
70207+
70208+ while (base < (void *)free) {
70209+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
70210+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
70211+ int offset;
70212+
70213+ if (ptr < base + align)
70214+ break;
70215+
70216+ offset = ptr - base - align;
70217+ if (offset >= m) {
70218+ base += size;
70219+ continue;
70220+ }
70221+
70222+ if (n > m - offset)
70223+ break;
70224+
70225+ spin_unlock_irqrestore(&slob_lock, flags);
70226+ return;
70227+ }
70228+
70229+ spin_unlock_irqrestore(&slob_lock, flags);
70230+report:
70231+ pax_report_usercopy(ptr, n, to, NULL);
70232+#endif
70233+
70234+}
70235+EXPORT_SYMBOL(check_object_size);
70236+
70237 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
70238 size_t ksize(const void *block)
70239 {
70240@@ -550,10 +639,10 @@ size_t ksize(const void *block)
70241 sp = slob_page(block);
70242 if (is_slob_page(sp)) {
70243 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70244- unsigned int *m = (unsigned int *)(block - align);
70245- return SLOB_UNITS(*m) * SLOB_UNIT;
70246+ slob_t *m = (slob_t *)(block - align);
70247+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
70248 } else
70249- return sp->page.private;
70250+ return sp->size;
70251 }
70252 EXPORT_SYMBOL(ksize);
70253
70254@@ -569,8 +658,13 @@ struct kmem_cache *kmem_cache_create(con
70255 {
70256 struct kmem_cache *c;
70257
70258+#ifdef CONFIG_PAX_USERCOPY
70259+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
70260+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
70261+#else
70262 c = slob_alloc(sizeof(struct kmem_cache),
70263 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
70264+#endif
70265
70266 if (c) {
70267 c->name = name;
70268@@ -608,17 +702,25 @@ void *kmem_cache_alloc_node(struct kmem_
70269 {
70270 void *b;
70271
70272+#ifdef CONFIG_PAX_USERCOPY
70273+ b = __kmalloc_node_align(c->size, flags, node, c->align);
70274+#else
70275 if (c->size < PAGE_SIZE) {
70276 b = slob_alloc(c->size, flags, c->align, node);
70277 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70278 SLOB_UNITS(c->size) * SLOB_UNIT,
70279 flags, node);
70280 } else {
70281+ struct slob_page *sp;
70282+
70283 b = slob_new_pages(flags, get_order(c->size), node);
70284+ sp = slob_page(b);
70285+ sp->size = c->size;
70286 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70287 PAGE_SIZE << get_order(c->size),
70288 flags, node);
70289 }
70290+#endif
70291
70292 if (c->ctor)
70293 c->ctor(b);
70294@@ -630,10 +732,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
70295
70296 static void __kmem_cache_free(void *b, int size)
70297 {
70298- if (size < PAGE_SIZE)
70299+ struct slob_page *sp = slob_page(b);
70300+
70301+ if (is_slob_page(sp))
70302 slob_free(b, size);
70303- else
70304+ else {
70305+ clear_slob_page(sp);
70306+ free_slob_page(sp);
70307+ sp->size = 0;
70308 slob_free_pages(b, get_order(size));
70309+ }
70310 }
70311
70312 static void kmem_rcu_free(struct rcu_head *head)
70313@@ -646,17 +754,31 @@ static void kmem_rcu_free(struct rcu_hea
70314
70315 void kmem_cache_free(struct kmem_cache *c, void *b)
70316 {
70317+ int size = c->size;
70318+
70319+#ifdef CONFIG_PAX_USERCOPY
70320+ if (size + c->align < PAGE_SIZE) {
70321+ size += c->align;
70322+ b -= c->align;
70323+ }
70324+#endif
70325+
70326 kmemleak_free_recursive(b, c->flags);
70327 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
70328 struct slob_rcu *slob_rcu;
70329- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
70330- slob_rcu->size = c->size;
70331+ slob_rcu = b + (size - sizeof(struct slob_rcu));
70332+ slob_rcu->size = size;
70333 call_rcu(&slob_rcu->head, kmem_rcu_free);
70334 } else {
70335- __kmem_cache_free(b, c->size);
70336+ __kmem_cache_free(b, size);
70337 }
70338
70339+#ifdef CONFIG_PAX_USERCOPY
70340+ trace_kfree(_RET_IP_, b);
70341+#else
70342 trace_kmem_cache_free(_RET_IP_, b);
70343+#endif
70344+
70345 }
70346 EXPORT_SYMBOL(kmem_cache_free);
70347
70348diff -urNp linux-3.0.7/mm/slub.c linux-3.0.7/mm/slub.c
70349--- linux-3.0.7/mm/slub.c 2011-07-21 22:17:23.000000000 -0400
70350+++ linux-3.0.7/mm/slub.c 2011-09-25 22:15:40.000000000 -0400
70351@@ -200,7 +200,7 @@ struct track {
70352
70353 enum track_item { TRACK_ALLOC, TRACK_FREE };
70354
70355-#ifdef CONFIG_SYSFS
70356+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70357 static int sysfs_slab_add(struct kmem_cache *);
70358 static int sysfs_slab_alias(struct kmem_cache *, const char *);
70359 static void sysfs_slab_remove(struct kmem_cache *);
70360@@ -442,7 +442,7 @@ static void print_track(const char *s, s
70361 if (!t->addr)
70362 return;
70363
70364- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
70365+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
70366 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
70367 }
70368
70369@@ -2137,6 +2137,8 @@ void kmem_cache_free(struct kmem_cache *
70370
70371 page = virt_to_head_page(x);
70372
70373+ BUG_ON(!PageSlab(page));
70374+
70375 slab_free(s, page, x, _RET_IP_);
70376
70377 trace_kmem_cache_free(_RET_IP_, x);
70378@@ -2170,7 +2172,7 @@ static int slub_min_objects;
70379 * Merge control. If this is set then no merging of slab caches will occur.
70380 * (Could be removed. This was introduced to pacify the merge skeptics.)
70381 */
70382-static int slub_nomerge;
70383+static int slub_nomerge = 1;
70384
70385 /*
70386 * Calculate the order of allocation given an slab object size.
70387@@ -2594,7 +2596,7 @@ static int kmem_cache_open(struct kmem_c
70388 * list to avoid pounding the page allocator excessively.
70389 */
70390 set_min_partial(s, ilog2(s->size));
70391- s->refcount = 1;
70392+ atomic_set(&s->refcount, 1);
70393 #ifdef CONFIG_NUMA
70394 s->remote_node_defrag_ratio = 1000;
70395 #endif
70396@@ -2699,8 +2701,7 @@ static inline int kmem_cache_close(struc
70397 void kmem_cache_destroy(struct kmem_cache *s)
70398 {
70399 down_write(&slub_lock);
70400- s->refcount--;
70401- if (!s->refcount) {
70402+ if (atomic_dec_and_test(&s->refcount)) {
70403 list_del(&s->list);
70404 if (kmem_cache_close(s)) {
70405 printk(KERN_ERR "SLUB %s: %s called for cache that "
70406@@ -2910,6 +2911,46 @@ void *__kmalloc_node(size_t size, gfp_t
70407 EXPORT_SYMBOL(__kmalloc_node);
70408 #endif
70409
70410+void check_object_size(const void *ptr, unsigned long n, bool to)
70411+{
70412+
70413+#ifdef CONFIG_PAX_USERCOPY
70414+ struct page *page;
70415+ struct kmem_cache *s = NULL;
70416+ unsigned long offset;
70417+
70418+ if (!n)
70419+ return;
70420+
70421+ if (ZERO_OR_NULL_PTR(ptr))
70422+ goto report;
70423+
70424+ if (!virt_addr_valid(ptr))
70425+ return;
70426+
70427+ page = virt_to_head_page(ptr);
70428+
70429+ if (!PageSlab(page)) {
70430+ if (object_is_on_stack(ptr, n) == -1)
70431+ goto report;
70432+ return;
70433+ }
70434+
70435+ s = page->slab;
70436+ if (!(s->flags & SLAB_USERCOPY))
70437+ goto report;
70438+
70439+ offset = (ptr - page_address(page)) % s->size;
70440+ if (offset <= s->objsize && n <= s->objsize - offset)
70441+ return;
70442+
70443+report:
70444+ pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
70445+#endif
70446+
70447+}
70448+EXPORT_SYMBOL(check_object_size);
70449+
70450 size_t ksize(const void *object)
70451 {
70452 struct page *page;
70453@@ -3154,7 +3195,7 @@ static void __init kmem_cache_bootstrap_
70454 int node;
70455
70456 list_add(&s->list, &slab_caches);
70457- s->refcount = -1;
70458+ atomic_set(&s->refcount, -1);
70459
70460 for_each_node_state(node, N_NORMAL_MEMORY) {
70461 struct kmem_cache_node *n = get_node(s, node);
70462@@ -3271,17 +3312,17 @@ void __init kmem_cache_init(void)
70463
70464 /* Caches that are not of the two-to-the-power-of size */
70465 if (KMALLOC_MIN_SIZE <= 32) {
70466- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
70467+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
70468 caches++;
70469 }
70470
70471 if (KMALLOC_MIN_SIZE <= 64) {
70472- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
70473+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
70474 caches++;
70475 }
70476
70477 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
70478- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
70479+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
70480 caches++;
70481 }
70482
70483@@ -3349,7 +3390,7 @@ static int slab_unmergeable(struct kmem_
70484 /*
70485 * We may have set a slab to be unmergeable during bootstrap.
70486 */
70487- if (s->refcount < 0)
70488+ if (atomic_read(&s->refcount) < 0)
70489 return 1;
70490
70491 return 0;
70492@@ -3408,7 +3449,7 @@ struct kmem_cache *kmem_cache_create(con
70493 down_write(&slub_lock);
70494 s = find_mergeable(size, align, flags, name, ctor);
70495 if (s) {
70496- s->refcount++;
70497+ atomic_inc(&s->refcount);
70498 /*
70499 * Adjust the object sizes so that we clear
70500 * the complete object on kzalloc.
70501@@ -3417,7 +3458,7 @@ struct kmem_cache *kmem_cache_create(con
70502 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
70503
70504 if (sysfs_slab_alias(s, name)) {
70505- s->refcount--;
70506+ atomic_dec(&s->refcount);
70507 goto err;
70508 }
70509 up_write(&slub_lock);
70510@@ -3545,7 +3586,7 @@ void *__kmalloc_node_track_caller(size_t
70511 }
70512 #endif
70513
70514-#ifdef CONFIG_SYSFS
70515+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70516 static int count_inuse(struct page *page)
70517 {
70518 return page->inuse;
70519@@ -3935,12 +3976,12 @@ static void resiliency_test(void)
70520 validate_slab_cache(kmalloc_caches[9]);
70521 }
70522 #else
70523-#ifdef CONFIG_SYSFS
70524+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70525 static void resiliency_test(void) {};
70526 #endif
70527 #endif
70528
70529-#ifdef CONFIG_SYSFS
70530+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70531 enum slab_stat_type {
70532 SL_ALL, /* All slabs */
70533 SL_PARTIAL, /* Only partially allocated slabs */
70534@@ -4150,7 +4191,7 @@ SLAB_ATTR_RO(ctor);
70535
70536 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
70537 {
70538- return sprintf(buf, "%d\n", s->refcount - 1);
70539+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
70540 }
70541 SLAB_ATTR_RO(aliases);
70542
70543@@ -4662,6 +4703,7 @@ static char *create_unique_id(struct kme
70544 return name;
70545 }
70546
70547+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70548 static int sysfs_slab_add(struct kmem_cache *s)
70549 {
70550 int err;
70551@@ -4724,6 +4766,7 @@ static void sysfs_slab_remove(struct kme
70552 kobject_del(&s->kobj);
70553 kobject_put(&s->kobj);
70554 }
70555+#endif
70556
70557 /*
70558 * Need to buffer aliases during bootup until sysfs becomes
70559@@ -4737,6 +4780,7 @@ struct saved_alias {
70560
70561 static struct saved_alias *alias_list;
70562
70563+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70564 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
70565 {
70566 struct saved_alias *al;
70567@@ -4759,6 +4803,7 @@ static int sysfs_slab_alias(struct kmem_
70568 alias_list = al;
70569 return 0;
70570 }
70571+#endif
70572
70573 static int __init slab_sysfs_init(void)
70574 {
70575@@ -4894,7 +4939,13 @@ static const struct file_operations proc
70576
70577 static int __init slab_proc_init(void)
70578 {
70579- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
70580+ mode_t gr_mode = S_IRUGO;
70581+
70582+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70583+ gr_mode = S_IRUSR;
70584+#endif
70585+
70586+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
70587 return 0;
70588 }
70589 module_init(slab_proc_init);
70590diff -urNp linux-3.0.7/mm/swap.c linux-3.0.7/mm/swap.c
70591--- linux-3.0.7/mm/swap.c 2011-07-21 22:17:23.000000000 -0400
70592+++ linux-3.0.7/mm/swap.c 2011-08-23 21:47:56.000000000 -0400
70593@@ -31,6 +31,7 @@
70594 #include <linux/backing-dev.h>
70595 #include <linux/memcontrol.h>
70596 #include <linux/gfp.h>
70597+#include <linux/hugetlb.h>
70598
70599 #include "internal.h"
70600
70601@@ -71,6 +72,8 @@ static void __put_compound_page(struct p
70602
70603 __page_cache_release(page);
70604 dtor = get_compound_page_dtor(page);
70605+ if (!PageHuge(page))
70606+ BUG_ON(dtor != free_compound_page);
70607 (*dtor)(page);
70608 }
70609
70610diff -urNp linux-3.0.7/mm/swapfile.c linux-3.0.7/mm/swapfile.c
70611--- linux-3.0.7/mm/swapfile.c 2011-07-21 22:17:23.000000000 -0400
70612+++ linux-3.0.7/mm/swapfile.c 2011-08-23 21:47:56.000000000 -0400
70613@@ -62,7 +62,7 @@ static DEFINE_MUTEX(swapon_mutex);
70614
70615 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
70616 /* Activity counter to indicate that a swapon or swapoff has occurred */
70617-static atomic_t proc_poll_event = ATOMIC_INIT(0);
70618+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
70619
70620 static inline unsigned char swap_count(unsigned char ent)
70621 {
70622@@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
70623 }
70624 filp_close(swap_file, NULL);
70625 err = 0;
70626- atomic_inc(&proc_poll_event);
70627+ atomic_inc_unchecked(&proc_poll_event);
70628 wake_up_interruptible(&proc_poll_wait);
70629
70630 out_dput:
70631@@ -1692,8 +1692,8 @@ static unsigned swaps_poll(struct file *
70632
70633 poll_wait(file, &proc_poll_wait, wait);
70634
70635- if (s->event != atomic_read(&proc_poll_event)) {
70636- s->event = atomic_read(&proc_poll_event);
70637+ if (s->event != atomic_read_unchecked(&proc_poll_event)) {
70638+ s->event = atomic_read_unchecked(&proc_poll_event);
70639 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
70640 }
70641
70642@@ -1799,7 +1799,7 @@ static int swaps_open(struct inode *inod
70643 }
70644
70645 s->seq.private = s;
70646- s->event = atomic_read(&proc_poll_event);
70647+ s->event = atomic_read_unchecked(&proc_poll_event);
70648 return ret;
70649 }
70650
70651@@ -2133,7 +2133,7 @@ SYSCALL_DEFINE2(swapon, const char __use
70652 (p->flags & SWP_DISCARDABLE) ? "D" : "");
70653
70654 mutex_unlock(&swapon_mutex);
70655- atomic_inc(&proc_poll_event);
70656+ atomic_inc_unchecked(&proc_poll_event);
70657 wake_up_interruptible(&proc_poll_wait);
70658
70659 if (S_ISREG(inode->i_mode))
70660diff -urNp linux-3.0.7/mm/util.c linux-3.0.7/mm/util.c
70661--- linux-3.0.7/mm/util.c 2011-07-21 22:17:23.000000000 -0400
70662+++ linux-3.0.7/mm/util.c 2011-08-23 21:47:56.000000000 -0400
70663@@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
70664 * allocated buffer. Use this if you don't want to free the buffer immediately
70665 * like, for example, with RCU.
70666 */
70667+#undef __krealloc
70668 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
70669 {
70670 void *ret;
70671@@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
70672 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
70673 * %NULL pointer, the object pointed to is freed.
70674 */
70675+#undef krealloc
70676 void *krealloc(const void *p, size_t new_size, gfp_t flags)
70677 {
70678 void *ret;
70679@@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *m
70680 void arch_pick_mmap_layout(struct mm_struct *mm)
70681 {
70682 mm->mmap_base = TASK_UNMAPPED_BASE;
70683+
70684+#ifdef CONFIG_PAX_RANDMMAP
70685+ if (mm->pax_flags & MF_PAX_RANDMMAP)
70686+ mm->mmap_base += mm->delta_mmap;
70687+#endif
70688+
70689 mm->get_unmapped_area = arch_get_unmapped_area;
70690 mm->unmap_area = arch_unmap_area;
70691 }
70692diff -urNp linux-3.0.7/mm/vmalloc.c linux-3.0.7/mm/vmalloc.c
70693--- linux-3.0.7/mm/vmalloc.c 2011-10-16 21:54:54.000000000 -0400
70694+++ linux-3.0.7/mm/vmalloc.c 2011-10-16 21:55:28.000000000 -0400
70695@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
70696
70697 pte = pte_offset_kernel(pmd, addr);
70698 do {
70699- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
70700- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70701+
70702+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70703+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
70704+ BUG_ON(!pte_exec(*pte));
70705+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
70706+ continue;
70707+ }
70708+#endif
70709+
70710+ {
70711+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
70712+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70713+ }
70714 } while (pte++, addr += PAGE_SIZE, addr != end);
70715 }
70716
70717@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
70718 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
70719 {
70720 pte_t *pte;
70721+ int ret = -ENOMEM;
70722
70723 /*
70724 * nr is a running index into the array which helps higher level
70725@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
70726 pte = pte_alloc_kernel(pmd, addr);
70727 if (!pte)
70728 return -ENOMEM;
70729+
70730+ pax_open_kernel();
70731 do {
70732 struct page *page = pages[*nr];
70733
70734- if (WARN_ON(!pte_none(*pte)))
70735- return -EBUSY;
70736- if (WARN_ON(!page))
70737- return -ENOMEM;
70738+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70739+ if (pgprot_val(prot) & _PAGE_NX)
70740+#endif
70741+
70742+ if (WARN_ON(!pte_none(*pte))) {
70743+ ret = -EBUSY;
70744+ goto out;
70745+ }
70746+ if (WARN_ON(!page)) {
70747+ ret = -ENOMEM;
70748+ goto out;
70749+ }
70750 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
70751 (*nr)++;
70752 } while (pte++, addr += PAGE_SIZE, addr != end);
70753- return 0;
70754+ ret = 0;
70755+out:
70756+ pax_close_kernel();
70757+ return ret;
70758 }
70759
70760 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
70761@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
70762 * and fall back on vmalloc() if that fails. Others
70763 * just put it in the vmalloc space.
70764 */
70765-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
70766+#ifdef CONFIG_MODULES
70767+#ifdef MODULES_VADDR
70768 unsigned long addr = (unsigned long)x;
70769 if (addr >= MODULES_VADDR && addr < MODULES_END)
70770 return 1;
70771 #endif
70772+
70773+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70774+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
70775+ return 1;
70776+#endif
70777+
70778+#endif
70779+
70780 return is_vmalloc_addr(x);
70781 }
70782
70783@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
70784
70785 if (!pgd_none(*pgd)) {
70786 pud_t *pud = pud_offset(pgd, addr);
70787+#ifdef CONFIG_X86
70788+ if (!pud_large(*pud))
70789+#endif
70790 if (!pud_none(*pud)) {
70791 pmd_t *pmd = pmd_offset(pud, addr);
70792+#ifdef CONFIG_X86
70793+ if (!pmd_large(*pmd))
70794+#endif
70795 if (!pmd_none(*pmd)) {
70796 pte_t *ptep, pte;
70797
70798@@ -1297,6 +1337,16 @@ static struct vm_struct *__get_vm_area_n
70799 struct vm_struct *area;
70800
70801 BUG_ON(in_interrupt());
70802+
70803+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70804+ if (flags & VM_KERNEXEC) {
70805+ if (start != VMALLOC_START || end != VMALLOC_END)
70806+ return NULL;
70807+ start = (unsigned long)MODULES_EXEC_VADDR;
70808+ end = (unsigned long)MODULES_EXEC_END;
70809+ }
70810+#endif
70811+
70812 if (flags & VM_IOREMAP) {
70813 int bit = fls(size);
70814
70815@@ -1515,6 +1565,11 @@ void *vmap(struct page **pages, unsigned
70816 if (count > totalram_pages)
70817 return NULL;
70818
70819+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70820+ if (!(pgprot_val(prot) & _PAGE_NX))
70821+ flags |= VM_KERNEXEC;
70822+#endif
70823+
70824 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
70825 __builtin_return_address(0));
70826 if (!area)
70827@@ -1616,6 +1671,13 @@ void *__vmalloc_node_range(unsigned long
70828 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
70829 return NULL;
70830
70831+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70832+ if (!(pgprot_val(prot) & _PAGE_NX))
70833+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
70834+ node, gfp_mask, caller);
70835+ else
70836+#endif
70837+
70838 area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
70839 gfp_mask, caller);
70840
70841@@ -1655,6 +1717,7 @@ static void *__vmalloc_node(unsigned lon
70842 gfp_mask, prot, node, caller);
70843 }
70844
70845+#undef __vmalloc
70846 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
70847 {
70848 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
70849@@ -1678,6 +1741,7 @@ static inline void *__vmalloc_node_flags
70850 * For tight control over page level allocator and protection flags
70851 * use __vmalloc() instead.
70852 */
70853+#undef vmalloc
70854 void *vmalloc(unsigned long size)
70855 {
70856 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
70857@@ -1694,6 +1758,7 @@ EXPORT_SYMBOL(vmalloc);
70858 * For tight control over page level allocator and protection flags
70859 * use __vmalloc() instead.
70860 */
70861+#undef vzalloc
70862 void *vzalloc(unsigned long size)
70863 {
70864 return __vmalloc_node_flags(size, -1,
70865@@ -1708,6 +1773,7 @@ EXPORT_SYMBOL(vzalloc);
70866 * The resulting memory area is zeroed so it can be mapped to userspace
70867 * without leaking data.
70868 */
70869+#undef vmalloc_user
70870 void *vmalloc_user(unsigned long size)
70871 {
70872 struct vm_struct *area;
70873@@ -1735,6 +1801,7 @@ EXPORT_SYMBOL(vmalloc_user);
70874 * For tight control over page level allocator and protection flags
70875 * use __vmalloc() instead.
70876 */
70877+#undef vmalloc_node
70878 void *vmalloc_node(unsigned long size, int node)
70879 {
70880 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
70881@@ -1754,6 +1821,7 @@ EXPORT_SYMBOL(vmalloc_node);
70882 * For tight control over page level allocator and protection flags
70883 * use __vmalloc_node() instead.
70884 */
70885+#undef vzalloc_node
70886 void *vzalloc_node(unsigned long size, int node)
70887 {
70888 return __vmalloc_node_flags(size, node,
70889@@ -1776,10 +1844,10 @@ EXPORT_SYMBOL(vzalloc_node);
70890 * For tight control over page level allocator and protection flags
70891 * use __vmalloc() instead.
70892 */
70893-
70894+#undef vmalloc_exec
70895 void *vmalloc_exec(unsigned long size)
70896 {
70897- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
70898+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
70899 -1, __builtin_return_address(0));
70900 }
70901
70902@@ -1798,6 +1866,7 @@ void *vmalloc_exec(unsigned long size)
70903 * Allocate enough 32bit PA addressable pages to cover @size from the
70904 * page level allocator and map them into contiguous kernel virtual space.
70905 */
70906+#undef vmalloc_32
70907 void *vmalloc_32(unsigned long size)
70908 {
70909 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
70910@@ -1812,6 +1881,7 @@ EXPORT_SYMBOL(vmalloc_32);
70911 * The resulting memory area is 32bit addressable and zeroed so it can be
70912 * mapped to userspace without leaking data.
70913 */
70914+#undef vmalloc_32_user
70915 void *vmalloc_32_user(unsigned long size)
70916 {
70917 struct vm_struct *area;
70918@@ -2074,6 +2144,8 @@ int remap_vmalloc_range(struct vm_area_s
70919 unsigned long uaddr = vma->vm_start;
70920 unsigned long usize = vma->vm_end - vma->vm_start;
70921
70922+ BUG_ON(vma->vm_mirror);
70923+
70924 if ((PAGE_SIZE-1) & (unsigned long)addr)
70925 return -EINVAL;
70926
70927diff -urNp linux-3.0.7/mm/vmstat.c linux-3.0.7/mm/vmstat.c
70928--- linux-3.0.7/mm/vmstat.c 2011-07-21 22:17:23.000000000 -0400
70929+++ linux-3.0.7/mm/vmstat.c 2011-08-23 21:48:14.000000000 -0400
70930@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
70931 *
70932 * vm_stat contains the global counters
70933 */
70934-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70935+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70936 EXPORT_SYMBOL(vm_stat);
70937
70938 #ifdef CONFIG_SMP
70939@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
70940 v = p->vm_stat_diff[i];
70941 p->vm_stat_diff[i] = 0;
70942 local_irq_restore(flags);
70943- atomic_long_add(v, &zone->vm_stat[i]);
70944+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
70945 global_diff[i] += v;
70946 #ifdef CONFIG_NUMA
70947 /* 3 seconds idle till flush */
70948@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
70949
70950 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
70951 if (global_diff[i])
70952- atomic_long_add(global_diff[i], &vm_stat[i]);
70953+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
70954 }
70955
70956 #endif
70957@@ -1207,10 +1207,20 @@ static int __init setup_vmstat(void)
70958 start_cpu_timer(cpu);
70959 #endif
70960 #ifdef CONFIG_PROC_FS
70961- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
70962- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
70963- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
70964- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
70965+ {
70966+ mode_t gr_mode = S_IRUGO;
70967+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70968+ gr_mode = S_IRUSR;
70969+#endif
70970+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
70971+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
70972+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
70973+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
70974+#else
70975+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
70976+#endif
70977+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
70978+ }
70979 #endif
70980 return 0;
70981 }
70982diff -urNp linux-3.0.7/net/8021q/vlan.c linux-3.0.7/net/8021q/vlan.c
70983--- linux-3.0.7/net/8021q/vlan.c 2011-07-21 22:17:23.000000000 -0400
70984+++ linux-3.0.7/net/8021q/vlan.c 2011-08-23 21:47:56.000000000 -0400
70985@@ -591,8 +591,7 @@ static int vlan_ioctl_handler(struct net
70986 err = -EPERM;
70987 if (!capable(CAP_NET_ADMIN))
70988 break;
70989- if ((args.u.name_type >= 0) &&
70990- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
70991+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
70992 struct vlan_net *vn;
70993
70994 vn = net_generic(net, vlan_net_id);
70995diff -urNp linux-3.0.7/net/9p/trans_fd.c linux-3.0.7/net/9p/trans_fd.c
70996--- linux-3.0.7/net/9p/trans_fd.c 2011-07-21 22:17:23.000000000 -0400
70997+++ linux-3.0.7/net/9p/trans_fd.c 2011-10-06 04:17:55.000000000 -0400
70998@@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client
70999 oldfs = get_fs();
71000 set_fs(get_ds());
71001 /* The cast to a user pointer is valid due to the set_fs() */
71002- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
71003+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
71004 set_fs(oldfs);
71005
71006 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
71007diff -urNp linux-3.0.7/net/9p/trans_virtio.c linux-3.0.7/net/9p/trans_virtio.c
71008--- linux-3.0.7/net/9p/trans_virtio.c 2011-10-16 21:54:54.000000000 -0400
71009+++ linux-3.0.7/net/9p/trans_virtio.c 2011-10-16 21:55:28.000000000 -0400
71010@@ -327,7 +327,7 @@ req_retry_pinned:
71011 } else {
71012 char *pbuf;
71013 if (req->tc->pubuf)
71014- pbuf = (__force char *) req->tc->pubuf;
71015+ pbuf = (char __force_kernel *) req->tc->pubuf;
71016 else
71017 pbuf = req->tc->pkbuf;
71018 outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf,
71019@@ -357,7 +357,7 @@ req_retry_pinned:
71020 } else {
71021 char *pbuf;
71022 if (req->tc->pubuf)
71023- pbuf = (__force char *) req->tc->pubuf;
71024+ pbuf = (char __force_kernel *) req->tc->pubuf;
71025 else
71026 pbuf = req->tc->pkbuf;
71027
71028diff -urNp linux-3.0.7/net/atm/atm_misc.c linux-3.0.7/net/atm/atm_misc.c
71029--- linux-3.0.7/net/atm/atm_misc.c 2011-07-21 22:17:23.000000000 -0400
71030+++ linux-3.0.7/net/atm/atm_misc.c 2011-08-23 21:47:56.000000000 -0400
71031@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
71032 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
71033 return 1;
71034 atm_return(vcc, truesize);
71035- atomic_inc(&vcc->stats->rx_drop);
71036+ atomic_inc_unchecked(&vcc->stats->rx_drop);
71037 return 0;
71038 }
71039 EXPORT_SYMBOL(atm_charge);
71040@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
71041 }
71042 }
71043 atm_return(vcc, guess);
71044- atomic_inc(&vcc->stats->rx_drop);
71045+ atomic_inc_unchecked(&vcc->stats->rx_drop);
71046 return NULL;
71047 }
71048 EXPORT_SYMBOL(atm_alloc_charge);
71049@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
71050
71051 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
71052 {
71053-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71054+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71055 __SONET_ITEMS
71056 #undef __HANDLE_ITEM
71057 }
71058@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
71059
71060 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
71061 {
71062-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71063+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
71064 __SONET_ITEMS
71065 #undef __HANDLE_ITEM
71066 }
71067diff -urNp linux-3.0.7/net/atm/lec.h linux-3.0.7/net/atm/lec.h
71068--- linux-3.0.7/net/atm/lec.h 2011-07-21 22:17:23.000000000 -0400
71069+++ linux-3.0.7/net/atm/lec.h 2011-08-23 21:47:56.000000000 -0400
71070@@ -48,7 +48,7 @@ struct lane2_ops {
71071 const u8 *tlvs, u32 sizeoftlvs);
71072 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
71073 const u8 *tlvs, u32 sizeoftlvs);
71074-};
71075+} __no_const;
71076
71077 /*
71078 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
71079diff -urNp linux-3.0.7/net/atm/mpc.h linux-3.0.7/net/atm/mpc.h
71080--- linux-3.0.7/net/atm/mpc.h 2011-07-21 22:17:23.000000000 -0400
71081+++ linux-3.0.7/net/atm/mpc.h 2011-08-23 21:47:56.000000000 -0400
71082@@ -33,7 +33,7 @@ struct mpoa_client {
71083 struct mpc_parameters parameters; /* parameters for this client */
71084
71085 const struct net_device_ops *old_ops;
71086- struct net_device_ops new_ops;
71087+ net_device_ops_no_const new_ops;
71088 };
71089
71090
71091diff -urNp linux-3.0.7/net/atm/mpoa_caches.c linux-3.0.7/net/atm/mpoa_caches.c
71092--- linux-3.0.7/net/atm/mpoa_caches.c 2011-07-21 22:17:23.000000000 -0400
71093+++ linux-3.0.7/net/atm/mpoa_caches.c 2011-08-23 21:48:14.000000000 -0400
71094@@ -255,6 +255,8 @@ static void check_resolving_entries(stru
71095 struct timeval now;
71096 struct k_message msg;
71097
71098+ pax_track_stack();
71099+
71100 do_gettimeofday(&now);
71101
71102 read_lock_bh(&client->ingress_lock);
71103diff -urNp linux-3.0.7/net/atm/proc.c linux-3.0.7/net/atm/proc.c
71104--- linux-3.0.7/net/atm/proc.c 2011-07-21 22:17:23.000000000 -0400
71105+++ linux-3.0.7/net/atm/proc.c 2011-08-23 21:47:56.000000000 -0400
71106@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
71107 const struct k_atm_aal_stats *stats)
71108 {
71109 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
71110- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
71111- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
71112- atomic_read(&stats->rx_drop));
71113+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
71114+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
71115+ atomic_read_unchecked(&stats->rx_drop));
71116 }
71117
71118 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
71119diff -urNp linux-3.0.7/net/atm/resources.c linux-3.0.7/net/atm/resources.c
71120--- linux-3.0.7/net/atm/resources.c 2011-07-21 22:17:23.000000000 -0400
71121+++ linux-3.0.7/net/atm/resources.c 2011-08-23 21:47:56.000000000 -0400
71122@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
71123 static void copy_aal_stats(struct k_atm_aal_stats *from,
71124 struct atm_aal_stats *to)
71125 {
71126-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71127+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71128 __AAL_STAT_ITEMS
71129 #undef __HANDLE_ITEM
71130 }
71131@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
71132 static void subtract_aal_stats(struct k_atm_aal_stats *from,
71133 struct atm_aal_stats *to)
71134 {
71135-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71136+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
71137 __AAL_STAT_ITEMS
71138 #undef __HANDLE_ITEM
71139 }
71140diff -urNp linux-3.0.7/net/batman-adv/hard-interface.c linux-3.0.7/net/batman-adv/hard-interface.c
71141--- linux-3.0.7/net/batman-adv/hard-interface.c 2011-07-21 22:17:23.000000000 -0400
71142+++ linux-3.0.7/net/batman-adv/hard-interface.c 2011-08-23 21:47:56.000000000 -0400
71143@@ -351,8 +351,8 @@ int hardif_enable_interface(struct hard_
71144 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
71145 dev_add_pack(&hard_iface->batman_adv_ptype);
71146
71147- atomic_set(&hard_iface->seqno, 1);
71148- atomic_set(&hard_iface->frag_seqno, 1);
71149+ atomic_set_unchecked(&hard_iface->seqno, 1);
71150+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
71151 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
71152 hard_iface->net_dev->name);
71153
71154diff -urNp linux-3.0.7/net/batman-adv/routing.c linux-3.0.7/net/batman-adv/routing.c
71155--- linux-3.0.7/net/batman-adv/routing.c 2011-07-21 22:17:23.000000000 -0400
71156+++ linux-3.0.7/net/batman-adv/routing.c 2011-08-23 21:47:56.000000000 -0400
71157@@ -627,7 +627,7 @@ void receive_bat_packet(struct ethhdr *e
71158 return;
71159
71160 /* could be changed by schedule_own_packet() */
71161- if_incoming_seqno = atomic_read(&if_incoming->seqno);
71162+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
71163
71164 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
71165
71166diff -urNp linux-3.0.7/net/batman-adv/send.c linux-3.0.7/net/batman-adv/send.c
71167--- linux-3.0.7/net/batman-adv/send.c 2011-07-21 22:17:23.000000000 -0400
71168+++ linux-3.0.7/net/batman-adv/send.c 2011-08-23 21:47:56.000000000 -0400
71169@@ -279,7 +279,7 @@ void schedule_own_packet(struct hard_ifa
71170
71171 /* change sequence number to network order */
71172 batman_packet->seqno =
71173- htonl((uint32_t)atomic_read(&hard_iface->seqno));
71174+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
71175
71176 if (vis_server == VIS_TYPE_SERVER_SYNC)
71177 batman_packet->flags |= VIS_SERVER;
71178@@ -293,7 +293,7 @@ void schedule_own_packet(struct hard_ifa
71179 else
71180 batman_packet->gw_flags = 0;
71181
71182- atomic_inc(&hard_iface->seqno);
71183+ atomic_inc_unchecked(&hard_iface->seqno);
71184
71185 slide_own_bcast_window(hard_iface);
71186 send_time = own_send_time(bat_priv);
71187diff -urNp linux-3.0.7/net/batman-adv/soft-interface.c linux-3.0.7/net/batman-adv/soft-interface.c
71188--- linux-3.0.7/net/batman-adv/soft-interface.c 2011-07-21 22:17:23.000000000 -0400
71189+++ linux-3.0.7/net/batman-adv/soft-interface.c 2011-08-23 21:47:56.000000000 -0400
71190@@ -628,7 +628,7 @@ int interface_tx(struct sk_buff *skb, st
71191
71192 /* set broadcast sequence number */
71193 bcast_packet->seqno =
71194- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
71195+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
71196
71197 add_bcast_packet_to_list(bat_priv, skb);
71198
71199@@ -830,7 +830,7 @@ struct net_device *softif_create(char *n
71200 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
71201
71202 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
71203- atomic_set(&bat_priv->bcast_seqno, 1);
71204+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
71205 atomic_set(&bat_priv->tt_local_changed, 0);
71206
71207 bat_priv->primary_if = NULL;
71208diff -urNp linux-3.0.7/net/batman-adv/types.h linux-3.0.7/net/batman-adv/types.h
71209--- linux-3.0.7/net/batman-adv/types.h 2011-07-21 22:17:23.000000000 -0400
71210+++ linux-3.0.7/net/batman-adv/types.h 2011-08-23 21:47:56.000000000 -0400
71211@@ -38,8 +38,8 @@ struct hard_iface {
71212 int16_t if_num;
71213 char if_status;
71214 struct net_device *net_dev;
71215- atomic_t seqno;
71216- atomic_t frag_seqno;
71217+ atomic_unchecked_t seqno;
71218+ atomic_unchecked_t frag_seqno;
71219 unsigned char *packet_buff;
71220 int packet_len;
71221 struct kobject *hardif_obj;
71222@@ -142,7 +142,7 @@ struct bat_priv {
71223 atomic_t orig_interval; /* uint */
71224 atomic_t hop_penalty; /* uint */
71225 atomic_t log_level; /* uint */
71226- atomic_t bcast_seqno;
71227+ atomic_unchecked_t bcast_seqno;
71228 atomic_t bcast_queue_left;
71229 atomic_t batman_queue_left;
71230 char num_ifaces;
71231diff -urNp linux-3.0.7/net/batman-adv/unicast.c linux-3.0.7/net/batman-adv/unicast.c
71232--- linux-3.0.7/net/batman-adv/unicast.c 2011-07-21 22:17:23.000000000 -0400
71233+++ linux-3.0.7/net/batman-adv/unicast.c 2011-08-23 21:47:56.000000000 -0400
71234@@ -265,7 +265,7 @@ int frag_send_skb(struct sk_buff *skb, s
71235 frag1->flags = UNI_FRAG_HEAD | large_tail;
71236 frag2->flags = large_tail;
71237
71238- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
71239+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
71240 frag1->seqno = htons(seqno - 1);
71241 frag2->seqno = htons(seqno);
71242
71243diff -urNp linux-3.0.7/net/bridge/br_multicast.c linux-3.0.7/net/bridge/br_multicast.c
71244--- linux-3.0.7/net/bridge/br_multicast.c 2011-10-16 21:54:54.000000000 -0400
71245+++ linux-3.0.7/net/bridge/br_multicast.c 2011-10-16 21:55:28.000000000 -0400
71246@@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct
71247 nexthdr = ip6h->nexthdr;
71248 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
71249
71250- if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
71251+ if (nexthdr != IPPROTO_ICMPV6)
71252 return 0;
71253
71254 /* Okay, we found ICMPv6 header */
71255diff -urNp linux-3.0.7/net/bridge/netfilter/ebtables.c linux-3.0.7/net/bridge/netfilter/ebtables.c
71256--- linux-3.0.7/net/bridge/netfilter/ebtables.c 2011-07-21 22:17:23.000000000 -0400
71257+++ linux-3.0.7/net/bridge/netfilter/ebtables.c 2011-08-23 21:48:14.000000000 -0400
71258@@ -1512,7 +1512,7 @@ static int do_ebt_get_ctl(struct sock *s
71259 tmp.valid_hooks = t->table->valid_hooks;
71260 }
71261 mutex_unlock(&ebt_mutex);
71262- if (copy_to_user(user, &tmp, *len) != 0){
71263+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
71264 BUGPRINT("c2u Didn't work\n");
71265 ret = -EFAULT;
71266 break;
71267@@ -1780,6 +1780,8 @@ static int compat_copy_everything_to_use
71268 int ret;
71269 void __user *pos;
71270
71271+ pax_track_stack();
71272+
71273 memset(&tinfo, 0, sizeof(tinfo));
71274
71275 if (cmd == EBT_SO_GET_ENTRIES) {
71276diff -urNp linux-3.0.7/net/caif/caif_socket.c linux-3.0.7/net/caif/caif_socket.c
71277--- linux-3.0.7/net/caif/caif_socket.c 2011-07-21 22:17:23.000000000 -0400
71278+++ linux-3.0.7/net/caif/caif_socket.c 2011-08-23 21:47:56.000000000 -0400
71279@@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
71280 #ifdef CONFIG_DEBUG_FS
71281 struct debug_fs_counter {
71282 atomic_t caif_nr_socks;
71283- atomic_t caif_sock_create;
71284- atomic_t num_connect_req;
71285- atomic_t num_connect_resp;
71286- atomic_t num_connect_fail_resp;
71287- atomic_t num_disconnect;
71288- atomic_t num_remote_shutdown_ind;
71289- atomic_t num_tx_flow_off_ind;
71290- atomic_t num_tx_flow_on_ind;
71291- atomic_t num_rx_flow_off;
71292- atomic_t num_rx_flow_on;
71293+ atomic_unchecked_t caif_sock_create;
71294+ atomic_unchecked_t num_connect_req;
71295+ atomic_unchecked_t num_connect_resp;
71296+ atomic_unchecked_t num_connect_fail_resp;
71297+ atomic_unchecked_t num_disconnect;
71298+ atomic_unchecked_t num_remote_shutdown_ind;
71299+ atomic_unchecked_t num_tx_flow_off_ind;
71300+ atomic_unchecked_t num_tx_flow_on_ind;
71301+ atomic_unchecked_t num_rx_flow_off;
71302+ atomic_unchecked_t num_rx_flow_on;
71303 };
71304 static struct debug_fs_counter cnt;
71305 #define dbfs_atomic_inc(v) atomic_inc_return(v)
71306+#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
71307 #define dbfs_atomic_dec(v) atomic_dec_return(v)
71308 #else
71309 #define dbfs_atomic_inc(v) 0
71310@@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct soc
71311 atomic_read(&cf_sk->sk.sk_rmem_alloc),
71312 sk_rcvbuf_lowwater(cf_sk));
71313 set_rx_flow_off(cf_sk);
71314- dbfs_atomic_inc(&cnt.num_rx_flow_off);
71315+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
71316 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
71317 }
71318
71319@@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct soc
71320 set_rx_flow_off(cf_sk);
71321 if (net_ratelimit())
71322 pr_debug("sending flow OFF due to rmem_schedule\n");
71323- dbfs_atomic_inc(&cnt.num_rx_flow_off);
71324+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
71325 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
71326 }
71327 skb->dev = NULL;
71328@@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer
71329 switch (flow) {
71330 case CAIF_CTRLCMD_FLOW_ON_IND:
71331 /* OK from modem to start sending again */
71332- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
71333+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
71334 set_tx_flow_on(cf_sk);
71335 cf_sk->sk.sk_state_change(&cf_sk->sk);
71336 break;
71337
71338 case CAIF_CTRLCMD_FLOW_OFF_IND:
71339 /* Modem asks us to shut up */
71340- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
71341+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
71342 set_tx_flow_off(cf_sk);
71343 cf_sk->sk.sk_state_change(&cf_sk->sk);
71344 break;
71345@@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer
71346 /* We're now connected */
71347 caif_client_register_refcnt(&cf_sk->layer,
71348 cfsk_hold, cfsk_put);
71349- dbfs_atomic_inc(&cnt.num_connect_resp);
71350+ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
71351 cf_sk->sk.sk_state = CAIF_CONNECTED;
71352 set_tx_flow_on(cf_sk);
71353 cf_sk->sk.sk_state_change(&cf_sk->sk);
71354@@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer
71355
71356 case CAIF_CTRLCMD_INIT_FAIL_RSP:
71357 /* Connect request failed */
71358- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
71359+ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
71360 cf_sk->sk.sk_err = ECONNREFUSED;
71361 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
71362 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
71363@@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer
71364
71365 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
71366 /* Modem has closed this connection, or device is down. */
71367- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
71368+ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
71369 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
71370 cf_sk->sk.sk_err = ECONNRESET;
71371 set_rx_flow_on(cf_sk);
71372@@ -297,7 +298,7 @@ static void caif_check_flow_release(stru
71373 return;
71374
71375 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
71376- dbfs_atomic_inc(&cnt.num_rx_flow_on);
71377+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
71378 set_rx_flow_on(cf_sk);
71379 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
71380 }
71381@@ -854,7 +855,7 @@ static int caif_connect(struct socket *s
71382 /*ifindex = id of the interface.*/
71383 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
71384
71385- dbfs_atomic_inc(&cnt.num_connect_req);
71386+ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
71387 cf_sk->layer.receive = caif_sktrecv_cb;
71388
71389 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
71390@@ -943,7 +944,7 @@ static int caif_release(struct socket *s
71391 spin_unlock_bh(&sk->sk_receive_queue.lock);
71392 sock->sk = NULL;
71393
71394- dbfs_atomic_inc(&cnt.num_disconnect);
71395+ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
71396
71397 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
71398 if (cf_sk->debugfs_socket_dir != NULL)
71399@@ -1122,7 +1123,7 @@ static int caif_create(struct net *net,
71400 cf_sk->conn_req.protocol = protocol;
71401 /* Increase the number of sockets created. */
71402 dbfs_atomic_inc(&cnt.caif_nr_socks);
71403- num = dbfs_atomic_inc(&cnt.caif_sock_create);
71404+ num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
71405 #ifdef CONFIG_DEBUG_FS
71406 if (!IS_ERR(debugfsdir)) {
71407
71408diff -urNp linux-3.0.7/net/caif/cfctrl.c linux-3.0.7/net/caif/cfctrl.c
71409--- linux-3.0.7/net/caif/cfctrl.c 2011-07-21 22:17:23.000000000 -0400
71410+++ linux-3.0.7/net/caif/cfctrl.c 2011-08-23 21:48:14.000000000 -0400
71411@@ -9,6 +9,7 @@
71412 #include <linux/stddef.h>
71413 #include <linux/spinlock.h>
71414 #include <linux/slab.h>
71415+#include <linux/sched.h>
71416 #include <net/caif/caif_layer.h>
71417 #include <net/caif/cfpkt.h>
71418 #include <net/caif/cfctrl.h>
71419@@ -45,8 +46,8 @@ struct cflayer *cfctrl_create(void)
71420 dev_info.id = 0xff;
71421 memset(this, 0, sizeof(*this));
71422 cfsrvl_init(&this->serv, 0, &dev_info, false);
71423- atomic_set(&this->req_seq_no, 1);
71424- atomic_set(&this->rsp_seq_no, 1);
71425+ atomic_set_unchecked(&this->req_seq_no, 1);
71426+ atomic_set_unchecked(&this->rsp_seq_no, 1);
71427 this->serv.layer.receive = cfctrl_recv;
71428 sprintf(this->serv.layer.name, "ctrl");
71429 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
71430@@ -132,8 +133,8 @@ static void cfctrl_insert_req(struct cfc
71431 struct cfctrl_request_info *req)
71432 {
71433 spin_lock_bh(&ctrl->info_list_lock);
71434- atomic_inc(&ctrl->req_seq_no);
71435- req->sequence_no = atomic_read(&ctrl->req_seq_no);
71436+ atomic_inc_unchecked(&ctrl->req_seq_no);
71437+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
71438 list_add_tail(&req->list, &ctrl->list);
71439 spin_unlock_bh(&ctrl->info_list_lock);
71440 }
71441@@ -151,7 +152,7 @@ static struct cfctrl_request_info *cfctr
71442 if (p != first)
71443 pr_warn("Requests are not received in order\n");
71444
71445- atomic_set(&ctrl->rsp_seq_no,
71446+ atomic_set_unchecked(&ctrl->rsp_seq_no,
71447 p->sequence_no);
71448 list_del(&p->list);
71449 goto out;
71450@@ -364,6 +365,7 @@ static int cfctrl_recv(struct cflayer *l
71451 struct cfctrl *cfctrl = container_obj(layer);
71452 struct cfctrl_request_info rsp, *req;
71453
71454+ pax_track_stack();
71455
71456 cfpkt_extr_head(pkt, &cmdrsp, 1);
71457 cmd = cmdrsp & CFCTRL_CMD_MASK;
71458diff -urNp linux-3.0.7/net/compat.c linux-3.0.7/net/compat.c
71459--- linux-3.0.7/net/compat.c 2011-07-21 22:17:23.000000000 -0400
71460+++ linux-3.0.7/net/compat.c 2011-10-06 04:17:55.000000000 -0400
71461@@ -70,9 +70,9 @@ int get_compat_msghdr(struct msghdr *kms
71462 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
71463 __get_user(kmsg->msg_flags, &umsg->msg_flags))
71464 return -EFAULT;
71465- kmsg->msg_name = compat_ptr(tmp1);
71466- kmsg->msg_iov = compat_ptr(tmp2);
71467- kmsg->msg_control = compat_ptr(tmp3);
71468+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
71469+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
71470+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
71471 return 0;
71472 }
71473
71474@@ -84,7 +84,7 @@ int verify_compat_iovec(struct msghdr *k
71475
71476 if (kern_msg->msg_namelen) {
71477 if (mode == VERIFY_READ) {
71478- int err = move_addr_to_kernel(kern_msg->msg_name,
71479+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
71480 kern_msg->msg_namelen,
71481 kern_address);
71482 if (err < 0)
71483@@ -95,7 +95,7 @@ int verify_compat_iovec(struct msghdr *k
71484 kern_msg->msg_name = NULL;
71485
71486 tot_len = iov_from_user_compat_to_kern(kern_iov,
71487- (struct compat_iovec __user *)kern_msg->msg_iov,
71488+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
71489 kern_msg->msg_iovlen);
71490 if (tot_len >= 0)
71491 kern_msg->msg_iov = kern_iov;
71492@@ -115,20 +115,20 @@ int verify_compat_iovec(struct msghdr *k
71493
71494 #define CMSG_COMPAT_FIRSTHDR(msg) \
71495 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
71496- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
71497+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
71498 (struct compat_cmsghdr __user *)NULL)
71499
71500 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
71501 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
71502 (ucmlen) <= (unsigned long) \
71503 ((mhdr)->msg_controllen - \
71504- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
71505+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
71506
71507 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
71508 struct compat_cmsghdr __user *cmsg, int cmsg_len)
71509 {
71510 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
71511- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
71512+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
71513 msg->msg_controllen)
71514 return NULL;
71515 return (struct compat_cmsghdr __user *)ptr;
71516@@ -220,7 +220,7 @@ int put_cmsg_compat(struct msghdr *kmsg,
71517 {
71518 struct compat_timeval ctv;
71519 struct compat_timespec cts[3];
71520- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
71521+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
71522 struct compat_cmsghdr cmhdr;
71523 int cmlen;
71524
71525@@ -272,7 +272,7 @@ int put_cmsg_compat(struct msghdr *kmsg,
71526
71527 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
71528 {
71529- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
71530+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
71531 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
71532 int fdnum = scm->fp->count;
71533 struct file **fp = scm->fp->fp;
71534@@ -369,7 +369,7 @@ static int do_set_sock_timeout(struct so
71535 return -EFAULT;
71536 old_fs = get_fs();
71537 set_fs(KERNEL_DS);
71538- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
71539+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
71540 set_fs(old_fs);
71541
71542 return err;
71543@@ -430,7 +430,7 @@ static int do_get_sock_timeout(struct so
71544 len = sizeof(ktime);
71545 old_fs = get_fs();
71546 set_fs(KERNEL_DS);
71547- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
71548+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
71549 set_fs(old_fs);
71550
71551 if (!err) {
71552@@ -565,7 +565,7 @@ int compat_mc_setsockopt(struct sock *so
71553 case MCAST_JOIN_GROUP:
71554 case MCAST_LEAVE_GROUP:
71555 {
71556- struct compat_group_req __user *gr32 = (void *)optval;
71557+ struct compat_group_req __user *gr32 = (void __user *)optval;
71558 struct group_req __user *kgr =
71559 compat_alloc_user_space(sizeof(struct group_req));
71560 u32 interface;
71561@@ -586,7 +586,7 @@ int compat_mc_setsockopt(struct sock *so
71562 case MCAST_BLOCK_SOURCE:
71563 case MCAST_UNBLOCK_SOURCE:
71564 {
71565- struct compat_group_source_req __user *gsr32 = (void *)optval;
71566+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
71567 struct group_source_req __user *kgsr = compat_alloc_user_space(
71568 sizeof(struct group_source_req));
71569 u32 interface;
71570@@ -607,7 +607,7 @@ int compat_mc_setsockopt(struct sock *so
71571 }
71572 case MCAST_MSFILTER:
71573 {
71574- struct compat_group_filter __user *gf32 = (void *)optval;
71575+ struct compat_group_filter __user *gf32 = (void __user *)optval;
71576 struct group_filter __user *kgf;
71577 u32 interface, fmode, numsrc;
71578
71579@@ -645,7 +645,7 @@ int compat_mc_getsockopt(struct sock *so
71580 char __user *optval, int __user *optlen,
71581 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
71582 {
71583- struct compat_group_filter __user *gf32 = (void *)optval;
71584+ struct compat_group_filter __user *gf32 = (void __user *)optval;
71585 struct group_filter __user *kgf;
71586 int __user *koptlen;
71587 u32 interface, fmode, numsrc;
71588diff -urNp linux-3.0.7/net/core/datagram.c linux-3.0.7/net/core/datagram.c
71589--- linux-3.0.7/net/core/datagram.c 2011-07-21 22:17:23.000000000 -0400
71590+++ linux-3.0.7/net/core/datagram.c 2011-08-23 21:47:56.000000000 -0400
71591@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
71592 }
71593
71594 kfree_skb(skb);
71595- atomic_inc(&sk->sk_drops);
71596+ atomic_inc_unchecked(&sk->sk_drops);
71597 sk_mem_reclaim_partial(sk);
71598
71599 return err;
71600diff -urNp linux-3.0.7/net/core/dev.c linux-3.0.7/net/core/dev.c
71601--- linux-3.0.7/net/core/dev.c 2011-07-21 22:17:23.000000000 -0400
71602+++ linux-3.0.7/net/core/dev.c 2011-08-23 21:48:14.000000000 -0400
71603@@ -1125,10 +1125,14 @@ void dev_load(struct net *net, const cha
71604 if (no_module && capable(CAP_NET_ADMIN))
71605 no_module = request_module("netdev-%s", name);
71606 if (no_module && capable(CAP_SYS_MODULE)) {
71607+#ifdef CONFIG_GRKERNSEC_MODHARDEN
71608+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
71609+#else
71610 if (!request_module("%s", name))
71611 pr_err("Loading kernel module for a network device "
71612 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
71613 "instead\n", name);
71614+#endif
71615 }
71616 }
71617 EXPORT_SYMBOL(dev_load);
71618@@ -1959,7 +1963,7 @@ static int illegal_highdma(struct net_de
71619
71620 struct dev_gso_cb {
71621 void (*destructor)(struct sk_buff *skb);
71622-};
71623+} __no_const;
71624
71625 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
71626
71627@@ -2912,7 +2916,7 @@ int netif_rx_ni(struct sk_buff *skb)
71628 }
71629 EXPORT_SYMBOL(netif_rx_ni);
71630
71631-static void net_tx_action(struct softirq_action *h)
71632+static void net_tx_action(void)
71633 {
71634 struct softnet_data *sd = &__get_cpu_var(softnet_data);
71635
71636@@ -3761,7 +3765,7 @@ void netif_napi_del(struct napi_struct *
71637 }
71638 EXPORT_SYMBOL(netif_napi_del);
71639
71640-static void net_rx_action(struct softirq_action *h)
71641+static void net_rx_action(void)
71642 {
71643 struct softnet_data *sd = &__get_cpu_var(softnet_data);
71644 unsigned long time_limit = jiffies + 2;
71645diff -urNp linux-3.0.7/net/core/flow.c linux-3.0.7/net/core/flow.c
71646--- linux-3.0.7/net/core/flow.c 2011-07-21 22:17:23.000000000 -0400
71647+++ linux-3.0.7/net/core/flow.c 2011-08-23 21:47:56.000000000 -0400
71648@@ -60,7 +60,7 @@ struct flow_cache {
71649 struct timer_list rnd_timer;
71650 };
71651
71652-atomic_t flow_cache_genid = ATOMIC_INIT(0);
71653+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
71654 EXPORT_SYMBOL(flow_cache_genid);
71655 static struct flow_cache flow_cache_global;
71656 static struct kmem_cache *flow_cachep __read_mostly;
71657@@ -85,7 +85,7 @@ static void flow_cache_new_hashrnd(unsig
71658
71659 static int flow_entry_valid(struct flow_cache_entry *fle)
71660 {
71661- if (atomic_read(&flow_cache_genid) != fle->genid)
71662+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
71663 return 0;
71664 if (fle->object && !fle->object->ops->check(fle->object))
71665 return 0;
71666@@ -253,7 +253,7 @@ flow_cache_lookup(struct net *net, const
71667 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
71668 fcp->hash_count++;
71669 }
71670- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
71671+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
71672 flo = fle->object;
71673 if (!flo)
71674 goto ret_object;
71675@@ -274,7 +274,7 @@ nocache:
71676 }
71677 flo = resolver(net, key, family, dir, flo, ctx);
71678 if (fle) {
71679- fle->genid = atomic_read(&flow_cache_genid);
71680+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
71681 if (!IS_ERR(flo))
71682 fle->object = flo;
71683 else
71684diff -urNp linux-3.0.7/net/core/iovec.c linux-3.0.7/net/core/iovec.c
71685--- linux-3.0.7/net/core/iovec.c 2011-07-21 22:17:23.000000000 -0400
71686+++ linux-3.0.7/net/core/iovec.c 2011-10-06 04:17:55.000000000 -0400
71687@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struc
71688 if (m->msg_namelen) {
71689 if (mode == VERIFY_READ) {
71690 void __user *namep;
71691- namep = (void __user __force *) m->msg_name;
71692+ namep = (void __force_user *) m->msg_name;
71693 err = move_addr_to_kernel(namep, m->msg_namelen,
71694 address);
71695 if (err < 0)
71696@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struc
71697 }
71698
71699 size = m->msg_iovlen * sizeof(struct iovec);
71700- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
71701+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
71702 return -EFAULT;
71703
71704 m->msg_iov = iov;
71705diff -urNp linux-3.0.7/net/core/rtnetlink.c linux-3.0.7/net/core/rtnetlink.c
71706--- linux-3.0.7/net/core/rtnetlink.c 2011-07-21 22:17:23.000000000 -0400
71707+++ linux-3.0.7/net/core/rtnetlink.c 2011-08-23 21:47:56.000000000 -0400
71708@@ -56,7 +56,7 @@
71709 struct rtnl_link {
71710 rtnl_doit_func doit;
71711 rtnl_dumpit_func dumpit;
71712-};
71713+} __no_const;
71714
71715 static DEFINE_MUTEX(rtnl_mutex);
71716
71717diff -urNp linux-3.0.7/net/core/scm.c linux-3.0.7/net/core/scm.c
71718--- linux-3.0.7/net/core/scm.c 2011-10-16 21:54:54.000000000 -0400
71719+++ linux-3.0.7/net/core/scm.c 2011-10-16 21:55:28.000000000 -0400
71720@@ -218,7 +218,7 @@ EXPORT_SYMBOL(__scm_send);
71721 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
71722 {
71723 struct cmsghdr __user *cm
71724- = (__force struct cmsghdr __user *)msg->msg_control;
71725+ = (struct cmsghdr __force_user *)msg->msg_control;
71726 struct cmsghdr cmhdr;
71727 int cmlen = CMSG_LEN(len);
71728 int err;
71729@@ -241,7 +241,7 @@ int put_cmsg(struct msghdr * msg, int le
71730 err = -EFAULT;
71731 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
71732 goto out;
71733- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
71734+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
71735 goto out;
71736 cmlen = CMSG_SPACE(len);
71737 if (msg->msg_controllen < cmlen)
71738@@ -257,7 +257,7 @@ EXPORT_SYMBOL(put_cmsg);
71739 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
71740 {
71741 struct cmsghdr __user *cm
71742- = (__force struct cmsghdr __user*)msg->msg_control;
71743+ = (struct cmsghdr __force_user *)msg->msg_control;
71744
71745 int fdmax = 0;
71746 int fdnum = scm->fp->count;
71747@@ -277,7 +277,7 @@ void scm_detach_fds(struct msghdr *msg,
71748 if (fdnum < fdmax)
71749 fdmax = fdnum;
71750
71751- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
71752+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
71753 i++, cmfptr++)
71754 {
71755 int new_fd;
71756diff -urNp linux-3.0.7/net/core/skbuff.c linux-3.0.7/net/core/skbuff.c
71757--- linux-3.0.7/net/core/skbuff.c 2011-07-21 22:17:23.000000000 -0400
71758+++ linux-3.0.7/net/core/skbuff.c 2011-08-23 21:48:14.000000000 -0400
71759@@ -1543,6 +1543,8 @@ int skb_splice_bits(struct sk_buff *skb,
71760 struct sock *sk = skb->sk;
71761 int ret = 0;
71762
71763+ pax_track_stack();
71764+
71765 if (splice_grow_spd(pipe, &spd))
71766 return -ENOMEM;
71767
71768diff -urNp linux-3.0.7/net/core/sock.c linux-3.0.7/net/core/sock.c
71769--- linux-3.0.7/net/core/sock.c 2011-07-21 22:17:23.000000000 -0400
71770+++ linux-3.0.7/net/core/sock.c 2011-08-23 21:48:14.000000000 -0400
71771@@ -291,7 +291,7 @@ int sock_queue_rcv_skb(struct sock *sk,
71772 */
71773 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
71774 (unsigned)sk->sk_rcvbuf) {
71775- atomic_inc(&sk->sk_drops);
71776+ atomic_inc_unchecked(&sk->sk_drops);
71777 return -ENOMEM;
71778 }
71779
71780@@ -300,7 +300,7 @@ int sock_queue_rcv_skb(struct sock *sk,
71781 return err;
71782
71783 if (!sk_rmem_schedule(sk, skb->truesize)) {
71784- atomic_inc(&sk->sk_drops);
71785+ atomic_inc_unchecked(&sk->sk_drops);
71786 return -ENOBUFS;
71787 }
71788
71789@@ -320,7 +320,7 @@ int sock_queue_rcv_skb(struct sock *sk,
71790 skb_dst_force(skb);
71791
71792 spin_lock_irqsave(&list->lock, flags);
71793- skb->dropcount = atomic_read(&sk->sk_drops);
71794+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
71795 __skb_queue_tail(list, skb);
71796 spin_unlock_irqrestore(&list->lock, flags);
71797
71798@@ -340,7 +340,7 @@ int sk_receive_skb(struct sock *sk, stru
71799 skb->dev = NULL;
71800
71801 if (sk_rcvqueues_full(sk, skb)) {
71802- atomic_inc(&sk->sk_drops);
71803+ atomic_inc_unchecked(&sk->sk_drops);
71804 goto discard_and_relse;
71805 }
71806 if (nested)
71807@@ -358,7 +358,7 @@ int sk_receive_skb(struct sock *sk, stru
71808 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
71809 } else if (sk_add_backlog(sk, skb)) {
71810 bh_unlock_sock(sk);
71811- atomic_inc(&sk->sk_drops);
71812+ atomic_inc_unchecked(&sk->sk_drops);
71813 goto discard_and_relse;
71814 }
71815
71816@@ -921,7 +921,7 @@ int sock_getsockopt(struct socket *sock,
71817 if (len > sizeof(peercred))
71818 len = sizeof(peercred);
71819 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
71820- if (copy_to_user(optval, &peercred, len))
71821+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
71822 return -EFAULT;
71823 goto lenout;
71824 }
71825@@ -934,7 +934,7 @@ int sock_getsockopt(struct socket *sock,
71826 return -ENOTCONN;
71827 if (lv < len)
71828 return -EINVAL;
71829- if (copy_to_user(optval, address, len))
71830+ if (len > sizeof(address) || copy_to_user(optval, address, len))
71831 return -EFAULT;
71832 goto lenout;
71833 }
71834@@ -967,7 +967,7 @@ int sock_getsockopt(struct socket *sock,
71835
71836 if (len > lv)
71837 len = lv;
71838- if (copy_to_user(optval, &v, len))
71839+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
71840 return -EFAULT;
71841 lenout:
71842 if (put_user(len, optlen))
71843@@ -2023,7 +2023,7 @@ void sock_init_data(struct socket *sock,
71844 */
71845 smp_wmb();
71846 atomic_set(&sk->sk_refcnt, 1);
71847- atomic_set(&sk->sk_drops, 0);
71848+ atomic_set_unchecked(&sk->sk_drops, 0);
71849 }
71850 EXPORT_SYMBOL(sock_init_data);
71851
71852diff -urNp linux-3.0.7/net/decnet/sysctl_net_decnet.c linux-3.0.7/net/decnet/sysctl_net_decnet.c
71853--- linux-3.0.7/net/decnet/sysctl_net_decnet.c 2011-07-21 22:17:23.000000000 -0400
71854+++ linux-3.0.7/net/decnet/sysctl_net_decnet.c 2011-08-23 21:47:56.000000000 -0400
71855@@ -173,7 +173,7 @@ static int dn_node_address_handler(ctl_t
71856
71857 if (len > *lenp) len = *lenp;
71858
71859- if (copy_to_user(buffer, addr, len))
71860+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
71861 return -EFAULT;
71862
71863 *lenp = len;
71864@@ -236,7 +236,7 @@ static int dn_def_dev_handler(ctl_table
71865
71866 if (len > *lenp) len = *lenp;
71867
71868- if (copy_to_user(buffer, devname, len))
71869+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
71870 return -EFAULT;
71871
71872 *lenp = len;
71873diff -urNp linux-3.0.7/net/econet/Kconfig linux-3.0.7/net/econet/Kconfig
71874--- linux-3.0.7/net/econet/Kconfig 2011-07-21 22:17:23.000000000 -0400
71875+++ linux-3.0.7/net/econet/Kconfig 2011-08-23 21:48:14.000000000 -0400
71876@@ -4,7 +4,7 @@
71877
71878 config ECONET
71879 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
71880- depends on EXPERIMENTAL && INET
71881+ depends on EXPERIMENTAL && INET && BROKEN
71882 ---help---
71883 Econet is a fairly old and slow networking protocol mainly used by
71884 Acorn computers to access file and print servers. It uses native
71885diff -urNp linux-3.0.7/net/ipv4/fib_frontend.c linux-3.0.7/net/ipv4/fib_frontend.c
71886--- linux-3.0.7/net/ipv4/fib_frontend.c 2011-07-21 22:17:23.000000000 -0400
71887+++ linux-3.0.7/net/ipv4/fib_frontend.c 2011-08-23 21:47:56.000000000 -0400
71888@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct not
71889 #ifdef CONFIG_IP_ROUTE_MULTIPATH
71890 fib_sync_up(dev);
71891 #endif
71892- atomic_inc(&net->ipv4.dev_addr_genid);
71893+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
71894 rt_cache_flush(dev_net(dev), -1);
71895 break;
71896 case NETDEV_DOWN:
71897 fib_del_ifaddr(ifa, NULL);
71898- atomic_inc(&net->ipv4.dev_addr_genid);
71899+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
71900 if (ifa->ifa_dev->ifa_list == NULL) {
71901 /* Last address was deleted from this interface.
71902 * Disable IP.
71903@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notif
71904 #ifdef CONFIG_IP_ROUTE_MULTIPATH
71905 fib_sync_up(dev);
71906 #endif
71907- atomic_inc(&net->ipv4.dev_addr_genid);
71908+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
71909 rt_cache_flush(dev_net(dev), -1);
71910 break;
71911 case NETDEV_DOWN:
71912diff -urNp linux-3.0.7/net/ipv4/fib_semantics.c linux-3.0.7/net/ipv4/fib_semantics.c
71913--- linux-3.0.7/net/ipv4/fib_semantics.c 2011-07-21 22:17:23.000000000 -0400
71914+++ linux-3.0.7/net/ipv4/fib_semantics.c 2011-08-23 21:47:56.000000000 -0400
71915@@ -691,7 +691,7 @@ __be32 fib_info_update_nh_saddr(struct n
71916 nh->nh_saddr = inet_select_addr(nh->nh_dev,
71917 nh->nh_gw,
71918 nh->nh_parent->fib_scope);
71919- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
71920+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
71921
71922 return nh->nh_saddr;
71923 }
71924diff -urNp linux-3.0.7/net/ipv4/inet_diag.c linux-3.0.7/net/ipv4/inet_diag.c
71925--- linux-3.0.7/net/ipv4/inet_diag.c 2011-07-21 22:17:23.000000000 -0400
71926+++ linux-3.0.7/net/ipv4/inet_diag.c 2011-08-23 21:48:14.000000000 -0400
71927@@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
71928 r->idiag_retrans = 0;
71929
71930 r->id.idiag_if = sk->sk_bound_dev_if;
71931+
71932+#ifdef CONFIG_GRKERNSEC_HIDESYM
71933+ r->id.idiag_cookie[0] = 0;
71934+ r->id.idiag_cookie[1] = 0;
71935+#else
71936 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
71937 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
71938+#endif
71939
71940 r->id.idiag_sport = inet->inet_sport;
71941 r->id.idiag_dport = inet->inet_dport;
71942@@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
71943 r->idiag_family = tw->tw_family;
71944 r->idiag_retrans = 0;
71945 r->id.idiag_if = tw->tw_bound_dev_if;
71946+
71947+#ifdef CONFIG_GRKERNSEC_HIDESYM
71948+ r->id.idiag_cookie[0] = 0;
71949+ r->id.idiag_cookie[1] = 0;
71950+#else
71951 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
71952 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
71953+#endif
71954+
71955 r->id.idiag_sport = tw->tw_sport;
71956 r->id.idiag_dport = tw->tw_dport;
71957 r->id.idiag_src[0] = tw->tw_rcv_saddr;
71958@@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
71959 if (sk == NULL)
71960 goto unlock;
71961
71962+#ifndef CONFIG_GRKERNSEC_HIDESYM
71963 err = -ESTALE;
71964 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
71965 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
71966 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
71967 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
71968 goto out;
71969+#endif
71970
71971 err = -ENOMEM;
71972 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
71973@@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
71974 r->idiag_retrans = req->retrans;
71975
71976 r->id.idiag_if = sk->sk_bound_dev_if;
71977+
71978+#ifdef CONFIG_GRKERNSEC_HIDESYM
71979+ r->id.idiag_cookie[0] = 0;
71980+ r->id.idiag_cookie[1] = 0;
71981+#else
71982 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
71983 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
71984+#endif
71985
71986 tmo = req->expires - jiffies;
71987 if (tmo < 0)
71988diff -urNp linux-3.0.7/net/ipv4/inet_hashtables.c linux-3.0.7/net/ipv4/inet_hashtables.c
71989--- linux-3.0.7/net/ipv4/inet_hashtables.c 2011-09-02 18:11:21.000000000 -0400
71990+++ linux-3.0.7/net/ipv4/inet_hashtables.c 2011-08-23 21:55:24.000000000 -0400
71991@@ -18,12 +18,15 @@
71992 #include <linux/sched.h>
71993 #include <linux/slab.h>
71994 #include <linux/wait.h>
71995+#include <linux/security.h>
71996
71997 #include <net/inet_connection_sock.h>
71998 #include <net/inet_hashtables.h>
71999 #include <net/secure_seq.h>
72000 #include <net/ip.h>
72001
72002+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
72003+
72004 /*
72005 * Allocate and initialize a new local port bind bucket.
72006 * The bindhash mutex for snum's hash chain must be held here.
72007@@ -530,6 +533,8 @@ ok:
72008 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
72009 spin_unlock(&head->lock);
72010
72011+ gr_update_task_in_ip_table(current, inet_sk(sk));
72012+
72013 if (tw) {
72014 inet_twsk_deschedule(tw, death_row);
72015 while (twrefcnt) {
72016diff -urNp linux-3.0.7/net/ipv4/inetpeer.c linux-3.0.7/net/ipv4/inetpeer.c
72017--- linux-3.0.7/net/ipv4/inetpeer.c 2011-09-02 18:11:21.000000000 -0400
72018+++ linux-3.0.7/net/ipv4/inetpeer.c 2011-08-23 21:48:14.000000000 -0400
72019@@ -481,6 +481,8 @@ struct inet_peer *inet_getpeer(struct in
72020 unsigned int sequence;
72021 int invalidated, newrefcnt = 0;
72022
72023+ pax_track_stack();
72024+
72025 /* Look up for the address quickly, lockless.
72026 * Because of a concurrent writer, we might not find an existing entry.
72027 */
72028@@ -517,8 +519,8 @@ found: /* The existing node has been fo
72029 if (p) {
72030 p->daddr = *daddr;
72031 atomic_set(&p->refcnt, 1);
72032- atomic_set(&p->rid, 0);
72033- atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
72034+ atomic_set_unchecked(&p->rid, 0);
72035+ atomic_set_unchecked(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
72036 p->tcp_ts_stamp = 0;
72037 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
72038 p->rate_tokens = 0;
72039diff -urNp linux-3.0.7/net/ipv4/ipconfig.c linux-3.0.7/net/ipv4/ipconfig.c
72040--- linux-3.0.7/net/ipv4/ipconfig.c 2011-07-21 22:17:23.000000000 -0400
72041+++ linux-3.0.7/net/ipv4/ipconfig.c 2011-10-06 04:17:55.000000000 -0400
72042@@ -313,7 +313,7 @@ static int __init ic_devinet_ioctl(unsig
72043
72044 mm_segment_t oldfs = get_fs();
72045 set_fs(get_ds());
72046- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
72047+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
72048 set_fs(oldfs);
72049 return res;
72050 }
72051@@ -324,7 +324,7 @@ static int __init ic_dev_ioctl(unsigned
72052
72053 mm_segment_t oldfs = get_fs();
72054 set_fs(get_ds());
72055- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
72056+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
72057 set_fs(oldfs);
72058 return res;
72059 }
72060@@ -335,7 +335,7 @@ static int __init ic_route_ioctl(unsigne
72061
72062 mm_segment_t oldfs = get_fs();
72063 set_fs(get_ds());
72064- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
72065+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
72066 set_fs(oldfs);
72067 return res;
72068 }
72069diff -urNp linux-3.0.7/net/ipv4/ip_fragment.c linux-3.0.7/net/ipv4/ip_fragment.c
72070--- linux-3.0.7/net/ipv4/ip_fragment.c 2011-07-21 22:17:23.000000000 -0400
72071+++ linux-3.0.7/net/ipv4/ip_fragment.c 2011-08-23 21:47:56.000000000 -0400
72072@@ -315,7 +315,7 @@ static inline int ip_frag_too_far(struct
72073 return 0;
72074
72075 start = qp->rid;
72076- end = atomic_inc_return(&peer->rid);
72077+ end = atomic_inc_return_unchecked(&peer->rid);
72078 qp->rid = end;
72079
72080 rc = qp->q.fragments && (end - start) > max;
72081diff -urNp linux-3.0.7/net/ipv4/ip_sockglue.c linux-3.0.7/net/ipv4/ip_sockglue.c
72082--- linux-3.0.7/net/ipv4/ip_sockglue.c 2011-07-21 22:17:23.000000000 -0400
72083+++ linux-3.0.7/net/ipv4/ip_sockglue.c 2011-10-06 04:17:55.000000000 -0400
72084@@ -1073,6 +1073,8 @@ static int do_ip_getsockopt(struct sock
72085 int val;
72086 int len;
72087
72088+ pax_track_stack();
72089+
72090 if (level != SOL_IP)
72091 return -EOPNOTSUPP;
72092
72093@@ -1110,7 +1112,8 @@ static int do_ip_getsockopt(struct sock
72094 len = min_t(unsigned int, len, opt->optlen);
72095 if (put_user(len, optlen))
72096 return -EFAULT;
72097- if (copy_to_user(optval, opt->__data, len))
72098+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
72099+ copy_to_user(optval, opt->__data, len))
72100 return -EFAULT;
72101 return 0;
72102 }
72103@@ -1238,7 +1241,7 @@ static int do_ip_getsockopt(struct sock
72104 if (sk->sk_type != SOCK_STREAM)
72105 return -ENOPROTOOPT;
72106
72107- msg.msg_control = optval;
72108+ msg.msg_control = (void __force_kernel *)optval;
72109 msg.msg_controllen = len;
72110 msg.msg_flags = 0;
72111
72112diff -urNp linux-3.0.7/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-3.0.7/net/ipv4/netfilter/nf_nat_snmp_basic.c
72113--- linux-3.0.7/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-07-21 22:17:23.000000000 -0400
72114+++ linux-3.0.7/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-08-23 21:47:56.000000000 -0400
72115@@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
72116
72117 *len = 0;
72118
72119- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
72120+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
72121 if (*octets == NULL) {
72122 if (net_ratelimit())
72123 pr_notice("OOM in bsalg (%d)\n", __LINE__);
72124diff -urNp linux-3.0.7/net/ipv4/ping.c linux-3.0.7/net/ipv4/ping.c
72125--- linux-3.0.7/net/ipv4/ping.c 2011-07-21 22:17:23.000000000 -0400
72126+++ linux-3.0.7/net/ipv4/ping.c 2011-08-23 21:47:56.000000000 -0400
72127@@ -837,7 +837,7 @@ static void ping_format_sock(struct sock
72128 sk_rmem_alloc_get(sp),
72129 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72130 atomic_read(&sp->sk_refcnt), sp,
72131- atomic_read(&sp->sk_drops), len);
72132+ atomic_read_unchecked(&sp->sk_drops), len);
72133 }
72134
72135 static int ping_seq_show(struct seq_file *seq, void *v)
72136diff -urNp linux-3.0.7/net/ipv4/raw.c linux-3.0.7/net/ipv4/raw.c
72137--- linux-3.0.7/net/ipv4/raw.c 2011-07-21 22:17:23.000000000 -0400
72138+++ linux-3.0.7/net/ipv4/raw.c 2011-08-23 21:48:14.000000000 -0400
72139@@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
72140 int raw_rcv(struct sock *sk, struct sk_buff *skb)
72141 {
72142 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
72143- atomic_inc(&sk->sk_drops);
72144+ atomic_inc_unchecked(&sk->sk_drops);
72145 kfree_skb(skb);
72146 return NET_RX_DROP;
72147 }
72148@@ -736,16 +736,20 @@ static int raw_init(struct sock *sk)
72149
72150 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
72151 {
72152+ struct icmp_filter filter;
72153+
72154 if (optlen > sizeof(struct icmp_filter))
72155 optlen = sizeof(struct icmp_filter);
72156- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
72157+ if (copy_from_user(&filter, optval, optlen))
72158 return -EFAULT;
72159+ raw_sk(sk)->filter = filter;
72160 return 0;
72161 }
72162
72163 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
72164 {
72165 int len, ret = -EFAULT;
72166+ struct icmp_filter filter;
72167
72168 if (get_user(len, optlen))
72169 goto out;
72170@@ -755,8 +759,9 @@ static int raw_geticmpfilter(struct sock
72171 if (len > sizeof(struct icmp_filter))
72172 len = sizeof(struct icmp_filter);
72173 ret = -EFAULT;
72174- if (put_user(len, optlen) ||
72175- copy_to_user(optval, &raw_sk(sk)->filter, len))
72176+ filter = raw_sk(sk)->filter;
72177+ if (put_user(len, optlen) || len > sizeof filter ||
72178+ copy_to_user(optval, &filter, len))
72179 goto out;
72180 ret = 0;
72181 out: return ret;
72182@@ -984,7 +989,13 @@ static void raw_sock_seq_show(struct seq
72183 sk_wmem_alloc_get(sp),
72184 sk_rmem_alloc_get(sp),
72185 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72186- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72187+ atomic_read(&sp->sk_refcnt),
72188+#ifdef CONFIG_GRKERNSEC_HIDESYM
72189+ NULL,
72190+#else
72191+ sp,
72192+#endif
72193+ atomic_read_unchecked(&sp->sk_drops));
72194 }
72195
72196 static int raw_seq_show(struct seq_file *seq, void *v)
72197diff -urNp linux-3.0.7/net/ipv4/route.c linux-3.0.7/net/ipv4/route.c
72198--- linux-3.0.7/net/ipv4/route.c 2011-10-16 21:54:54.000000000 -0400
72199+++ linux-3.0.7/net/ipv4/route.c 2011-10-16 21:55:28.000000000 -0400
72200@@ -304,7 +304,7 @@ static inline unsigned int rt_hash(__be3
72201
72202 static inline int rt_genid(struct net *net)
72203 {
72204- return atomic_read(&net->ipv4.rt_genid);
72205+ return atomic_read_unchecked(&net->ipv4.rt_genid);
72206 }
72207
72208 #ifdef CONFIG_PROC_FS
72209@@ -832,7 +832,7 @@ static void rt_cache_invalidate(struct n
72210 unsigned char shuffle;
72211
72212 get_random_bytes(&shuffle, sizeof(shuffle));
72213- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
72214+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
72215 }
72216
72217 /*
72218@@ -2832,7 +2832,7 @@ static int rt_fill_info(struct net *net,
72219 error = rt->dst.error;
72220 if (peer) {
72221 inet_peer_refcheck(rt->peer);
72222- id = atomic_read(&peer->ip_id_count) & 0xffff;
72223+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
72224 if (peer->tcp_ts_stamp) {
72225 ts = peer->tcp_ts;
72226 tsage = get_seconds() - peer->tcp_ts_stamp;
72227diff -urNp linux-3.0.7/net/ipv4/tcp.c linux-3.0.7/net/ipv4/tcp.c
72228--- linux-3.0.7/net/ipv4/tcp.c 2011-07-21 22:17:23.000000000 -0400
72229+++ linux-3.0.7/net/ipv4/tcp.c 2011-08-23 21:48:14.000000000 -0400
72230@@ -2122,6 +2122,8 @@ static int do_tcp_setsockopt(struct sock
72231 int val;
72232 int err = 0;
72233
72234+ pax_track_stack();
72235+
72236 /* These are data/string values, all the others are ints */
72237 switch (optname) {
72238 case TCP_CONGESTION: {
72239@@ -2501,6 +2503,8 @@ static int do_tcp_getsockopt(struct sock
72240 struct tcp_sock *tp = tcp_sk(sk);
72241 int val, len;
72242
72243+ pax_track_stack();
72244+
72245 if (get_user(len, optlen))
72246 return -EFAULT;
72247
72248diff -urNp linux-3.0.7/net/ipv4/tcp_ipv4.c linux-3.0.7/net/ipv4/tcp_ipv4.c
72249--- linux-3.0.7/net/ipv4/tcp_ipv4.c 2011-09-02 18:11:21.000000000 -0400
72250+++ linux-3.0.7/net/ipv4/tcp_ipv4.c 2011-08-23 21:48:14.000000000 -0400
72251@@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
72252 int sysctl_tcp_low_latency __read_mostly;
72253 EXPORT_SYMBOL(sysctl_tcp_low_latency);
72254
72255+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72256+extern int grsec_enable_blackhole;
72257+#endif
72258
72259 #ifdef CONFIG_TCP_MD5SIG
72260 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
72261@@ -1607,6 +1610,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
72262 return 0;
72263
72264 reset:
72265+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72266+ if (!grsec_enable_blackhole)
72267+#endif
72268 tcp_v4_send_reset(rsk, skb);
72269 discard:
72270 kfree_skb(skb);
72271@@ -1669,12 +1675,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
72272 TCP_SKB_CB(skb)->sacked = 0;
72273
72274 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
72275- if (!sk)
72276+ if (!sk) {
72277+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72278+ ret = 1;
72279+#endif
72280 goto no_tcp_socket;
72281-
72282+ }
72283 process:
72284- if (sk->sk_state == TCP_TIME_WAIT)
72285+ if (sk->sk_state == TCP_TIME_WAIT) {
72286+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72287+ ret = 2;
72288+#endif
72289 goto do_time_wait;
72290+ }
72291
72292 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
72293 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
72294@@ -1724,6 +1737,10 @@ no_tcp_socket:
72295 bad_packet:
72296 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
72297 } else {
72298+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72299+ if (!grsec_enable_blackhole || (ret == 1 &&
72300+ (skb->dev->flags & IFF_LOOPBACK)))
72301+#endif
72302 tcp_v4_send_reset(NULL, skb);
72303 }
72304
72305@@ -2388,7 +2405,11 @@ static void get_openreq4(struct sock *sk
72306 0, /* non standard timer */
72307 0, /* open_requests have no inode */
72308 atomic_read(&sk->sk_refcnt),
72309+#ifdef CONFIG_GRKERNSEC_HIDESYM
72310+ NULL,
72311+#else
72312 req,
72313+#endif
72314 len);
72315 }
72316
72317@@ -2438,7 +2459,12 @@ static void get_tcp4_sock(struct sock *s
72318 sock_i_uid(sk),
72319 icsk->icsk_probes_out,
72320 sock_i_ino(sk),
72321- atomic_read(&sk->sk_refcnt), sk,
72322+ atomic_read(&sk->sk_refcnt),
72323+#ifdef CONFIG_GRKERNSEC_HIDESYM
72324+ NULL,
72325+#else
72326+ sk,
72327+#endif
72328 jiffies_to_clock_t(icsk->icsk_rto),
72329 jiffies_to_clock_t(icsk->icsk_ack.ato),
72330 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
72331@@ -2466,7 +2492,13 @@ static void get_timewait4_sock(struct in
72332 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
72333 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
72334 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
72335- atomic_read(&tw->tw_refcnt), tw, len);
72336+ atomic_read(&tw->tw_refcnt),
72337+#ifdef CONFIG_GRKERNSEC_HIDESYM
72338+ NULL,
72339+#else
72340+ tw,
72341+#endif
72342+ len);
72343 }
72344
72345 #define TMPSZ 150
72346diff -urNp linux-3.0.7/net/ipv4/tcp_minisocks.c linux-3.0.7/net/ipv4/tcp_minisocks.c
72347--- linux-3.0.7/net/ipv4/tcp_minisocks.c 2011-07-21 22:17:23.000000000 -0400
72348+++ linux-3.0.7/net/ipv4/tcp_minisocks.c 2011-08-23 21:48:14.000000000 -0400
72349@@ -27,6 +27,10 @@
72350 #include <net/inet_common.h>
72351 #include <net/xfrm.h>
72352
72353+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72354+extern int grsec_enable_blackhole;
72355+#endif
72356+
72357 int sysctl_tcp_syncookies __read_mostly = 1;
72358 EXPORT_SYMBOL(sysctl_tcp_syncookies);
72359
72360@@ -745,6 +749,10 @@ listen_overflow:
72361
72362 embryonic_reset:
72363 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
72364+
72365+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72366+ if (!grsec_enable_blackhole)
72367+#endif
72368 if (!(flg & TCP_FLAG_RST))
72369 req->rsk_ops->send_reset(sk, skb);
72370
72371diff -urNp linux-3.0.7/net/ipv4/tcp_output.c linux-3.0.7/net/ipv4/tcp_output.c
72372--- linux-3.0.7/net/ipv4/tcp_output.c 2011-07-21 22:17:23.000000000 -0400
72373+++ linux-3.0.7/net/ipv4/tcp_output.c 2011-08-23 21:48:14.000000000 -0400
72374@@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
72375 int mss;
72376 int s_data_desired = 0;
72377
72378+ pax_track_stack();
72379+
72380 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
72381 s_data_desired = cvp->s_data_desired;
72382 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
72383diff -urNp linux-3.0.7/net/ipv4/tcp_probe.c linux-3.0.7/net/ipv4/tcp_probe.c
72384--- linux-3.0.7/net/ipv4/tcp_probe.c 2011-07-21 22:17:23.000000000 -0400
72385+++ linux-3.0.7/net/ipv4/tcp_probe.c 2011-08-23 21:47:56.000000000 -0400
72386@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
72387 if (cnt + width >= len)
72388 break;
72389
72390- if (copy_to_user(buf + cnt, tbuf, width))
72391+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
72392 return -EFAULT;
72393 cnt += width;
72394 }
72395diff -urNp linux-3.0.7/net/ipv4/tcp_timer.c linux-3.0.7/net/ipv4/tcp_timer.c
72396--- linux-3.0.7/net/ipv4/tcp_timer.c 2011-07-21 22:17:23.000000000 -0400
72397+++ linux-3.0.7/net/ipv4/tcp_timer.c 2011-08-23 21:48:14.000000000 -0400
72398@@ -22,6 +22,10 @@
72399 #include <linux/gfp.h>
72400 #include <net/tcp.h>
72401
72402+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72403+extern int grsec_lastack_retries;
72404+#endif
72405+
72406 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
72407 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
72408 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
72409@@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
72410 }
72411 }
72412
72413+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72414+ if ((sk->sk_state == TCP_LAST_ACK) &&
72415+ (grsec_lastack_retries > 0) &&
72416+ (grsec_lastack_retries < retry_until))
72417+ retry_until = grsec_lastack_retries;
72418+#endif
72419+
72420 if (retransmits_timed_out(sk, retry_until,
72421 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
72422 /* Has it gone just too far? */
72423diff -urNp linux-3.0.7/net/ipv4/udp.c linux-3.0.7/net/ipv4/udp.c
72424--- linux-3.0.7/net/ipv4/udp.c 2011-07-21 22:17:23.000000000 -0400
72425+++ linux-3.0.7/net/ipv4/udp.c 2011-08-23 21:48:14.000000000 -0400
72426@@ -86,6 +86,7 @@
72427 #include <linux/types.h>
72428 #include <linux/fcntl.h>
72429 #include <linux/module.h>
72430+#include <linux/security.h>
72431 #include <linux/socket.h>
72432 #include <linux/sockios.h>
72433 #include <linux/igmp.h>
72434@@ -107,6 +108,10 @@
72435 #include <net/xfrm.h>
72436 #include "udp_impl.h"
72437
72438+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72439+extern int grsec_enable_blackhole;
72440+#endif
72441+
72442 struct udp_table udp_table __read_mostly;
72443 EXPORT_SYMBOL(udp_table);
72444
72445@@ -564,6 +569,9 @@ found:
72446 return s;
72447 }
72448
72449+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
72450+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
72451+
72452 /*
72453 * This routine is called by the ICMP module when it gets some
72454 * sort of error condition. If err < 0 then the socket should
72455@@ -855,9 +863,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
72456 dport = usin->sin_port;
72457 if (dport == 0)
72458 return -EINVAL;
72459+
72460+ err = gr_search_udp_sendmsg(sk, usin);
72461+ if (err)
72462+ return err;
72463 } else {
72464 if (sk->sk_state != TCP_ESTABLISHED)
72465 return -EDESTADDRREQ;
72466+
72467+ err = gr_search_udp_sendmsg(sk, NULL);
72468+ if (err)
72469+ return err;
72470+
72471 daddr = inet->inet_daddr;
72472 dport = inet->inet_dport;
72473 /* Open fast path for connected socket.
72474@@ -1098,7 +1115,7 @@ static unsigned int first_packet_length(
72475 udp_lib_checksum_complete(skb)) {
72476 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
72477 IS_UDPLITE(sk));
72478- atomic_inc(&sk->sk_drops);
72479+ atomic_inc_unchecked(&sk->sk_drops);
72480 __skb_unlink(skb, rcvq);
72481 __skb_queue_tail(&list_kill, skb);
72482 }
72483@@ -1184,6 +1201,10 @@ try_again:
72484 if (!skb)
72485 goto out;
72486
72487+ err = gr_search_udp_recvmsg(sk, skb);
72488+ if (err)
72489+ goto out_free;
72490+
72491 ulen = skb->len - sizeof(struct udphdr);
72492 if (len > ulen)
72493 len = ulen;
72494@@ -1483,7 +1504,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
72495
72496 drop:
72497 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
72498- atomic_inc(&sk->sk_drops);
72499+ atomic_inc_unchecked(&sk->sk_drops);
72500 kfree_skb(skb);
72501 return -1;
72502 }
72503@@ -1502,7 +1523,7 @@ static void flush_stack(struct sock **st
72504 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
72505
72506 if (!skb1) {
72507- atomic_inc(&sk->sk_drops);
72508+ atomic_inc_unchecked(&sk->sk_drops);
72509 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
72510 IS_UDPLITE(sk));
72511 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
72512@@ -1671,6 +1692,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
72513 goto csum_error;
72514
72515 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
72516+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72517+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
72518+#endif
72519 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
72520
72521 /*
72522@@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock
72523 sk_wmem_alloc_get(sp),
72524 sk_rmem_alloc_get(sp),
72525 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72526- atomic_read(&sp->sk_refcnt), sp,
72527- atomic_read(&sp->sk_drops), len);
72528+ atomic_read(&sp->sk_refcnt),
72529+#ifdef CONFIG_GRKERNSEC_HIDESYM
72530+ NULL,
72531+#else
72532+ sp,
72533+#endif
72534+ atomic_read_unchecked(&sp->sk_drops), len);
72535 }
72536
72537 int udp4_seq_show(struct seq_file *seq, void *v)
72538diff -urNp linux-3.0.7/net/ipv6/addrconf.c linux-3.0.7/net/ipv6/addrconf.c
72539--- linux-3.0.7/net/ipv6/addrconf.c 2011-07-21 22:17:23.000000000 -0400
72540+++ linux-3.0.7/net/ipv6/addrconf.c 2011-10-06 04:17:55.000000000 -0400
72541@@ -2072,7 +2072,7 @@ int addrconf_set_dstaddr(struct net *net
72542 p.iph.ihl = 5;
72543 p.iph.protocol = IPPROTO_IPV6;
72544 p.iph.ttl = 64;
72545- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
72546+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
72547
72548 if (ops->ndo_do_ioctl) {
72549 mm_segment_t oldfs = get_fs();
72550diff -urNp linux-3.0.7/net/ipv6/inet6_connection_sock.c linux-3.0.7/net/ipv6/inet6_connection_sock.c
72551--- linux-3.0.7/net/ipv6/inet6_connection_sock.c 2011-07-21 22:17:23.000000000 -0400
72552+++ linux-3.0.7/net/ipv6/inet6_connection_sock.c 2011-08-23 21:47:56.000000000 -0400
72553@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
72554 #ifdef CONFIG_XFRM
72555 {
72556 struct rt6_info *rt = (struct rt6_info *)dst;
72557- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
72558+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
72559 }
72560 #endif
72561 }
72562@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
72563 #ifdef CONFIG_XFRM
72564 if (dst) {
72565 struct rt6_info *rt = (struct rt6_info *)dst;
72566- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
72567+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
72568 __sk_dst_reset(sk);
72569 dst = NULL;
72570 }
72571diff -urNp linux-3.0.7/net/ipv6/ipv6_sockglue.c linux-3.0.7/net/ipv6/ipv6_sockglue.c
72572--- linux-3.0.7/net/ipv6/ipv6_sockglue.c 2011-10-16 21:54:54.000000000 -0400
72573+++ linux-3.0.7/net/ipv6/ipv6_sockglue.c 2011-10-16 21:55:28.000000000 -0400
72574@@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
72575 int val, valbool;
72576 int retv = -ENOPROTOOPT;
72577
72578+ pax_track_stack();
72579+
72580 if (optval == NULL)
72581 val=0;
72582 else {
72583@@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
72584 int len;
72585 int val;
72586
72587+ pax_track_stack();
72588+
72589 if (ip6_mroute_opt(optname))
72590 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
72591
72592@@ -960,7 +964,7 @@ static int do_ipv6_getsockopt(struct soc
72593 if (sk->sk_type != SOCK_STREAM)
72594 return -ENOPROTOOPT;
72595
72596- msg.msg_control = optval;
72597+ msg.msg_control = (void __force_kernel *)optval;
72598 msg.msg_controllen = len;
72599 msg.msg_flags = flags;
72600
72601diff -urNp linux-3.0.7/net/ipv6/raw.c linux-3.0.7/net/ipv6/raw.c
72602--- linux-3.0.7/net/ipv6/raw.c 2011-07-21 22:17:23.000000000 -0400
72603+++ linux-3.0.7/net/ipv6/raw.c 2011-08-23 21:48:14.000000000 -0400
72604@@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
72605 {
72606 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
72607 skb_checksum_complete(skb)) {
72608- atomic_inc(&sk->sk_drops);
72609+ atomic_inc_unchecked(&sk->sk_drops);
72610 kfree_skb(skb);
72611 return NET_RX_DROP;
72612 }
72613@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
72614 struct raw6_sock *rp = raw6_sk(sk);
72615
72616 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
72617- atomic_inc(&sk->sk_drops);
72618+ atomic_inc_unchecked(&sk->sk_drops);
72619 kfree_skb(skb);
72620 return NET_RX_DROP;
72621 }
72622@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
72623
72624 if (inet->hdrincl) {
72625 if (skb_checksum_complete(skb)) {
72626- atomic_inc(&sk->sk_drops);
72627+ atomic_inc_unchecked(&sk->sk_drops);
72628 kfree_skb(skb);
72629 return NET_RX_DROP;
72630 }
72631@@ -601,7 +601,7 @@ out:
72632 return err;
72633 }
72634
72635-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
72636+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
72637 struct flowi6 *fl6, struct dst_entry **dstp,
72638 unsigned int flags)
72639 {
72640@@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
72641 u16 proto;
72642 int err;
72643
72644+ pax_track_stack();
72645+
72646 /* Rough check on arithmetic overflow,
72647 better check is made in ip6_append_data().
72648 */
72649@@ -909,12 +911,15 @@ do_confirm:
72650 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
72651 char __user *optval, int optlen)
72652 {
72653+ struct icmp6_filter filter;
72654+
72655 switch (optname) {
72656 case ICMPV6_FILTER:
72657 if (optlen > sizeof(struct icmp6_filter))
72658 optlen = sizeof(struct icmp6_filter);
72659- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
72660+ if (copy_from_user(&filter, optval, optlen))
72661 return -EFAULT;
72662+ raw6_sk(sk)->filter = filter;
72663 return 0;
72664 default:
72665 return -ENOPROTOOPT;
72666@@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct so
72667 char __user *optval, int __user *optlen)
72668 {
72669 int len;
72670+ struct icmp6_filter filter;
72671
72672 switch (optname) {
72673 case ICMPV6_FILTER:
72674@@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
72675 len = sizeof(struct icmp6_filter);
72676 if (put_user(len, optlen))
72677 return -EFAULT;
72678- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
72679+ filter = raw6_sk(sk)->filter;
72680+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
72681 return -EFAULT;
72682 return 0;
72683 default:
72684@@ -1252,7 +1259,13 @@ static void raw6_sock_seq_show(struct se
72685 0, 0L, 0,
72686 sock_i_uid(sp), 0,
72687 sock_i_ino(sp),
72688- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72689+ atomic_read(&sp->sk_refcnt),
72690+#ifdef CONFIG_GRKERNSEC_HIDESYM
72691+ NULL,
72692+#else
72693+ sp,
72694+#endif
72695+ atomic_read_unchecked(&sp->sk_drops));
72696 }
72697
72698 static int raw6_seq_show(struct seq_file *seq, void *v)
72699diff -urNp linux-3.0.7/net/ipv6/tcp_ipv6.c linux-3.0.7/net/ipv6/tcp_ipv6.c
72700--- linux-3.0.7/net/ipv6/tcp_ipv6.c 2011-09-02 18:11:21.000000000 -0400
72701+++ linux-3.0.7/net/ipv6/tcp_ipv6.c 2011-08-23 21:48:14.000000000 -0400
72702@@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
72703 }
72704 #endif
72705
72706+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72707+extern int grsec_enable_blackhole;
72708+#endif
72709+
72710 static void tcp_v6_hash(struct sock *sk)
72711 {
72712 if (sk->sk_state != TCP_CLOSE) {
72713@@ -1662,6 +1666,9 @@ static int tcp_v6_do_rcv(struct sock *sk
72714 return 0;
72715
72716 reset:
72717+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72718+ if (!grsec_enable_blackhole)
72719+#endif
72720 tcp_v6_send_reset(sk, skb);
72721 discard:
72722 if (opt_skb)
72723@@ -1741,12 +1748,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
72724 TCP_SKB_CB(skb)->sacked = 0;
72725
72726 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
72727- if (!sk)
72728+ if (!sk) {
72729+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72730+ ret = 1;
72731+#endif
72732 goto no_tcp_socket;
72733+ }
72734
72735 process:
72736- if (sk->sk_state == TCP_TIME_WAIT)
72737+ if (sk->sk_state == TCP_TIME_WAIT) {
72738+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72739+ ret = 2;
72740+#endif
72741 goto do_time_wait;
72742+ }
72743
72744 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
72745 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
72746@@ -1794,6 +1809,10 @@ no_tcp_socket:
72747 bad_packet:
72748 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
72749 } else {
72750+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72751+ if (!grsec_enable_blackhole || (ret == 1 &&
72752+ (skb->dev->flags & IFF_LOOPBACK)))
72753+#endif
72754 tcp_v6_send_reset(NULL, skb);
72755 }
72756
72757@@ -2054,7 +2073,13 @@ static void get_openreq6(struct seq_file
72758 uid,
72759 0, /* non standard timer */
72760 0, /* open_requests have no inode */
72761- 0, req);
72762+ 0,
72763+#ifdef CONFIG_GRKERNSEC_HIDESYM
72764+ NULL
72765+#else
72766+ req
72767+#endif
72768+ );
72769 }
72770
72771 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
72772@@ -2104,7 +2129,12 @@ static void get_tcp6_sock(struct seq_fil
72773 sock_i_uid(sp),
72774 icsk->icsk_probes_out,
72775 sock_i_ino(sp),
72776- atomic_read(&sp->sk_refcnt), sp,
72777+ atomic_read(&sp->sk_refcnt),
72778+#ifdef CONFIG_GRKERNSEC_HIDESYM
72779+ NULL,
72780+#else
72781+ sp,
72782+#endif
72783 jiffies_to_clock_t(icsk->icsk_rto),
72784 jiffies_to_clock_t(icsk->icsk_ack.ato),
72785 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
72786@@ -2139,7 +2169,13 @@ static void get_timewait6_sock(struct se
72787 dest->s6_addr32[2], dest->s6_addr32[3], destp,
72788 tw->tw_substate, 0, 0,
72789 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
72790- atomic_read(&tw->tw_refcnt), tw);
72791+ atomic_read(&tw->tw_refcnt),
72792+#ifdef CONFIG_GRKERNSEC_HIDESYM
72793+ NULL
72794+#else
72795+ tw
72796+#endif
72797+ );
72798 }
72799
72800 static int tcp6_seq_show(struct seq_file *seq, void *v)
72801diff -urNp linux-3.0.7/net/ipv6/udp.c linux-3.0.7/net/ipv6/udp.c
72802--- linux-3.0.7/net/ipv6/udp.c 2011-10-17 23:17:09.000000000 -0400
72803+++ linux-3.0.7/net/ipv6/udp.c 2011-10-17 23:17:19.000000000 -0400
72804@@ -50,6 +50,10 @@
72805 #include <linux/seq_file.h>
72806 #include "udp_impl.h"
72807
72808+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72809+extern int grsec_enable_blackhole;
72810+#endif
72811+
72812 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
72813 {
72814 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
72815@@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
72816
72817 return 0;
72818 drop:
72819- atomic_inc(&sk->sk_drops);
72820+ atomic_inc_unchecked(&sk->sk_drops);
72821 drop_no_sk_drops_inc:
72822 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
72823 kfree_skb(skb);
72824@@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
72825 continue;
72826 }
72827 drop:
72828- atomic_inc(&sk->sk_drops);
72829+ atomic_inc_unchecked(&sk->sk_drops);
72830 UDP6_INC_STATS_BH(sock_net(sk),
72831 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
72832 UDP6_INC_STATS_BH(sock_net(sk),
72833@@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
72834 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
72835 proto == IPPROTO_UDPLITE);
72836
72837+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72838+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
72839+#endif
72840 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
72841
72842 kfree_skb(skb);
72843@@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
72844 if (!sock_owned_by_user(sk))
72845 udpv6_queue_rcv_skb(sk, skb);
72846 else if (sk_add_backlog(sk, skb)) {
72847- atomic_inc(&sk->sk_drops);
72848+ atomic_inc_unchecked(&sk->sk_drops);
72849 bh_unlock_sock(sk);
72850 sock_put(sk);
72851 goto discard;
72852@@ -1408,8 +1415,13 @@ static void udp6_sock_seq_show(struct se
72853 0, 0L, 0,
72854 sock_i_uid(sp), 0,
72855 sock_i_ino(sp),
72856- atomic_read(&sp->sk_refcnt), sp,
72857- atomic_read(&sp->sk_drops));
72858+ atomic_read(&sp->sk_refcnt),
72859+#ifdef CONFIG_GRKERNSEC_HIDESYM
72860+ NULL,
72861+#else
72862+ sp,
72863+#endif
72864+ atomic_read_unchecked(&sp->sk_drops));
72865 }
72866
72867 int udp6_seq_show(struct seq_file *seq, void *v)
72868diff -urNp linux-3.0.7/net/irda/ircomm/ircomm_tty.c linux-3.0.7/net/irda/ircomm/ircomm_tty.c
72869--- linux-3.0.7/net/irda/ircomm/ircomm_tty.c 2011-07-21 22:17:23.000000000 -0400
72870+++ linux-3.0.7/net/irda/ircomm/ircomm_tty.c 2011-08-23 21:47:56.000000000 -0400
72871@@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(st
72872 add_wait_queue(&self->open_wait, &wait);
72873
72874 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
72875- __FILE__,__LINE__, tty->driver->name, self->open_count );
72876+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
72877
72878 /* As far as I can see, we protect open_count - Jean II */
72879 spin_lock_irqsave(&self->spinlock, flags);
72880 if (!tty_hung_up_p(filp)) {
72881 extra_count = 1;
72882- self->open_count--;
72883+ local_dec(&self->open_count);
72884 }
72885 spin_unlock_irqrestore(&self->spinlock, flags);
72886- self->blocked_open++;
72887+ local_inc(&self->blocked_open);
72888
72889 while (1) {
72890 if (tty->termios->c_cflag & CBAUD) {
72891@@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(st
72892 }
72893
72894 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
72895- __FILE__,__LINE__, tty->driver->name, self->open_count );
72896+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
72897
72898 schedule();
72899 }
72900@@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(st
72901 if (extra_count) {
72902 /* ++ is not atomic, so this should be protected - Jean II */
72903 spin_lock_irqsave(&self->spinlock, flags);
72904- self->open_count++;
72905+ local_inc(&self->open_count);
72906 spin_unlock_irqrestore(&self->spinlock, flags);
72907 }
72908- self->blocked_open--;
72909+ local_dec(&self->blocked_open);
72910
72911 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
72912- __FILE__,__LINE__, tty->driver->name, self->open_count);
72913+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
72914
72915 if (!retval)
72916 self->flags |= ASYNC_NORMAL_ACTIVE;
72917@@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_st
72918 }
72919 /* ++ is not atomic, so this should be protected - Jean II */
72920 spin_lock_irqsave(&self->spinlock, flags);
72921- self->open_count++;
72922+ local_inc(&self->open_count);
72923
72924 tty->driver_data = self;
72925 self->tty = tty;
72926 spin_unlock_irqrestore(&self->spinlock, flags);
72927
72928 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
72929- self->line, self->open_count);
72930+ self->line, local_read(&self->open_count));
72931
72932 /* Not really used by us, but lets do it anyway */
72933 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
72934@@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_
72935 return;
72936 }
72937
72938- if ((tty->count == 1) && (self->open_count != 1)) {
72939+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
72940 /*
72941 * Uh, oh. tty->count is 1, which means that the tty
72942 * structure will be freed. state->count should always
72943@@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_
72944 */
72945 IRDA_DEBUG(0, "%s(), bad serial port count; "
72946 "tty->count is 1, state->count is %d\n", __func__ ,
72947- self->open_count);
72948- self->open_count = 1;
72949+ local_read(&self->open_count));
72950+ local_set(&self->open_count, 1);
72951 }
72952
72953- if (--self->open_count < 0) {
72954+ if (local_dec_return(&self->open_count) < 0) {
72955 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
72956- __func__, self->line, self->open_count);
72957- self->open_count = 0;
72958+ __func__, self->line, local_read(&self->open_count));
72959+ local_set(&self->open_count, 0);
72960 }
72961- if (self->open_count) {
72962+ if (local_read(&self->open_count)) {
72963 spin_unlock_irqrestore(&self->spinlock, flags);
72964
72965 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
72966@@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_
72967 tty->closing = 0;
72968 self->tty = NULL;
72969
72970- if (self->blocked_open) {
72971+ if (local_read(&self->blocked_open)) {
72972 if (self->close_delay)
72973 schedule_timeout_interruptible(self->close_delay);
72974 wake_up_interruptible(&self->open_wait);
72975@@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty
72976 spin_lock_irqsave(&self->spinlock, flags);
72977 self->flags &= ~ASYNC_NORMAL_ACTIVE;
72978 self->tty = NULL;
72979- self->open_count = 0;
72980+ local_set(&self->open_count, 0);
72981 spin_unlock_irqrestore(&self->spinlock, flags);
72982
72983 wake_up_interruptible(&self->open_wait);
72984@@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct
72985 seq_putc(m, '\n');
72986
72987 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
72988- seq_printf(m, "Open count: %d\n", self->open_count);
72989+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
72990 seq_printf(m, "Max data size: %d\n", self->max_data_size);
72991 seq_printf(m, "Max header size: %d\n", self->max_header_size);
72992
72993diff -urNp linux-3.0.7/net/iucv/af_iucv.c linux-3.0.7/net/iucv/af_iucv.c
72994--- linux-3.0.7/net/iucv/af_iucv.c 2011-07-21 22:17:23.000000000 -0400
72995+++ linux-3.0.7/net/iucv/af_iucv.c 2011-08-23 21:47:56.000000000 -0400
72996@@ -648,10 +648,10 @@ static int iucv_sock_autobind(struct soc
72997
72998 write_lock_bh(&iucv_sk_list.lock);
72999
73000- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
73001+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73002 while (__iucv_get_sock_by_name(name)) {
73003 sprintf(name, "%08x",
73004- atomic_inc_return(&iucv_sk_list.autobind_name));
73005+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73006 }
73007
73008 write_unlock_bh(&iucv_sk_list.lock);
73009diff -urNp linux-3.0.7/net/key/af_key.c linux-3.0.7/net/key/af_key.c
73010--- linux-3.0.7/net/key/af_key.c 2011-07-21 22:17:23.000000000 -0400
73011+++ linux-3.0.7/net/key/af_key.c 2011-08-23 21:48:14.000000000 -0400
73012@@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
73013 struct xfrm_migrate m[XFRM_MAX_DEPTH];
73014 struct xfrm_kmaddress k;
73015
73016+ pax_track_stack();
73017+
73018 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
73019 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
73020 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
73021@@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
73022 static u32 get_acqseq(void)
73023 {
73024 u32 res;
73025- static atomic_t acqseq;
73026+ static atomic_unchecked_t acqseq;
73027
73028 do {
73029- res = atomic_inc_return(&acqseq);
73030+ res = atomic_inc_return_unchecked(&acqseq);
73031 } while (!res);
73032 return res;
73033 }
73034diff -urNp linux-3.0.7/net/lapb/lapb_iface.c linux-3.0.7/net/lapb/lapb_iface.c
73035--- linux-3.0.7/net/lapb/lapb_iface.c 2011-07-21 22:17:23.000000000 -0400
73036+++ linux-3.0.7/net/lapb/lapb_iface.c 2011-08-23 21:47:56.000000000 -0400
73037@@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev
73038 goto out;
73039
73040 lapb->dev = dev;
73041- lapb->callbacks = *callbacks;
73042+ lapb->callbacks = callbacks;
73043
73044 __lapb_insert_cb(lapb);
73045
73046@@ -380,32 +380,32 @@ int lapb_data_received(struct net_device
73047
73048 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
73049 {
73050- if (lapb->callbacks.connect_confirmation)
73051- lapb->callbacks.connect_confirmation(lapb->dev, reason);
73052+ if (lapb->callbacks->connect_confirmation)
73053+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
73054 }
73055
73056 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
73057 {
73058- if (lapb->callbacks.connect_indication)
73059- lapb->callbacks.connect_indication(lapb->dev, reason);
73060+ if (lapb->callbacks->connect_indication)
73061+ lapb->callbacks->connect_indication(lapb->dev, reason);
73062 }
73063
73064 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
73065 {
73066- if (lapb->callbacks.disconnect_confirmation)
73067- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
73068+ if (lapb->callbacks->disconnect_confirmation)
73069+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
73070 }
73071
73072 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
73073 {
73074- if (lapb->callbacks.disconnect_indication)
73075- lapb->callbacks.disconnect_indication(lapb->dev, reason);
73076+ if (lapb->callbacks->disconnect_indication)
73077+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
73078 }
73079
73080 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
73081 {
73082- if (lapb->callbacks.data_indication)
73083- return lapb->callbacks.data_indication(lapb->dev, skb);
73084+ if (lapb->callbacks->data_indication)
73085+ return lapb->callbacks->data_indication(lapb->dev, skb);
73086
73087 kfree_skb(skb);
73088 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
73089@@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *l
73090 {
73091 int used = 0;
73092
73093- if (lapb->callbacks.data_transmit) {
73094- lapb->callbacks.data_transmit(lapb->dev, skb);
73095+ if (lapb->callbacks->data_transmit) {
73096+ lapb->callbacks->data_transmit(lapb->dev, skb);
73097 used = 1;
73098 }
73099
73100diff -urNp linux-3.0.7/net/mac80211/debugfs_sta.c linux-3.0.7/net/mac80211/debugfs_sta.c
73101--- linux-3.0.7/net/mac80211/debugfs_sta.c 2011-07-21 22:17:23.000000000 -0400
73102+++ linux-3.0.7/net/mac80211/debugfs_sta.c 2011-08-23 21:48:14.000000000 -0400
73103@@ -140,6 +140,8 @@ static ssize_t sta_agg_status_read(struc
73104 struct tid_ampdu_rx *tid_rx;
73105 struct tid_ampdu_tx *tid_tx;
73106
73107+ pax_track_stack();
73108+
73109 rcu_read_lock();
73110
73111 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
73112@@ -240,6 +242,8 @@ static ssize_t sta_ht_capa_read(struct f
73113 struct sta_info *sta = file->private_data;
73114 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
73115
73116+ pax_track_stack();
73117+
73118 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
73119 htc->ht_supported ? "" : "not ");
73120 if (htc->ht_supported) {
73121diff -urNp linux-3.0.7/net/mac80211/ieee80211_i.h linux-3.0.7/net/mac80211/ieee80211_i.h
73122--- linux-3.0.7/net/mac80211/ieee80211_i.h 2011-07-21 22:17:23.000000000 -0400
73123+++ linux-3.0.7/net/mac80211/ieee80211_i.h 2011-08-23 21:47:56.000000000 -0400
73124@@ -27,6 +27,7 @@
73125 #include <net/ieee80211_radiotap.h>
73126 #include <net/cfg80211.h>
73127 #include <net/mac80211.h>
73128+#include <asm/local.h>
73129 #include "key.h"
73130 #include "sta_info.h"
73131
73132@@ -721,7 +722,7 @@ struct ieee80211_local {
73133 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
73134 spinlock_t queue_stop_reason_lock;
73135
73136- int open_count;
73137+ local_t open_count;
73138 int monitors, cooked_mntrs;
73139 /* number of interfaces with corresponding FIF_ flags */
73140 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
73141diff -urNp linux-3.0.7/net/mac80211/iface.c linux-3.0.7/net/mac80211/iface.c
73142--- linux-3.0.7/net/mac80211/iface.c 2011-09-02 18:11:21.000000000 -0400
73143+++ linux-3.0.7/net/mac80211/iface.c 2011-08-23 21:47:56.000000000 -0400
73144@@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
73145 break;
73146 }
73147
73148- if (local->open_count == 0) {
73149+ if (local_read(&local->open_count) == 0) {
73150 res = drv_start(local);
73151 if (res)
73152 goto err_del_bss;
73153@@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
73154 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
73155
73156 if (!is_valid_ether_addr(dev->dev_addr)) {
73157- if (!local->open_count)
73158+ if (!local_read(&local->open_count))
73159 drv_stop(local);
73160 return -EADDRNOTAVAIL;
73161 }
73162@@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
73163 mutex_unlock(&local->mtx);
73164
73165 if (coming_up)
73166- local->open_count++;
73167+ local_inc(&local->open_count);
73168
73169 if (hw_reconf_flags) {
73170 ieee80211_hw_config(local, hw_reconf_flags);
73171@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
73172 err_del_interface:
73173 drv_remove_interface(local, &sdata->vif);
73174 err_stop:
73175- if (!local->open_count)
73176+ if (!local_read(&local->open_count))
73177 drv_stop(local);
73178 err_del_bss:
73179 sdata->bss = NULL;
73180@@ -475,7 +475,7 @@ static void ieee80211_do_stop(struct iee
73181 }
73182
73183 if (going_down)
73184- local->open_count--;
73185+ local_dec(&local->open_count);
73186
73187 switch (sdata->vif.type) {
73188 case NL80211_IFTYPE_AP_VLAN:
73189@@ -534,7 +534,7 @@ static void ieee80211_do_stop(struct iee
73190
73191 ieee80211_recalc_ps(local, -1);
73192
73193- if (local->open_count == 0) {
73194+ if (local_read(&local->open_count) == 0) {
73195 if (local->ops->napi_poll)
73196 napi_disable(&local->napi);
73197 ieee80211_clear_tx_pending(local);
73198diff -urNp linux-3.0.7/net/mac80211/main.c linux-3.0.7/net/mac80211/main.c
73199--- linux-3.0.7/net/mac80211/main.c 2011-07-21 22:17:23.000000000 -0400
73200+++ linux-3.0.7/net/mac80211/main.c 2011-08-23 21:47:56.000000000 -0400
73201@@ -209,7 +209,7 @@ int ieee80211_hw_config(struct ieee80211
73202 local->hw.conf.power_level = power;
73203 }
73204
73205- if (changed && local->open_count) {
73206+ if (changed && local_read(&local->open_count)) {
73207 ret = drv_config(local, changed);
73208 /*
73209 * Goal:
73210diff -urNp linux-3.0.7/net/mac80211/mlme.c linux-3.0.7/net/mac80211/mlme.c
73211--- linux-3.0.7/net/mac80211/mlme.c 2011-09-02 18:11:21.000000000 -0400
73212+++ linux-3.0.7/net/mac80211/mlme.c 2011-08-23 21:48:14.000000000 -0400
73213@@ -1444,6 +1444,8 @@ static bool ieee80211_assoc_success(stru
73214 bool have_higher_than_11mbit = false;
73215 u16 ap_ht_cap_flags;
73216
73217+ pax_track_stack();
73218+
73219 /* AssocResp and ReassocResp have identical structure */
73220
73221 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
73222diff -urNp linux-3.0.7/net/mac80211/pm.c linux-3.0.7/net/mac80211/pm.c
73223--- linux-3.0.7/net/mac80211/pm.c 2011-07-21 22:17:23.000000000 -0400
73224+++ linux-3.0.7/net/mac80211/pm.c 2011-08-23 21:47:56.000000000 -0400
73225@@ -47,7 +47,7 @@ int __ieee80211_suspend(struct ieee80211
73226 cancel_work_sync(&local->dynamic_ps_enable_work);
73227 del_timer_sync(&local->dynamic_ps_timer);
73228
73229- local->wowlan = wowlan && local->open_count;
73230+ local->wowlan = wowlan && local_read(&local->open_count);
73231 if (local->wowlan) {
73232 int err = drv_suspend(local, wowlan);
73233 if (err) {
73234@@ -111,7 +111,7 @@ int __ieee80211_suspend(struct ieee80211
73235 }
73236
73237 /* stop hardware - this must stop RX */
73238- if (local->open_count)
73239+ if (local_read(&local->open_count))
73240 ieee80211_stop_device(local);
73241
73242 suspend:
73243diff -urNp linux-3.0.7/net/mac80211/rate.c linux-3.0.7/net/mac80211/rate.c
73244--- linux-3.0.7/net/mac80211/rate.c 2011-07-21 22:17:23.000000000 -0400
73245+++ linux-3.0.7/net/mac80211/rate.c 2011-08-23 21:47:56.000000000 -0400
73246@@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct
73247
73248 ASSERT_RTNL();
73249
73250- if (local->open_count)
73251+ if (local_read(&local->open_count))
73252 return -EBUSY;
73253
73254 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
73255diff -urNp linux-3.0.7/net/mac80211/rc80211_pid_debugfs.c linux-3.0.7/net/mac80211/rc80211_pid_debugfs.c
73256--- linux-3.0.7/net/mac80211/rc80211_pid_debugfs.c 2011-07-21 22:17:23.000000000 -0400
73257+++ linux-3.0.7/net/mac80211/rc80211_pid_debugfs.c 2011-08-23 21:47:56.000000000 -0400
73258@@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
73259
73260 spin_unlock_irqrestore(&events->lock, status);
73261
73262- if (copy_to_user(buf, pb, p))
73263+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
73264 return -EFAULT;
73265
73266 return p;
73267diff -urNp linux-3.0.7/net/mac80211/util.c linux-3.0.7/net/mac80211/util.c
73268--- linux-3.0.7/net/mac80211/util.c 2011-07-21 22:17:23.000000000 -0400
73269+++ linux-3.0.7/net/mac80211/util.c 2011-08-23 21:47:56.000000000 -0400
73270@@ -1147,7 +1147,7 @@ int ieee80211_reconfig(struct ieee80211_
73271 #endif
73272
73273 /* restart hardware */
73274- if (local->open_count) {
73275+ if (local_read(&local->open_count)) {
73276 /*
73277 * Upon resume hardware can sometimes be goofy due to
73278 * various platform / driver / bus issues, so restarting
73279diff -urNp linux-3.0.7/net/netfilter/ipvs/ip_vs_conn.c linux-3.0.7/net/netfilter/ipvs/ip_vs_conn.c
73280--- linux-3.0.7/net/netfilter/ipvs/ip_vs_conn.c 2011-07-21 22:17:23.000000000 -0400
73281+++ linux-3.0.7/net/netfilter/ipvs/ip_vs_conn.c 2011-08-23 21:47:56.000000000 -0400
73282@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
73283 /* Increase the refcnt counter of the dest */
73284 atomic_inc(&dest->refcnt);
73285
73286- conn_flags = atomic_read(&dest->conn_flags);
73287+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
73288 if (cp->protocol != IPPROTO_UDP)
73289 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
73290 /* Bind with the destination and its corresponding transmitter */
73291@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
73292 atomic_set(&cp->refcnt, 1);
73293
73294 atomic_set(&cp->n_control, 0);
73295- atomic_set(&cp->in_pkts, 0);
73296+ atomic_set_unchecked(&cp->in_pkts, 0);
73297
73298 atomic_inc(&ipvs->conn_count);
73299 if (flags & IP_VS_CONN_F_NO_CPORT)
73300@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
73301
73302 /* Don't drop the entry if its number of incoming packets is not
73303 located in [0, 8] */
73304- i = atomic_read(&cp->in_pkts);
73305+ i = atomic_read_unchecked(&cp->in_pkts);
73306 if (i > 8 || i < 0) return 0;
73307
73308 if (!todrop_rate[i]) return 0;
73309diff -urNp linux-3.0.7/net/netfilter/ipvs/ip_vs_core.c linux-3.0.7/net/netfilter/ipvs/ip_vs_core.c
73310--- linux-3.0.7/net/netfilter/ipvs/ip_vs_core.c 2011-07-21 22:17:23.000000000 -0400
73311+++ linux-3.0.7/net/netfilter/ipvs/ip_vs_core.c 2011-08-23 21:47:56.000000000 -0400
73312@@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
73313 ret = cp->packet_xmit(skb, cp, pd->pp);
73314 /* do not touch skb anymore */
73315
73316- atomic_inc(&cp->in_pkts);
73317+ atomic_inc_unchecked(&cp->in_pkts);
73318 ip_vs_conn_put(cp);
73319 return ret;
73320 }
73321@@ -1613,7 +1613,7 @@ ip_vs_in(unsigned int hooknum, struct sk
73322 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
73323 pkts = sysctl_sync_threshold(ipvs);
73324 else
73325- pkts = atomic_add_return(1, &cp->in_pkts);
73326+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
73327
73328 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
73329 cp->protocol == IPPROTO_SCTP) {
73330diff -urNp linux-3.0.7/net/netfilter/ipvs/ip_vs_ctl.c linux-3.0.7/net/netfilter/ipvs/ip_vs_ctl.c
73331--- linux-3.0.7/net/netfilter/ipvs/ip_vs_ctl.c 2011-09-02 18:11:21.000000000 -0400
73332+++ linux-3.0.7/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-23 21:48:14.000000000 -0400
73333@@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
73334 ip_vs_rs_hash(ipvs, dest);
73335 write_unlock_bh(&ipvs->rs_lock);
73336 }
73337- atomic_set(&dest->conn_flags, conn_flags);
73338+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
73339
73340 /* bind the service */
73341 if (!dest->svc) {
73342@@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
73343 " %-7s %-6d %-10d %-10d\n",
73344 &dest->addr.in6,
73345 ntohs(dest->port),
73346- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
73347+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
73348 atomic_read(&dest->weight),
73349 atomic_read(&dest->activeconns),
73350 atomic_read(&dest->inactconns));
73351@@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
73352 "%-7s %-6d %-10d %-10d\n",
73353 ntohl(dest->addr.ip),
73354 ntohs(dest->port),
73355- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
73356+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
73357 atomic_read(&dest->weight),
73358 atomic_read(&dest->activeconns),
73359 atomic_read(&dest->inactconns));
73360@@ -2284,6 +2284,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
73361 struct ip_vs_dest_user *udest_compat;
73362 struct ip_vs_dest_user_kern udest;
73363
73364+ pax_track_stack();
73365+
73366 if (!capable(CAP_NET_ADMIN))
73367 return -EPERM;
73368
73369@@ -2498,7 +2500,7 @@ __ip_vs_get_dest_entries(struct net *net
73370
73371 entry.addr = dest->addr.ip;
73372 entry.port = dest->port;
73373- entry.conn_flags = atomic_read(&dest->conn_flags);
73374+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
73375 entry.weight = atomic_read(&dest->weight);
73376 entry.u_threshold = dest->u_threshold;
73377 entry.l_threshold = dest->l_threshold;
73378@@ -3026,7 +3028,7 @@ static int ip_vs_genl_fill_dest(struct s
73379 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
73380
73381 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
73382- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
73383+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
73384 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
73385 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
73386 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
73387diff -urNp linux-3.0.7/net/netfilter/ipvs/ip_vs_sync.c linux-3.0.7/net/netfilter/ipvs/ip_vs_sync.c
73388--- linux-3.0.7/net/netfilter/ipvs/ip_vs_sync.c 2011-07-21 22:17:23.000000000 -0400
73389+++ linux-3.0.7/net/netfilter/ipvs/ip_vs_sync.c 2011-08-23 21:47:56.000000000 -0400
73390@@ -648,7 +648,7 @@ control:
73391 * i.e only increment in_pkts for Templates.
73392 */
73393 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
73394- int pkts = atomic_add_return(1, &cp->in_pkts);
73395+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
73396
73397 if (pkts % sysctl_sync_period(ipvs) != 1)
73398 return;
73399@@ -794,7 +794,7 @@ static void ip_vs_proc_conn(struct net *
73400
73401 if (opt)
73402 memcpy(&cp->in_seq, opt, sizeof(*opt));
73403- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
73404+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
73405 cp->state = state;
73406 cp->old_state = cp->state;
73407 /*
73408diff -urNp linux-3.0.7/net/netfilter/ipvs/ip_vs_xmit.c linux-3.0.7/net/netfilter/ipvs/ip_vs_xmit.c
73409--- linux-3.0.7/net/netfilter/ipvs/ip_vs_xmit.c 2011-07-21 22:17:23.000000000 -0400
73410+++ linux-3.0.7/net/netfilter/ipvs/ip_vs_xmit.c 2011-08-23 21:47:56.000000000 -0400
73411@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
73412 else
73413 rc = NF_ACCEPT;
73414 /* do not touch skb anymore */
73415- atomic_inc(&cp->in_pkts);
73416+ atomic_inc_unchecked(&cp->in_pkts);
73417 goto out;
73418 }
73419
73420@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
73421 else
73422 rc = NF_ACCEPT;
73423 /* do not touch skb anymore */
73424- atomic_inc(&cp->in_pkts);
73425+ atomic_inc_unchecked(&cp->in_pkts);
73426 goto out;
73427 }
73428
73429diff -urNp linux-3.0.7/net/netfilter/Kconfig linux-3.0.7/net/netfilter/Kconfig
73430--- linux-3.0.7/net/netfilter/Kconfig 2011-07-21 22:17:23.000000000 -0400
73431+++ linux-3.0.7/net/netfilter/Kconfig 2011-08-23 21:48:14.000000000 -0400
73432@@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
73433
73434 To compile it as a module, choose M here. If unsure, say N.
73435
73436+config NETFILTER_XT_MATCH_GRADM
73437+ tristate '"gradm" match support'
73438+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
73439+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
73440+ ---help---
73441+ The gradm match allows to match on grsecurity RBAC being enabled.
73442+ It is useful when iptables rules are applied early on bootup to
73443+ prevent connections to the machine (except from a trusted host)
73444+ while the RBAC system is disabled.
73445+
73446 config NETFILTER_XT_MATCH_HASHLIMIT
73447 tristate '"hashlimit" match support'
73448 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
73449diff -urNp linux-3.0.7/net/netfilter/Makefile linux-3.0.7/net/netfilter/Makefile
73450--- linux-3.0.7/net/netfilter/Makefile 2011-07-21 22:17:23.000000000 -0400
73451+++ linux-3.0.7/net/netfilter/Makefile 2011-08-23 21:48:14.000000000 -0400
73452@@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
73453 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
73454 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
73455 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
73456+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
73457 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
73458 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
73459 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
73460diff -urNp linux-3.0.7/net/netfilter/nfnetlink_log.c linux-3.0.7/net/netfilter/nfnetlink_log.c
73461--- linux-3.0.7/net/netfilter/nfnetlink_log.c 2011-07-21 22:17:23.000000000 -0400
73462+++ linux-3.0.7/net/netfilter/nfnetlink_log.c 2011-08-23 21:47:56.000000000 -0400
73463@@ -70,7 +70,7 @@ struct nfulnl_instance {
73464 };
73465
73466 static DEFINE_SPINLOCK(instances_lock);
73467-static atomic_t global_seq;
73468+static atomic_unchecked_t global_seq;
73469
73470 #define INSTANCE_BUCKETS 16
73471 static struct hlist_head instance_table[INSTANCE_BUCKETS];
73472@@ -505,7 +505,7 @@ __build_packet_message(struct nfulnl_ins
73473 /* global sequence number */
73474 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
73475 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
73476- htonl(atomic_inc_return(&global_seq)));
73477+ htonl(atomic_inc_return_unchecked(&global_seq)));
73478
73479 if (data_len) {
73480 struct nlattr *nla;
73481diff -urNp linux-3.0.7/net/netfilter/nfnetlink_queue.c linux-3.0.7/net/netfilter/nfnetlink_queue.c
73482--- linux-3.0.7/net/netfilter/nfnetlink_queue.c 2011-07-21 22:17:23.000000000 -0400
73483+++ linux-3.0.7/net/netfilter/nfnetlink_queue.c 2011-08-23 21:47:56.000000000 -0400
73484@@ -58,7 +58,7 @@ struct nfqnl_instance {
73485 */
73486 spinlock_t lock;
73487 unsigned int queue_total;
73488- atomic_t id_sequence; /* 'sequence' of pkt ids */
73489+ atomic_unchecked_t id_sequence; /* 'sequence' of pkt ids */
73490 struct list_head queue_list; /* packets in queue */
73491 };
73492
73493@@ -272,7 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_
73494 nfmsg->version = NFNETLINK_V0;
73495 nfmsg->res_id = htons(queue->queue_num);
73496
73497- entry->id = atomic_inc_return(&queue->id_sequence);
73498+ entry->id = atomic_inc_return_unchecked(&queue->id_sequence);
73499 pmsg.packet_id = htonl(entry->id);
73500 pmsg.hw_protocol = entskb->protocol;
73501 pmsg.hook = entry->hook;
73502@@ -870,7 +870,7 @@ static int seq_show(struct seq_file *s,
73503 inst->peer_pid, inst->queue_total,
73504 inst->copy_mode, inst->copy_range,
73505 inst->queue_dropped, inst->queue_user_dropped,
73506- atomic_read(&inst->id_sequence), 1);
73507+ atomic_read_unchecked(&inst->id_sequence), 1);
73508 }
73509
73510 static const struct seq_operations nfqnl_seq_ops = {
73511diff -urNp linux-3.0.7/net/netfilter/xt_gradm.c linux-3.0.7/net/netfilter/xt_gradm.c
73512--- linux-3.0.7/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
73513+++ linux-3.0.7/net/netfilter/xt_gradm.c 2011-08-23 21:48:14.000000000 -0400
73514@@ -0,0 +1,51 @@
73515+/*
73516+ * gradm match for netfilter
73517